//
// Copyright 2010 The Android Open Source Project
//
// A looper implementation based on epoll().
//
#define LOG_TAG "Looper"

//#define LOG_NDEBUG 0

// Debugs poll and wake interactions.
#define DEBUG_POLL_AND_WAKE 0

// Debugs callback registration and invocation.
#define DEBUG_CALLBACKS 0

#include <utils/Looper.h>

#include <sys/eventfd.h>
#include <cinttypes>

namespace android {

// --- WeakMessageHandler ---

WeakMessageHandler::WeakMessageHandler(const wp<MessageHandler>& handler) :
        mHandler(handler) {
}

WeakMessageHandler::~WeakMessageHandler() {
}

void WeakMessageHandler::handleMessage(const Message& message) {
    sp<MessageHandler> handler = mHandler.promote();
    if (handler != nullptr) {
        handler->handleMessage(message);
    }
}


// --- SimpleLooperCallback ---

SimpleLooperCallback::SimpleLooperCallback(Looper_callbackFunc callback) :
        mCallback(callback) {
}

SimpleLooperCallback::~SimpleLooperCallback() {
}

int SimpleLooperCallback::handleEvent(int fd, int events, void* data) {
    return mCallback(fd, events, data);
}


// --- Looper ---

// Maximum number of file descriptors for which to retrieve poll events each iteration.
static const int EPOLL_MAX_EVENTS = 16;

static pthread_once_t gTLSOnce = PTHREAD_ONCE_INIT;
static pthread_key_t gTLSKey = 0;

Looper::Looper(bool allowNonCallbacks)
    : mAllowNonCallbacks(allowNonCallbacks),
      mSendingMessage(false),
      mPolling(false),
      mEpollRebuildRequired(false),
      mNextRequestSeq(0),
      mResponseIndex(0),
      mNextMessageUptime(LLONG_MAX) {
    mWakeEventFd.reset(eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC));
    LOG_ALWAYS_FATAL_IF(mWakeEventFd.get() < 0, "Could not make wake event fd: %s", strerror(errno));

    AutoMutex _l(mLock);
    rebuildEpollLocked();
}

Looper::~Looper() {
}

void Looper::initTLSKey() {
    int error = pthread_key_create(&gTLSKey, threadDestructor);
    LOG_ALWAYS_FATAL_IF(error != 0, "Could not allocate TLS key: %s", strerror(error));
}

void Looper::threadDestructor(void *st) {
    Looper* const self = static_cast<Looper*>(st);
    if (self != nullptr) {
        self->decStrong((void*)threadDestructor);
    }
}

void Looper::setForThread(const sp<Looper>& looper) {
    sp<Looper> old = getForThread(); // also has side-effect of initializing TLS

    if (looper != nullptr) {
        looper->incStrong((void*)threadDestructor);
    }

    pthread_setspecific(gTLSKey, looper.get());

    if (old != nullptr) {
        old->decStrong((void*)threadDestructor);
    }
}

sp<Looper> Looper::getForThread() {
    int result = pthread_once(& gTLSOnce, initTLSKey);
    LOG_ALWAYS_FATAL_IF(result != 0, "pthread_once failed");

    return (Looper*)pthread_getspecific(gTLSKey);
}

sp<Looper> Looper::prepare(int opts) {
    bool allowNonCallbacks = opts & PREPARE_ALLOW_NON_CALLBACKS;
    sp<Looper> looper = Looper::getForThread();
    if (looper == nullptr) {
        looper = new Looper(allowNonCallbacks);
        Looper::setForThread(looper);
    }
    if (looper->getAllowNonCallbacks() != allowNonCallbacks) {
        ALOGW("Looper already prepared for this thread with a different value for the "
                "LOOPER_PREPARE_ALLOW_NON_CALLBACKS option.");
    }
    return looper;
}
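
// Illustrative sketch (not part of this file): a thread typically obtains its Looper
// once via prepare() and then drives it in a loop.  The names socketFd and
// onSocketReadable below are hypothetical.
//
//     sp<Looper> looper = Looper::prepare(0 /*opts*/);
//     looper->addFd(socketFd, 0 /*ident*/, Looper::EVENT_INPUT, onSocketReadable, nullptr);
//     for (;;) {
//         int result = looper->pollOnce(-1, nullptr, nullptr, nullptr);
//         if (result == Looper::POLL_ERROR) break;
//     }
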
bool Looper::getAllowNonCallbacks() const {
    return mAllowNonCallbacks;
}

void Looper::rebuildEpollLocked() {
    // Close old epoll instance if we have one.
    if (mEpollFd >= 0) {
#if DEBUG_CALLBACKS
        ALOGD("%p ~ rebuildEpollLocked - rebuilding epoll set", this);
#endif
        mEpollFd.reset();
    }

    // Allocate the new epoll instance and register the wake pipe.
    mEpollFd.reset(epoll_create1(EPOLL_CLOEXEC));
    LOG_ALWAYS_FATAL_IF(mEpollFd < 0, "Could not create epoll instance: %s", strerror(errno));

    struct epoll_event eventItem;
    memset(& eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union
    eventItem.events = EPOLLIN;
    eventItem.data.fd = mWakeEventFd.get();
    int result = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, mWakeEventFd.get(), &eventItem);
    LOG_ALWAYS_FATAL_IF(result != 0, "Could not add wake event fd to epoll instance: %s",
                        strerror(errno));

    for (size_t i = 0; i < mRequests.size(); i++) {
        const Request& request = mRequests.valueAt(i);
        struct epoll_event eventItem;
        request.initEventItem(&eventItem);

        int epollResult = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, request.fd, &eventItem);
        if (epollResult < 0) {
            ALOGE("Error adding epoll events for fd %d while rebuilding epoll set: %s",
                    request.fd, strerror(errno));
        }
    }
}

void Looper::scheduleEpollRebuildLocked() {
    if (!mEpollRebuildRequired) {
#if DEBUG_CALLBACKS
        ALOGD("%p ~ scheduleEpollRebuildLocked - scheduling epoll set rebuild", this);
#endif
        mEpollRebuildRequired = true;
        wake();
    }
}

int Looper::pollOnce(int timeoutMillis, int* outFd, int* outEvents, void** outData) {
    int result = 0;
    for (;;) {
        while (mResponseIndex < mResponses.size()) {
            const Response& response = mResponses.itemAt(mResponseIndex++);
            int ident = response.request.ident;
            if (ident >= 0) {
                int fd = response.request.fd;
                int events = response.events;
                void* data = response.request.data;
#if DEBUG_POLL_AND_WAKE
                ALOGD("%p ~ pollOnce - returning signalled identifier %d: "
                        "fd=%d, events=0x%x, data=%p",
                        this, ident, fd, events, data);
#endif
                if (outFd != nullptr) *outFd = fd;
                if (outEvents != nullptr) *outEvents = events;
                if (outData != nullptr) *outData = data;
                return ident;
            }
        }

        if (result != 0) {
#if DEBUG_POLL_AND_WAKE
            ALOGD("%p ~ pollOnce - returning result %d", this, result);
#endif
            if (outFd != nullptr) *outFd = 0;
            if (outEvents != nullptr) *outEvents = 0;
            if (outData != nullptr) *outData = nullptr;
            return result;
        }

        result = pollInner(timeoutMillis);
    }
}
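
// pollInner performs a single poll pass: it waits on the epoll set (with the timeout
// clamped to the next pending message's uptime), dispatches any due messages, queues
// responses for signalled file descriptors, and then invokes their callbacks.  It
// returns one of POLL_WAKE, POLL_CALLBACK, POLL_TIMEOUT, or POLL_ERROR.
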
int Looper::pollInner(int timeoutMillis) {
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ pollOnce - waiting: timeoutMillis=%d", this, timeoutMillis);
#endif

    // Adjust the timeout based on when the next message is due.
    if (timeoutMillis != 0 && mNextMessageUptime != LLONG_MAX) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        int messageTimeoutMillis = toMillisecondTimeoutDelay(now, mNextMessageUptime);
        if (messageTimeoutMillis >= 0
                && (timeoutMillis < 0 || messageTimeoutMillis < timeoutMillis)) {
            timeoutMillis = messageTimeoutMillis;
        }
#if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - next message in %" PRId64 "ns, adjusted timeout: timeoutMillis=%d",
                this, mNextMessageUptime - now, timeoutMillis);
#endif
    }

    // Poll.
    int result = POLL_WAKE;
    mResponses.clear();
    mResponseIndex = 0;

    // We are about to idle.
    mPolling = true;

    struct epoll_event eventItems[EPOLL_MAX_EVENTS];
    int eventCount = epoll_wait(mEpollFd.get(), eventItems, EPOLL_MAX_EVENTS, timeoutMillis);

    // No longer idling.
    mPolling = false;

    // Acquire lock.
    mLock.lock();

    // Rebuild epoll set if needed.
    if (mEpollRebuildRequired) {
        mEpollRebuildRequired = false;
        rebuildEpollLocked();
        goto Done;
    }

    // Check for poll error.
    if (eventCount < 0) {
        if (errno == EINTR) {
            goto Done;
        }
        ALOGW("Poll failed with an unexpected error: %s", strerror(errno));
        result = POLL_ERROR;
        goto Done;
    }

    // Check for poll timeout.
    if (eventCount == 0) {
#if DEBUG_POLL_AND_WAKE
        ALOGD("%p ~ pollOnce - timeout", this);
#endif
        result = POLL_TIMEOUT;
        goto Done;
    }

    // Handle all events.
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ pollOnce - handling events from %d fds", this, eventCount);
#endif

    for (int i = 0; i < eventCount; i++) {
        int fd = eventItems[i].data.fd;
        uint32_t epollEvents = eventItems[i].events;
        if (fd == mWakeEventFd.get()) {
            if (epollEvents & EPOLLIN) {
                awoken();
            } else {
                ALOGW("Ignoring unexpected epoll events 0x%x on wake event fd.", epollEvents);
            }
        } else {
            ssize_t requestIndex = mRequests.indexOfKey(fd);
            if (requestIndex >= 0) {
                int events = 0;
                if (epollEvents & EPOLLIN) events |= EVENT_INPUT;
                if (epollEvents & EPOLLOUT) events |= EVENT_OUTPUT;
                if (epollEvents & EPOLLERR) events |= EVENT_ERROR;
                if (epollEvents & EPOLLHUP) events |= EVENT_HANGUP;
                pushResponse(events, mRequests.valueAt(requestIndex));
            } else {
                ALOGW("Ignoring unexpected epoll events 0x%x on fd %d that is "
                        "no longer registered.", epollEvents, fd);
            }
        }
    }
Done: ;

    // Invoke pending message callbacks.
    mNextMessageUptime = LLONG_MAX;
    while (mMessageEnvelopes.size() != 0) {
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(0);
        if (messageEnvelope.uptime <= now) {
            // Remove the envelope from the list.
            // We keep a strong reference to the handler until the call to handleMessage
            // finishes. Then we drop it so that the handler can be deleted *before*
            // we reacquire our lock.
            { // obtain handler
                sp<MessageHandler> handler = messageEnvelope.handler;
                Message message = messageEnvelope.message;
                mMessageEnvelopes.removeAt(0);
                mSendingMessage = true;
                mLock.unlock();

#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
                ALOGD("%p ~ pollOnce - sending message: handler=%p, what=%d",
                        this, handler.get(), message.what);
#endif
                handler->handleMessage(message);
            } // release handler

            mLock.lock();
            mSendingMessage = false;
            result = POLL_CALLBACK;
        } else {
            // The last message left at the head of the queue determines the next wakeup time.
            mNextMessageUptime = messageEnvelope.uptime;
            break;
        }
    }

    // Release lock.
    mLock.unlock();

    // Invoke all response callbacks.
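    // Note that mLock is not held here, so a callback can safely call addFd(),
    // removeFd(), or sendMessage() back into this Looper without deadlocking.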
    for (size_t i = 0; i < mResponses.size(); i++) {
        Response& response = mResponses.editItemAt(i);
        if (response.request.ident == POLL_CALLBACK) {
            int fd = response.request.fd;
            int events = response.events;
            void* data = response.request.data;
#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
            ALOGD("%p ~ pollOnce - invoking fd event callback %p: fd=%d, events=0x%x, data=%p",
                    this, response.request.callback.get(), fd, events, data);
#endif
            // Invoke the callback. Note that the file descriptor may be closed by
            // the callback (and potentially even reused) before the function returns so
            // we need to be a little careful when removing the file descriptor afterwards.
            int callbackResult = response.request.callback->handleEvent(fd, events, data);
            if (callbackResult == 0) {
                removeFd(fd, response.request.seq);
            }

            // Clear the callback reference in the response structure promptly because we
            // will not clear the response vector itself until the next poll.
            response.request.callback.clear();
            result = POLL_CALLBACK;
        }
    }
    return result;
}

int Looper::pollAll(int timeoutMillis, int* outFd, int* outEvents, void** outData) {
    if (timeoutMillis <= 0) {
        int result;
        do {
            result = pollOnce(timeoutMillis, outFd, outEvents, outData);
        } while (result == POLL_CALLBACK);
        return result;
    } else {
        nsecs_t endTime = systemTime(SYSTEM_TIME_MONOTONIC)
                + milliseconds_to_nanoseconds(timeoutMillis);

        for (;;) {
            int result = pollOnce(timeoutMillis, outFd, outEvents, outData);
            if (result != POLL_CALLBACK) {
                return result;
            }

            nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = toMillisecondTimeoutDelay(now, endTime);
            if (timeoutMillis == 0) {
                return POLL_TIMEOUT;
            }
        }
    }
}

void Looper::wake() {
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ wake", this);
#endif

    uint64_t inc = 1;
    ssize_t nWrite = TEMP_FAILURE_RETRY(write(mWakeEventFd.get(), &inc, sizeof(uint64_t)));
    if (nWrite != sizeof(uint64_t)) {
        if (errno != EAGAIN) {
            LOG_ALWAYS_FATAL("Could not write wake signal to fd %d (returned %zd): %s",
                             mWakeEventFd.get(), nWrite, strerror(errno));
        }
    }
}

void Looper::awoken() {
#if DEBUG_POLL_AND_WAKE
    ALOGD("%p ~ awoken", this);
#endif

    uint64_t counter;
    TEMP_FAILURE_RETRY(read(mWakeEventFd.get(), &counter, sizeof(uint64_t)));
}

void Looper::pushResponse(int events, const Request& request) {
    Response response;
    response.events = events;
    response.request = request;
    mResponses.push(response);
}
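
// Example (sketch): registering a file descriptor with a C-style callback.  The names
// onPipeReadable and pipeFd are hypothetical; the return value follows the callback
// contract enforced in pollInner: returning 1 keeps the callback registered, while
// returning 0 causes the Looper to remove it.
//
//     int onPipeReadable(int fd, int events, void* data) {
//         // ... drain fd ...
//         return 1;  // keep receiving events; return 0 to unregister
//     }
//     looper->addFd(pipeFd, 0, Looper::EVENT_INPUT, onPipeReadable, nullptr);
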
int Looper::addFd(int fd, int ident, int events, Looper_callbackFunc callback, void* data) {
    return addFd(fd, ident, events, callback ? new SimpleLooperCallback(callback) : nullptr, data);
}

int Looper::addFd(int fd, int ident, int events, const sp<LooperCallback>& callback, void* data) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ addFd - fd=%d, ident=%d, events=0x%x, callback=%p, data=%p", this, fd, ident,
            events, callback.get(), data);
#endif

    if (!callback.get()) {
        if (!mAllowNonCallbacks) {
            ALOGE("Invalid attempt to set NULL callback but not allowed for this looper.");
            return -1;
        }

        if (ident < 0) {
            ALOGE("Invalid attempt to set NULL callback with ident < 0.");
            return -1;
        }
    } else {
        ident = POLL_CALLBACK;
    }

    { // acquire lock
        AutoMutex _l(mLock);

        Request request;
        request.fd = fd;
        request.ident = ident;
        request.events = events;
        request.seq = mNextRequestSeq++;
        request.callback = callback;
        request.data = data;
        if (mNextRequestSeq == -1) mNextRequestSeq = 0; // reserve sequence number -1

        struct epoll_event eventItem;
        request.initEventItem(&eventItem);

        ssize_t requestIndex = mRequests.indexOfKey(fd);
        if (requestIndex < 0) {
            int epollResult = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, fd, &eventItem);
            if (epollResult < 0) {
                ALOGE("Error adding epoll events for fd %d: %s", fd, strerror(errno));
                return -1;
            }
            mRequests.add(fd, request);
        } else {
            int epollResult = epoll_ctl(mEpollFd.get(), EPOLL_CTL_MOD, fd, &eventItem);
            if (epollResult < 0) {
                if (errno == ENOENT) {
                    // Tolerate ENOENT because it means that an older file descriptor was
                    // closed before its callback was unregistered and meanwhile a new
                    // file descriptor with the same number has been created and is now
                    // being registered for the first time. This error may occur naturally
                    // when a callback has the side-effect of closing the file descriptor
                    // before returning and unregistering itself. Callback sequence number
                    // checks further ensure that the race is benign.
                    //
                    // Unfortunately due to kernel limitations we need to rebuild the epoll
                    // set from scratch because it may contain an old file handle that we are
                    // now unable to remove since its file descriptor is no longer valid.
                    // No such problem would have occurred if we were using the poll system
                    // call instead, but that approach carries other disadvantages.
#if DEBUG_CALLBACKS
                    ALOGD("%p ~ addFd - EPOLL_CTL_MOD failed due to file descriptor "
                            "being recycled, falling back on EPOLL_CTL_ADD: %s",
                            this, strerror(errno));
#endif
                    epollResult = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, fd, &eventItem);
                    if (epollResult < 0) {
                        ALOGE("Error modifying or adding epoll events for fd %d: %s",
                                fd, strerror(errno));
                        return -1;
                    }
                    scheduleEpollRebuildLocked();
                } else {
                    ALOGE("Error modifying epoll events for fd %d: %s", fd, strerror(errno));
                    return -1;
                }
            }
            mRequests.replaceValueAt(requestIndex, request);
        }
    } // release lock
    return 1;
}
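
// removeFd() returns 1 if the file descriptor was removed, 0 if it was not registered
// (or, for the sequence-numbered variant, if the sequence number did not match), and
// -1 on error.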
int Looper::removeFd(int fd) {
    return removeFd(fd, -1);
}

int Looper::removeFd(int fd, int seq) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ removeFd - fd=%d, seq=%d", this, fd, seq);
#endif

    { // acquire lock
        AutoMutex _l(mLock);
        ssize_t requestIndex = mRequests.indexOfKey(fd);
        if (requestIndex < 0) {
            return 0;
        }

        // Check the sequence number if one was given.
        if (seq != -1 && mRequests.valueAt(requestIndex).seq != seq) {
#if DEBUG_CALLBACKS
            ALOGD("%p ~ removeFd - sequence number mismatch, oldSeq=%d",
                    this, mRequests.valueAt(requestIndex).seq);
#endif
            return 0;
        }

        // Always remove the FD from the request map even if an error occurs while
        // updating the epoll set so that we avoid accidentally leaking callbacks.
        mRequests.removeItemsAt(requestIndex);

        int epollResult = epoll_ctl(mEpollFd.get(), EPOLL_CTL_DEL, fd, nullptr);
        if (epollResult < 0) {
            if (seq != -1 && (errno == EBADF || errno == ENOENT)) {
                // Tolerate EBADF or ENOENT when the sequence number is known because it
                // means that the file descriptor was closed before its callback was
                // unregistered. This error may occur naturally when a callback has the
                // side-effect of closing the file descriptor before returning and
                // unregistering itself.
                //
                // Unfortunately due to kernel limitations we need to rebuild the epoll
                // set from scratch because it may contain an old file handle that we are
                // now unable to remove since its file descriptor is no longer valid.
                // No such problem would have occurred if we were using the poll system
                // call instead, but that approach carries other disadvantages.
#if DEBUG_CALLBACKS
                ALOGD("%p ~ removeFd - EPOLL_CTL_DEL failed due to file descriptor "
                        "being closed: %s", this, strerror(errno));
#endif
                scheduleEpollRebuildLocked();
            } else {
                // Some other error occurred. This is really weird because it means
                // our list of callbacks got out of sync with the epoll set somehow.
                // We defensively rebuild the epoll set to avoid getting spurious
                // notifications with nowhere to go.
                ALOGE("Error removing epoll events for fd %d: %s", fd, strerror(errno));
                scheduleEpollRebuildLocked();
                return -1;
            }
        }
    } // release lock
    return 1;
}

void Looper::sendMessage(const sp<MessageHandler>& handler, const Message& message) {
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    sendMessageAtTime(now, handler, message);
}

void Looper::sendMessageDelayed(nsecs_t uptimeDelay, const sp<MessageHandler>& handler,
        const Message& message) {
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    sendMessageAtTime(now + uptimeDelay, handler, message);
}
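
// Example (sketch): posting a message to a handler roughly 100ms from now.  MyHandler
// and kWhatTimeout are hypothetical; Message and MessageHandler come from utils/Looper.h.
//
//     sp<MessageHandler> handler = new MyHandler();
//     looper->sendMessageDelayed(milliseconds_to_nanoseconds(100), handler,
//             Message(kWhatTimeout));
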
void Looper::sendMessageAtTime(nsecs_t uptime, const sp<MessageHandler>& handler,
        const Message& message) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ sendMessageAtTime - uptime=%" PRId64 ", handler=%p, what=%d",
            this, uptime, handler.get(), message.what);
#endif

    size_t i = 0;
    { // acquire lock
        AutoMutex _l(mLock);

        size_t messageCount = mMessageEnvelopes.size();
        while (i < messageCount && uptime >= mMessageEnvelopes.itemAt(i).uptime) {
            i += 1;
        }

        MessageEnvelope messageEnvelope(uptime, handler, message);
        mMessageEnvelopes.insertAt(messageEnvelope, i, 1);

        // Optimization: If the Looper is currently sending a message, then we can skip
        // the call to wake() because the next thing the Looper will do after processing
        // messages is to decide when the next wakeup time should be. In fact, it does
        // not even matter whether this code is running on the Looper thread.
        if (mSendingMessage) {
            return;
        }
    } // release lock

    // Wake the poll loop only when we enqueue a new message at the head.
    if (i == 0) {
        wake();
    }
}

void Looper::removeMessages(const sp<MessageHandler>& handler) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ removeMessages - handler=%p", this, handler.get());
#endif

    { // acquire lock
        AutoMutex _l(mLock);

        for (size_t i = mMessageEnvelopes.size(); i != 0; ) {
            const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(--i);
            if (messageEnvelope.handler == handler) {
                mMessageEnvelopes.removeAt(i);
            }
        }
    } // release lock
}

void Looper::removeMessages(const sp<MessageHandler>& handler, int what) {
#if DEBUG_CALLBACKS
    ALOGD("%p ~ removeMessages - handler=%p, what=%d", this, handler.get(), what);
#endif

    { // acquire lock
        AutoMutex _l(mLock);

        for (size_t i = mMessageEnvelopes.size(); i != 0; ) {
            const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(--i);
            if (messageEnvelope.handler == handler
                    && messageEnvelope.message.what == what) {
                mMessageEnvelopes.removeAt(i);
            }
        }
    } // release lock
}

bool Looper::isPolling() const {
    return mPolling;
}

void Looper::Request::initEventItem(struct epoll_event* eventItem) const {
    int epollEvents = 0;
    if (events & EVENT_INPUT) epollEvents |= EPOLLIN;
    if (events & EVENT_OUTPUT) epollEvents |= EPOLLOUT;

    memset(eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union
    eventItem->events = epollEvents;
    eventItem->data.fd = fd;
}

MessageHandler::~MessageHandler() { }

LooperCallback::~LooperCallback() { }

} // namespace android