1 /* 2 * Copyright (C) 2016 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include "chre/core/event_loop.h" 18 19 #include "chre/core/event.h" 20 #include "chre/core/event_loop_manager.h" 21 #include "chre/core/nanoapp.h" 22 #include "chre/platform/context.h" 23 #include "chre/platform/fatal_error.h" 24 #include "chre/platform/log.h" 25 #include "chre/platform/system_time.h" 26 #include "chre/util/conditional_lock_guard.h" 27 #include "chre/util/lock_guard.h" 28 #include "chre/util/system/debug_dump.h" 29 #include "chre/util/time.h" 30 #include "chre_api/chre/version.h" 31 32 namespace chre { 33 34 // Out of line declaration required for nonintegral static types 35 constexpr Nanoseconds EventLoop::kIntervalWakeupBucket; 36 37 namespace { 38 39 /** 40 * Populates a chreNanoappInfo structure using info from the given Nanoapp 41 * instance. 42 * 43 * @param app A potentially null pointer to the Nanoapp to read from 44 * @param info The structure to populate - should not be null, but this function 45 * will handle that input 46 * 47 * @return true if neither app nor info were null, and info was populated 48 */ 49 bool populateNanoappInfo(const Nanoapp *app, struct chreNanoappInfo *info) { 50 bool success = false; 51 52 if (app != nullptr && info != nullptr) { 53 info->appId = app->getAppId(); 54 info->version = app->getAppVersion(); 55 info->instanceId = app->getInstanceId(); 56 success = true; 57 } 58 59 return success; 60 } 61 62 } // anonymous namespace 63 64 bool EventLoop::findNanoappInstanceIdByAppId(uint64_t appId, 65 uint32_t *instanceId) const { 66 CHRE_ASSERT(instanceId != nullptr); 67 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread()); 68 69 bool found = false; 70 for (const UniquePtr<Nanoapp> &app : mNanoapps) { 71 if (app->getAppId() == appId) { 72 *instanceId = app->getInstanceId(); 73 found = true; 74 break; 75 } 76 } 77 78 return found; 79 } 80 81 void EventLoop::forEachNanoapp(NanoappCallbackFunction *callback, void *data) { 82 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread()); 83 84 for (const UniquePtr<Nanoapp> &nanoapp : mNanoapps) { 85 callback(nanoapp.get(), data); 86 } 87 } 88 89 void EventLoop::invokeMessageFreeFunction(uint64_t appId, 90 chreMessageFreeFunction *freeFunction, 91 void *message, size_t messageSize) { 92 Nanoapp *nanoapp = lookupAppByAppId(appId); 93 if (nanoapp == nullptr) { 94 LOGE("Couldn't find app 0x%016" PRIx64 " for message free callback", appId); 95 } else { 96 auto prevCurrentApp = mCurrentApp; 97 mCurrentApp = nanoapp; 98 freeFunction(message, messageSize); 99 mCurrentApp = prevCurrentApp; 100 } 101 } 102 103 void EventLoop::run() { 104 LOGI("EventLoop start"); 105 106 bool havePendingEvents = false; 107 while (mRunning) { 108 // Events are delivered in two stages: first they arrive in the inbound 109 // event queue mEvents (potentially posted from another thread), then within 110 // this context these events are distributed to smaller event queues 111 // associated with each Nanoapp that should receive the event. Once the 112 // event is delivered to all interested Nanoapps, its free callback is 113 // invoked. 114 if (!havePendingEvents || !mEvents.empty()) { 115 if (mEvents.size() > mMaxEventPoolUsage) { 116 mMaxEventPoolUsage = mEvents.size(); 117 } 118 119 // mEvents.pop() will be a blocking call if mEvents.empty() 120 distributeEvent(mEvents.pop()); 121 } 122 123 havePendingEvents = deliverEvents(); 124 125 mPowerControlManager.postEventLoopProcess(mEvents.size()); 126 } 127 128 // Deliver any events sitting in Nanoapps' own queues (we could drop them to 129 // exit faster, but this is less code and should complete quickly under normal 130 // conditions), then purge the main queue of events pending distribution. All 131 // nanoapps should be prevented from sending events or messages at this point 132 // via currentNanoappIsStopping() returning true. 133 flushNanoappEventQueues(); 134 while (!mEvents.empty()) { 135 freeEvent(mEvents.pop()); 136 } 137 138 // Unload all running nanoapps 139 while (!mNanoapps.empty()) { 140 unloadNanoappAtIndex(mNanoapps.size() - 1); 141 } 142 143 LOGI("Exiting EventLoop"); 144 } 145 146 bool EventLoop::startNanoapp(UniquePtr<Nanoapp> &nanoapp) { 147 CHRE_ASSERT(!nanoapp.isNull()); 148 bool success = false; 149 auto *eventLoopManager = EventLoopManagerSingleton::get(); 150 EventLoop &eventLoop = eventLoopManager->getEventLoop(); 151 uint32_t existingInstanceId; 152 153 if (nanoapp.isNull()) { 154 // no-op, invalid argument 155 } else if (eventLoop.findNanoappInstanceIdByAppId(nanoapp->getAppId(), 156 &existingInstanceId)) { 157 LOGE("App with ID 0x%016" PRIx64 158 " already exists as instance ID 0x%" PRIx32, 159 nanoapp->getAppId(), existingInstanceId); 160 } else if (!mNanoapps.prepareForPush()) { 161 LOG_OOM(); 162 } else { 163 nanoapp->setInstanceId(eventLoopManager->getNextInstanceId()); 164 LOGD("Instance ID %" PRIu32 " assigned to app ID 0x%016" PRIx64, 165 nanoapp->getInstanceId(), nanoapp->getAppId()); 166 167 Nanoapp *newNanoapp = nanoapp.get(); 168 { 169 LockGuard<Mutex> lock(mNanoappsLock); 170 mNanoapps.push_back(std::move(nanoapp)); 171 // After this point, nanoapp is null as we've transferred ownership into 172 // mNanoapps.back() - use newNanoapp to reference it 173 } 174 175 mCurrentApp = newNanoapp; 176 success = newNanoapp->start(); 177 mCurrentApp = nullptr; 178 if (!success) { 179 // TODO: to be fully safe, need to purge/flush any events and messages 180 // sent by the nanoapp here (but don't call nanoappEnd). For now, we just 181 // destroy the Nanoapp instance. 182 LOGE("Nanoapp %" PRIu32 " failed to start", newNanoapp->getInstanceId()); 183 184 // Note that this lock protects against concurrent read and modification 185 // of mNanoapps, but we are assured that no new nanoapps were added since 186 // we pushed the new nanoapp 187 LockGuard<Mutex> lock(mNanoappsLock); 188 mNanoapps.pop_back(); 189 } else { 190 notifyAppStatusChange(CHRE_EVENT_NANOAPP_STARTED, *newNanoapp); 191 } 192 } 193 194 return success; 195 } 196 197 bool EventLoop::unloadNanoapp(uint32_t instanceId, 198 bool allowSystemNanoappUnload) { 199 bool unloaded = false; 200 201 for (size_t i = 0; i < mNanoapps.size(); i++) { 202 if (instanceId == mNanoapps[i]->getInstanceId()) { 203 if (!allowSystemNanoappUnload && mNanoapps[i]->isSystemNanoapp()) { 204 LOGE("Refusing to unload system nanoapp"); 205 } else { 206 // Make sure all messages sent by this nanoapp at least have their 207 // associated free callback processing pending in the event queue (i.e. 208 // there are no messages pending delivery to the host) 209 EventLoopManagerSingleton::get() 210 ->getHostCommsManager() 211 .flushMessagesSentByNanoapp(mNanoapps[i]->getAppId()); 212 213 // Distribute all inbound events we have at this time - here we're 214 // interested in handling any message free callbacks generated by 215 // flushMessagesSentByNanoapp() 216 flushInboundEventQueue(); 217 218 // Mark that this nanoapp is stopping early, so it can't send events or 219 // messages during the nanoapp event queue flush 220 mStoppingNanoapp = mNanoapps[i].get(); 221 222 // Process any pending events, with the intent of ensuring that we free 223 // all events generated by this nanoapp 224 flushNanoappEventQueues(); 225 226 // Post the unload event now (so we can reference the Nanoapp instance 227 // directly), but nanoapps won't get it until after the unload completes 228 notifyAppStatusChange(CHRE_EVENT_NANOAPP_STOPPED, *mStoppingNanoapp); 229 230 // Finally, we are at a point where there should not be any pending 231 // events or messages sent by the app that could potentially reference 232 // the nanoapp's memory, so we are safe to unload it 233 unloadNanoappAtIndex(i); 234 mStoppingNanoapp = nullptr; 235 236 // TODO: right now we assume that the nanoapp will clean up all of its 237 // resource allocations in its nanoappEnd callback (memory, sensor 238 // subscriptions, etc.), otherwise we're leaking resources. We should 239 // perform resource cleanup automatically here to avoid these types of 240 // potential leaks. 241 242 LOGD("Unloaded nanoapp with instanceId %" PRIu32, instanceId); 243 unloaded = true; 244 } 245 break; 246 } 247 } 248 249 return unloaded; 250 } 251 252 bool EventLoop::postEventOrDie(uint16_t eventType, void *eventData, 253 chreEventCompleteFunction *freeCallback, 254 uint32_t targetInstanceId) { 255 bool success = false; 256 257 if (mRunning) { 258 success = allocateAndPostEvent(eventType, eventData, freeCallback, 259 kSystemInstanceId, targetInstanceId); 260 if (!success) { 261 // This can only happen if the event is a system event type. This 262 // postEvent method will fail if a non-system event is posted when the 263 // memory pool is close to full. 264 FATAL_ERROR("Failed to allocate system event type %" PRIu16, eventType); 265 } 266 } 267 268 return success; 269 } 270 271 bool EventLoop::postLowPriorityEventOrFree( 272 uint16_t eventType, void *eventData, 273 chreEventCompleteFunction *freeCallback, uint32_t senderInstanceId, 274 uint32_t targetInstanceId) { 275 bool success = false; 276 277 if (mRunning) { 278 if (mEventPool.getFreeBlockCount() > kMinReservedHighPriorityEventCount) { 279 success = allocateAndPostEvent(eventType, eventData, freeCallback, 280 senderInstanceId, targetInstanceId); 281 } 282 if (!success) { 283 if (freeCallback != nullptr) { 284 freeCallback(eventType, eventData); 285 } 286 LOGE("Failed to allocate event 0x%" PRIx16 " to instanceId %" PRIu32, 287 eventType, targetInstanceId); 288 } 289 } 290 291 return success; 292 } 293 294 void EventLoop::stop() { 295 auto callback = [](uint16_t /* type */, void * /* data */) { 296 EventLoopManagerSingleton::get()->getEventLoop().onStopComplete(); 297 }; 298 299 // Stop accepting new events and tell the main loop to finish. 300 postEventOrDie(0, nullptr, callback, kSystemInstanceId); 301 } 302 303 void EventLoop::onStopComplete() { 304 mRunning = false; 305 } 306 307 Nanoapp *EventLoop::findNanoappByInstanceId(uint32_t instanceId) const { 308 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread()); 309 return lookupAppByInstanceId(instanceId); 310 } 311 312 bool EventLoop::populateNanoappInfoForAppId( 313 uint64_t appId, struct chreNanoappInfo *info) const { 314 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread()); 315 Nanoapp *app = lookupAppByAppId(appId); 316 return populateNanoappInfo(app, info); 317 } 318 319 bool EventLoop::populateNanoappInfoForInstanceId( 320 uint32_t instanceId, struct chreNanoappInfo *info) const { 321 ConditionalLockGuard<Mutex> lock(mNanoappsLock, !inEventLoopThread()); 322 Nanoapp *app = lookupAppByInstanceId(instanceId); 323 return populateNanoappInfo(app, info); 324 } 325 326 bool EventLoop::currentNanoappIsStopping() const { 327 return (mCurrentApp == mStoppingNanoapp || !mRunning); 328 } 329 330 void EventLoop::logStateToBuffer(DebugDumpWrapper &debugDump) const { 331 debugDump.print("\nEvent Loop:\n"); 332 debugDump.print(" Max event pool usage: %zu/%zu\n", mMaxEventPoolUsage, 333 kMaxEventCount); 334 335 Nanoseconds timeSince = 336 SystemTime::getMonotonicTime() - mTimeLastWakeupBucketCycled; 337 uint64_t timeSinceMins = 338 timeSince.toRawNanoseconds() / kOneMinuteInNanoseconds; 339 uint64_t durationMins = 340 kIntervalWakeupBucket.toRawNanoseconds() / kOneMinuteInNanoseconds; 341 debugDump.print(" Nanoapp host wakeup tracking: cycled %" PRIu64 342 "mins ago, bucketDuration=%" PRIu64 "mins\n", 343 timeSinceMins, durationMins); 344 345 debugDump.print("\nNanoapps:\n"); 346 for (const UniquePtr<Nanoapp> &app : mNanoapps) { 347 app->logStateToBuffer(debugDump); 348 } 349 } 350 351 bool EventLoop::allocateAndPostEvent(uint16_t eventType, void *eventData, 352 chreEventCompleteFunction *freeCallback, 353 uint32_t senderInstanceId, 354 uint32_t targetInstanceId) { 355 bool success = false; 356 357 Milliseconds receivedTime = Nanoseconds(SystemTime::getMonotonicTime()); 358 // The event loop should never contain more than 65 seconds worth of data 359 // unless something has gone terribly wrong so use uint16_t to save space. 360 uint16_t receivedTimeMillis = 361 static_cast<uint16_t>(receivedTime.getMilliseconds()); 362 363 Event *event = 364 mEventPool.allocate(eventType, receivedTimeMillis, eventData, 365 freeCallback, senderInstanceId, targetInstanceId); 366 367 if (event != nullptr) { 368 success = mEvents.push(event); 369 } 370 return success; 371 } 372 373 bool EventLoop::deliverEvents() { 374 bool havePendingEvents = false; 375 376 // Do one loop of round-robin. We might want to have some kind of priority or 377 // time sharing in the future, but this should be good enough for now. 378 for (const UniquePtr<Nanoapp> &app : mNanoapps) { 379 if (app->hasPendingEvent()) { 380 havePendingEvents |= deliverNextEvent(app); 381 } 382 } 383 384 return havePendingEvents; 385 } 386 387 bool EventLoop::deliverNextEvent(const UniquePtr<Nanoapp> &app) { 388 // TODO: cleaner way to set/clear this? RAII-style? 389 mCurrentApp = app.get(); 390 Event *event = app->processNextEvent(); 391 mCurrentApp = nullptr; 392 393 if (event->isUnreferenced()) { 394 freeEvent(event); 395 } 396 397 return app->hasPendingEvent(); 398 } 399 400 void EventLoop::distributeEvent(Event *event) { 401 for (const UniquePtr<Nanoapp> &app : mNanoapps) { 402 if ((event->targetInstanceId == chre::kBroadcastInstanceId && 403 app->isRegisteredForBroadcastEvent(event->eventType)) || 404 event->targetInstanceId == app->getInstanceId()) { 405 app->postEvent(event); 406 } 407 } 408 409 if (event->isUnreferenced()) { 410 // Events sent to the system instance ID are processed via the free callback 411 // and are not expected to be delivered to any nanoapp, so no need to log a 412 // warning in that case 413 if (event->senderInstanceId != kSystemInstanceId) { 414 LOGW("Dropping event 0x%" PRIx16, event->eventType); 415 } 416 freeEvent(event); 417 } 418 } 419 420 void EventLoop::flushInboundEventQueue() { 421 while (!mEvents.empty()) { 422 distributeEvent(mEvents.pop()); 423 } 424 } 425 426 void EventLoop::flushNanoappEventQueues() { 427 while (deliverEvents()) 428 ; 429 } 430 431 void EventLoop::freeEvent(Event *event) { 432 if (event->freeCallback != nullptr) { 433 // TODO: find a better way to set the context to the creator of the event 434 mCurrentApp = lookupAppByInstanceId(event->senderInstanceId); 435 event->freeCallback(event->eventType, event->eventData); 436 mCurrentApp = nullptr; 437 } 438 439 mEventPool.deallocate(event); 440 } 441 442 Nanoapp *EventLoop::lookupAppByAppId(uint64_t appId) const { 443 for (const UniquePtr<Nanoapp> &app : mNanoapps) { 444 if (app->getAppId() == appId) { 445 return app.get(); 446 } 447 } 448 449 return nullptr; 450 } 451 452 Nanoapp *EventLoop::lookupAppByInstanceId(uint32_t instanceId) const { 453 // The system instance ID always has nullptr as its Nanoapp pointer, so can 454 // skip iterating through the nanoapp list for that case 455 if (instanceId != kSystemInstanceId) { 456 for (const UniquePtr<Nanoapp> &app : mNanoapps) { 457 if (app->getInstanceId() == instanceId) { 458 return app.get(); 459 } 460 } 461 } 462 463 return nullptr; 464 } 465 466 void EventLoop::notifyAppStatusChange(uint16_t eventType, 467 const Nanoapp &nanoapp) { 468 auto *info = memoryAlloc<chreNanoappInfo>(); 469 if (info == nullptr) { 470 LOG_OOM(); 471 } else { 472 info->appId = nanoapp.getAppId(); 473 info->version = nanoapp.getAppVersion(); 474 info->instanceId = nanoapp.getInstanceId(); 475 476 postEventOrDie(eventType, info, freeEventDataCallback); 477 } 478 } 479 480 void EventLoop::unloadNanoappAtIndex(size_t index) { 481 const UniquePtr<Nanoapp> &nanoapp = mNanoapps[index]; 482 483 // Lock here to prevent the nanoapp instance from being accessed between the 484 // time it is ended and fully erased 485 LockGuard<Mutex> lock(mNanoappsLock); 486 487 // Let the app know it's going away 488 mCurrentApp = nanoapp.get(); 489 nanoapp->end(); 490 mCurrentApp = nullptr; 491 492 // Destroy the Nanoapp instance 493 mNanoapps.erase(index); 494 } 495 496 void EventLoop::handleNanoappWakeupBuckets() { 497 Nanoseconds now = SystemTime::getMonotonicTime(); 498 Nanoseconds duration = now - mTimeLastWakeupBucketCycled; 499 if (duration > kIntervalWakeupBucket) { 500 size_t numBuckets = static_cast<size_t>( 501 duration.toRawNanoseconds() / kIntervalWakeupBucket.toRawNanoseconds()); 502 mTimeLastWakeupBucketCycled = now; 503 for (auto &nanoapp : mNanoapps) { 504 nanoapp->cycleWakeupBuckets(numBuckets); 505 } 506 } 507 } 508 509 } // namespace chre 510