1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <plat/inc/eeData.h>
18 #include <plat/inc/plat.h>
19 #include <plat/inc/bl.h>
20 #include <platform.h>
21 #include <hostIntf.h>
22 #include <inttypes.h>
23 #include <syscall.h>
24 #include <sensors.h>
25 #include <string.h>
26 #include <stdlib.h>
27 #include <stdarg.h>
28 #include <printf.h>
29 #include <eventQ.h>
30 #include <apInt.h>
31 #include <timer.h>
32 #include <osApi.h>
33 #include <seos.h>
34 #include <heap.h>
35 #include <slab.h>
36 #include <cpu.h>
37 #include <util.h>
38 #include <mpu.h>
39 #include <nanohubPacket.h>
40 #include <atomic.h>
41
42 #include <nanohub/nanohub.h>
43 #include <nanohub/crc.h>
44
45 #define NO_NODE (TaskIndex)(-1)
46 #define for_each_task(listHead, task) for (task = osTaskByIdx((listHead)->next); task; task = osTaskByIdx(task->list.next))
47 #define MAKE_NEW_TID(task) task->tid = ((task->tid + TASK_TID_INCREMENT) & TASK_TID_COUNTER_MASK) | \
48 (osTaskIndex(task) & TASK_TID_IDX_MASK);
49 #define TID_TO_TASK_IDX(tid) (tid & TASK_TID_IDX_MASK)
50
51 #define FL_TASK_STOPPED 1
52
53 #define EVT_SUBSCRIBE_TO_EVT 0x00000000
54 #define EVT_UNSUBSCRIBE_TO_EVT 0x00000001
55 #define EVT_DEFERRED_CALLBACK 0x00000002
56 #define EVT_PRIVATE_EVT 0x00000003
57
58 #define EVENT_WITH_ORIGIN(evt, origin) (((evt) & EVT_MASK) | ((origin) << (32 - TASK_TID_BITS)))
59 #define EVENT_GET_ORIGIN(evt) ((evt) >> (32 - TASK_TID_BITS))
60 #define EVENT_GET_EVENT(evt) ((evt) & (EVT_MASK & ~EVENT_TYPE_BIT_DISCARDABLE))
61
62 /*
63 * Since locking is difficult to do right for adding/removing listeners and such
64 * since it can happen in interrupt context and not, and one such operation can
65 * interrupt another, and we do have a working event queue, we enqueue all the
66 * requests and then deal with them in the main code only when the event bubbles
67 * up to the front of the queue. This allows us to not need locks around the
68 * data structures.
69 */
70
71 SET_PACKED_STRUCT_MODE_ON
72 struct TaskList {
73 TaskIndex prev;
74 TaskIndex next;
75 } ATTRIBUTE_PACKED;
76 SET_PACKED_STRUCT_MODE_OFF
77
78 struct Task {
79 /* App entry points */
80 const struct AppHdr *app;
81
82 /* per-platform app info */
83 struct PlatAppInfo platInfo;
84
85 /* for some basic number of subbed events, the array is stored directly here. after that, a heap chunk is used */
86 uint32_t subbedEventsInt[MAX_EMBEDDED_EVT_SUBS];
87 uint32_t *subbedEvents; /* NULL for invalid tasks */
88
89 struct TaskList list;
90
91 /* task pointer will not change throughout task lifetime,
92 * however same task pointer may be reused for a new task; to eliminate the ambiguity,
93 * TID is maintained for each task such that new tasks will be guaranteed to receive different TID */
94 uint16_t tid;
95
96 uint8_t subbedEvtCount;
97 uint8_t subbedEvtListSz;
98 uint8_t flags;
99 uint8_t ioCount;
100
101 };
102
103 struct TaskPool {
104 struct Task data[MAX_TASKS];
105 };
106
107 union InternalThing {
108 struct {
109 uint32_t tid;
110 uint32_t evt;
111 } evtSub;
112 struct {
113 OsDeferCbkF callback;
114 void *cookie;
115 } deferred;
116 struct {
117 uint32_t evtType;
118 void *evtData;
119 TaggedPtr evtFreeInfo;
120 uint32_t toTid;
121 } privateEvt;
122 union OsApiSlabItem osApiItem;
123 };
124
125 static struct TaskPool mTaskPool;
126 static struct EvtQueue *mEvtsInternal;
127 static struct SlabAllocator* mMiscInternalThingsSlab;
128 static struct TaskList mFreeTasks;
129 static struct TaskList mTasks;
130 static struct Task *mCurrentTask;
131 static struct Task *mSystemTask;
132 static TaggedPtr *mCurEvtEventFreeingInfo = NULL; //used as flag for retaining. NULL when none or already retained
133
list_init(struct TaskList * l)134 static inline void list_init(struct TaskList *l)
135 {
136 l->prev = l->next = NO_NODE;
137 }
138
osGetCurrentTask()139 static inline struct Task *osGetCurrentTask()
140 {
141 return mCurrentTask;
142 }
143
osSetCurrentTask(struct Task * task)144 static struct Task *osSetCurrentTask(struct Task *task)
145 {
146 struct Task *old = mCurrentTask;
147 while (true) {
148 old = mCurrentTask;
149 if (atomicCmpXchg32bits((uint32_t*)&mCurrentTask, (uint32_t)old, (uint32_t)task))
150 break;
151 }
152 return old;
153 }
154
155 // beyond this point, noone shall access mCurrentTask directly
156
osTaskTestFlags(struct Task * task,uint32_t mask)157 static inline bool osTaskTestFlags(struct Task *task, uint32_t mask)
158 {
159 return (atomicReadByte(&task->flags) & mask) != 0;
160 }
161
osTaskClrSetFlags(struct Task * task,uint32_t clrMask,uint32_t setMask)162 static inline uint32_t osTaskClrSetFlags(struct Task *task, uint32_t clrMask, uint32_t setMask)
163 {
164 while (true) {
165 uint8_t flags = atomicReadByte(&task->flags);
166 uint8_t newFlags = (flags & ~clrMask) | setMask;
167 if (atomicCmpXchgByte(&task->flags, flags, newFlags))
168 return newFlags;
169 }
170 }
171
osTaskAddIoCount(struct Task * task,int32_t delta)172 static inline uint32_t osTaskAddIoCount(struct Task *task, int32_t delta)
173 {
174 uint8_t count = atomicAddByte(&task->ioCount, delta);
175
176 count += delta; // old value is returned, so we add it again
177
178 return count;
179 }
180
osTaskGetIoCount(struct Task * task)181 static inline uint32_t osTaskGetIoCount(struct Task *task)
182 {
183 return atomicReadByte(&task->ioCount);
184 }
185
osTaskIndex(struct Task * task)186 static inline uint8_t osTaskIndex(struct Task *task)
187 {
188 // we don't need signed diff here: this way we simplify boundary check
189 size_t idx = task - &mTaskPool.data[0];
190 return idx >= MAX_TASKS || &mTaskPool.data[idx] != task ? NO_NODE : idx;
191 }
192
osTaskByIdx(size_t idx)193 static inline struct Task *osTaskByIdx(size_t idx)
194 {
195 return idx >= MAX_TASKS ? NULL : &mTaskPool.data[idx];
196 }
197
osGetCurrentTid()198 uint32_t osGetCurrentTid()
199 {
200 return osGetCurrentTask()->tid;
201 }
202
/*
 * Make the task named by 'tid' current, returning the TID of the task
 * that was current before.  The full TID (index bits + generation bits)
 * must match the live task in that slot; a stale or invalid tid leaves
 * the current task unchanged and returns its own TID.
 */
uint32_t osSetCurrentTid(uint32_t tid)
{
    struct Task *task = osTaskByIdx(TID_TO_TASK_IDX(tid));

    if (task && task->tid == tid) {
        struct Task *preempted = osSetCurrentTask(task);
        return preempted->tid;
    }

    return osGetCurrentTid();
}
214
osTaskListPeekHead(struct TaskList * listHead)215 static inline struct Task *osTaskListPeekHead(struct TaskList *listHead)
216 {
217 TaskIndex idx = listHead->next;
218 return idx == NO_NODE ? NULL : &mTaskPool.data[idx];
219 }
220
221 #ifdef DEBUG
/* DEBUG only: log the head links and every node of 'listHead' with its
 * TID, pool index, and prev/next link indices. */
static void dumpListItems(const char *p, struct TaskList *listHead)
{
    int i = 0;
    struct Task *task;

    osLog(LOG_ERROR, "List: %s (%p) [%u;%u]\n",
          p,
          listHead,
          listHead ? listHead->prev : NO_NODE,
          listHead ? listHead->next : NO_NODE
    );
    if (!listHead)
        return;

    for_each_task(listHead, task) {
        osLog(LOG_ERROR, "  item %d: task=%p TID=%04X [%u;%u;%u]\n",
              i,
              task,
              task->tid,
              task->list.prev,
              osTaskIndex(task),
              task->list.next
        );
        ++i;
    }
}
248
/* DEBUG only: diagnostic dump used when a list operation receives a bad
 * argument — logs the offending task/list head, then both global lists. */
static void dumpTaskList(const char *f, struct Task *task, struct TaskList *listHead)
{
    osLog(LOG_ERROR, "%s: pool: %p; task=%p [%u;%u;%u]; listHead=%p [%u;%u]\n",
          f,
          &mTaskPool,
          task,
          task ? task->list.prev : NO_NODE,
          osTaskIndex(task),
          task ? task->list.next : NO_NODE,
          listHead,
          listHead ? listHead->prev : NO_NODE,
          listHead ? listHead->next : NO_NODE
    );
    dumpListItems("Tasks", &mTasks);
    dumpListItems("Free Tasks", &mFreeTasks);
}
265 #else
266 #define dumpTaskList(a,b,c)
267 #endif
268
/*
 * Unlink 'task' from the index-linked doubly-linked list rooted at
 * 'listHead'.  A NO_NODE link value stands for the list head itself.
 * NULL arguments trigger the (DEBUG-only) diagnostic dump instead.
 */
static inline void osTaskListRemoveTask(struct TaskList *listHead, struct Task *task)
{
    if (task && listHead) {
        struct TaskList *cur = &task->list;
        TaskIndex left_idx = cur->prev;
        TaskIndex right_idx = cur->next;
        /* NO_NODE neighbors resolve to the list head */
        struct TaskList *left = left_idx == NO_NODE ? listHead : &mTaskPool.data[left_idx].list;
        struct TaskList *right = right_idx == NO_NODE ? listHead : &mTaskPool.data[right_idx].list;
        cur->prev = cur->next = NO_NODE;
        left->next = right_idx;
        right->prev = left_idx;
    } else {
        dumpTaskList(__func__, task, listHead);
    }
}
284
/*
 * Append 'task' at the tail of the index-linked list rooted at
 * 'listHead'.  A NO_NODE link value stands for the list head itself.
 * NULL arguments trigger the (DEBUG-only) diagnostic dump instead.
 */
static inline void osTaskListAddTail(struct TaskList *listHead, struct Task *task)
{
    if (task && listHead) {
        struct TaskList *cur = &task->list;
        TaskIndex last_idx = listHead->prev;
        TaskIndex new_idx = osTaskIndex(task);
        /* an empty list's tail is the head itself */
        struct TaskList *last = last_idx == NO_NODE ? listHead : &mTaskPool.data[last_idx].list;
        cur->prev = last_idx;
        cur->next = NO_NODE;
        last->next = new_idx;
        listHead->prev = new_idx;
    } else {
        dumpTaskList(__func__, task, listHead);
    }
}
300
osAllocTask()301 static struct Task *osAllocTask()
302 {
303 struct Task *task = osTaskListPeekHead(&mFreeTasks);
304
305 if (task) {
306 osTaskListRemoveTask(&mFreeTasks, task);
307 uint16_t tid = task->tid;
308 memset(task, 0, sizeof(*task));
309 task->tid = tid;
310 }
311
312 return task;
313 }
314
osFreeTask(struct Task * task)315 static void osFreeTask(struct Task *task)
316 {
317 if (task) {
318 task->flags = 0;
319 task->ioCount = 0;
320 osTaskListAddTail(&mFreeTasks, task);
321 }
322 }
323
osRemoveTask(struct Task * task)324 static void osRemoveTask(struct Task *task)
325 {
326 osTaskListRemoveTask(&mTasks, task);
327 }
328
osAddTask(struct Task * task)329 static void osAddTask(struct Task *task)
330 {
331 osTaskListAddTail(&mTasks, task);
332 }
333
/*
 * Resolve a TID to its task-pool slot using only the index bits.
 * NOTE(review): the generation bits of 'tid' are NOT compared against
 * task->tid here, so a stale TID whose slot was reused resolves to the
 * slot's new occupant; callers needing an exact match (cf.
 * osSetCurrentTid) must re-check task->tid themselves.
 */
static inline struct Task* osTaskFindByTid(uint32_t tid)
{
    TaskIndex idx = TID_TO_TASK_IDX(tid);

    return idx < MAX_TASKS ? &mTaskPool.data[idx] : NULL;
}
340
osTaskInit(struct Task * task)341 static inline bool osTaskInit(struct Task *task)
342 {
343 struct Task *preempted = osSetCurrentTask(task);
344 bool done = cpuAppInit(task->app, &task->platInfo, task->tid);
345 osSetCurrentTask(preempted);
346 return done;
347 }
348
/*
 * Run the app's end entry point, then reclaim any kernel/HW resources
 * still held under the task's TID.  The app code runs as 'task'; the
 * cleanup runs as the system task so freed resources are not re-charged
 * to the dying TID.
 */
static inline void osTaskEnd(struct Task *task)
{
    struct Task *preempted = osSetCurrentTask(task);
    uint16_t tid = task->tid;

    cpuAppEnd(task->app, &task->platInfo);

    // task was supposed to release it's resources,
    // but we do our cleanup anyway
    osSetCurrentTask(mSystemTask);
    platFreeResources(tid); // HW resources cleanup (IRQ, DMA etc)
    sensorUnregisterAll(tid);
    timTimerCancelAll(tid);
    heapFreeAll(tid);
    // NOTE: we don't need to unsubscribe from events
    osSetCurrentTask(preempted);
}
366
osTaskHandle(struct Task * task,uint32_t evtType,const void * evtData)367 static inline void osTaskHandle(struct Task *task, uint32_t evtType, const void* evtData)
368 {
369 struct Task *preempted = osSetCurrentTask(task);
370 cpuAppHandle(task->app, &task->platInfo, evtType, evtData);
371 osSetCurrentTask(preempted);
372 }
373
/*
 * Synchronously release an event's payload.  'evtFreeData' is a tagged
 * value: when it is a pointer, it is a free callback to invoke with
 * evtData; when it is a uint, it is the TID of the app that must free
 * the data (delivered via EVT_APP_FREE_EVT_DATA).  A zero pointer/uint
 * means there is nothing to free.
 */
static void handleEventFreeing(uint32_t evtType, void *evtData, uintptr_t evtFreeData) // watch out, this is synchronous
{
    if ((taggedPtrIsPtr(evtFreeData) && !taggedPtrToPtr(evtFreeData)) ||
        (taggedPtrIsUint(evtFreeData) && !taggedPtrToUint(evtFreeData)))
        return;

    if (taggedPtrIsPtr(evtFreeData))
        ((EventFreeF)taggedPtrToPtr(evtFreeData))(evtData);
    else {
        struct AppEventFreeData fd = {.evtType = evtType, .evtData = evtData};
        struct Task* task = osTaskFindByTid(taggedPtrToUint(evtFreeData));

        if (!task)
            osLog(LOG_ERROR, "EINCEPTION: Failed to find app to call app to free event sent to app(s).\n");
        else
            osTaskHandle(task, EVT_APP_FREE_EVT_DATA, &fd);
    }
}
392
osInit(void)393 static void osInit(void)
394 {
395 heapInit();
396 platInitialize();
397
398 osLog(LOG_INFO, "SEOS Initializing\n");
399 cpuInitLate();
400
401 /* create the queues */
402 if (!(mEvtsInternal = evtQueueAlloc(512, handleEventFreeing))) {
403 osLog(LOG_INFO, "events failed to init\n");
404 return;
405 }
406
407 mMiscInternalThingsSlab = slabAllocatorNew(sizeof(union InternalThing), alignof(union InternalThing), 64 /* for now? */);
408 if (!mMiscInternalThingsSlab) {
409 osLog(LOG_INFO, "deferred actions list failed to init\n");
410 return;
411 }
412 }
413
osTaskFindByAppID(uint64_t appID)414 static struct Task* osTaskFindByAppID(uint64_t appID)
415 {
416 struct Task *task;
417
418 for_each_task(&mTasks, task) {
419 if (task->app && task->app->hdr.appId == appID)
420 return task;
421 }
422
423 return NULL;
424 }
425
osSegmentIteratorInit(struct SegmentIterator * it)426 void osSegmentIteratorInit(struct SegmentIterator *it)
427 {
428 uint32_t sz;
429 uint8_t *start = platGetSharedAreaInfo(&sz);
430
431 it->shared = (const struct Segment *)(start);
432 it->sharedEnd = (const struct Segment *)(start + sz);
433 it->seg = NULL;
434 }
435
/*
 * Flash-program the one-byte state field of the segment holding 'app'.
 * RAM-execution and ROM-write protections are dropped only for the
 * duration of the program operation.  Returns false if 'app' is not in
 * the shared area, or if programming failed.
 */
bool osAppSegmentSetState(const struct AppHdr *app, uint32_t segState)
{
    bool done;
    struct Segment *seg = osGetSegment(app);
    uint8_t state = segState;

    if (!seg)
        return false;

    mpuAllowRamExecution(true);
    mpuAllowRomWrite(true);
    done = BL.blProgramShared(&seg->state, &state, sizeof(state), BL_FLASH_KEY1, BL_FLASH_KEY2);
    mpuAllowRomWrite(false);
    mpuAllowRamExecution(false);

    return done;
}
453
osSegmentSetSize(struct Segment * seg,uint32_t size)454 bool osSegmentSetSize(struct Segment *seg, uint32_t size)
455 {
456 bool ret = true;
457
458 if (!seg)
459 return false;
460
461 if (size > SEG_SIZE_MAX) {
462 seg->state = SEG_ST_ERASED;
463 size = SEG_SIZE_MAX;
464 ret = false;
465 }
466 seg->size[0] = size;
467 seg->size[1] = size >> 8;
468 seg->size[2] = size >> 16;
469
470 return ret;
471 }
472
/* One-past-the-end pointer of the shared flash area. */
struct Segment *osSegmentGetEnd()
{
    uint32_t areaSize;
    uint8_t *base = platGetSharedAreaInfo(&areaSize);

    return (struct Segment *)&base[areaSize];
}
479
osGetSegment(const struct AppHdr * app)480 struct Segment *osGetSegment(const struct AppHdr *app)
481 {
482 uint32_t size;
483 uint8_t *start = platGetSharedAreaInfo(&size);
484
485 return (struct Segment *)((uint8_t*)app &&
486 (uint8_t*)app >= start &&
487 (uint8_t*)app < (start + size) ?
488 (uint8_t*)app - sizeof(struct Segment) : NULL);
489 }
490
osEraseShared()491 bool osEraseShared()
492 {
493 mpuAllowRamExecution(true);
494 mpuAllowRomWrite(true);
495 (void)BL.blEraseShared(BL_FLASH_KEY1, BL_FLASH_KEY2);
496 mpuAllowRomWrite(false);
497 mpuAllowRamExecution(false);
498 return true;
499 }
500
osWriteShared(void * dest,const void * src,uint32_t len)501 bool osWriteShared(void *dest, const void *src, uint32_t len)
502 {
503 bool ret;
504
505 mpuAllowRamExecution(true);
506 mpuAllowRomWrite(true);
507 ret = BL.blProgramShared(dest, src, len, BL_FLASH_KEY1, BL_FLASH_KEY2);
508 mpuAllowRomWrite(false);
509 mpuAllowRamExecution(false);
510
511 if (!ret)
512 osLog(LOG_ERROR, "osWriteShared: blProgramShared return false\n");
513
514 return ret;
515 }
516
/*
 * Reserve room for a new app image in the shared area: find the first
 * EMPTY segment, verify 'size' fits before the area's end, and mark the
 * segment RESERVED.  Returns the location for the new app header, or
 * NULL if there is no space.
 */
struct AppHdr *osAppSegmentCreate(uint32_t size)
{
    struct SegmentIterator it;
    const struct Segment *storageSeg = NULL;
    struct AppHdr *app;

    osSegmentIteratorInit(&it);
    while (osSegmentIteratorNext(&it)) {
        if (osSegmentGetState(it.seg) == SEG_ST_EMPTY) {
            storageSeg = it.seg;
            break;
        }
    }
    if (!storageSeg || osSegmentSizeGetNext(storageSeg, size) > it.sharedEnd)
        return NULL;

    app = osSegmentGetData(storageSeg);
    osAppSegmentSetState(app, SEG_ST_RESERVED);

    return app;
}
538
osAppSegmentClose(struct AppHdr * app,uint32_t segDataSize,uint32_t segState)539 bool osAppSegmentClose(struct AppHdr *app, uint32_t segDataSize, uint32_t segState)
540 {
541 struct Segment seg;
542
543 // this is enough for holding padding to uint32_t and the footer
544 uint8_t footer[sizeof(uint32_t) + FOOTER_SIZE];
545 int footerLen;
546 bool ret;
547 uint32_t totalSize;
548 uint8_t *start = platGetSharedAreaInfo(&totalSize);
549 uint8_t *end = start + totalSize;
550 int32_t fullSize = segDataSize + sizeof(seg); // without footer or padding
551 struct Segment *storageSeg = osGetSegment(app);
552
553 // sanity check
554 if (segDataSize >= SEG_SIZE_MAX)
555 return false;
556
557 // physical limits check
558 if (osSegmentSizeAlignedWithFooter(segDataSize) + sizeof(struct Segment) > totalSize)
559 return false;
560
561 // available space check: we could truncate size, instead of disallowing it,
562 // but we know that we performed validation on the size before, in *Create call,
563 // and it was fine, so this must be a programming error, and so we fail.
564 // on a side note: size may grow or shrink compared to original estimate.
565 // typically it shrinks, since we skip some header info and padding, as well
566 // as signature blocks, but it is possible that at some point we may produce
567 // more data for some reason. At that time the logic here may need to change
568 if (osSegmentSizeGetNext(storageSeg, segDataSize) > (struct Segment*)end)
569 return false;
570
571 seg.state = segState;
572 osSegmentSetSize(&seg, segDataSize);
573
574 ret = osWriteShared((uint8_t*)storageSeg, (uint8_t*)&seg, sizeof(seg));
575
576 footerLen = (-fullSize) & 3;
577 memset(footer, 0x00, footerLen);
578
579 #ifdef SEGMENT_CRC_SUPPORT
580 struct SegmentFooter segFooter {
581 .crc = ~crc32(storageSeg, fullSize, ~0),
582 };
583 memcpy(&footer[footerLen], &segFooter, sizeof(segFooter));
584 footerLen += sizeof(segFooter);
585 #endif
586
587 if (ret && footerLen)
588 ret = osWriteShared((uint8_t*)storageSeg + fullSize, footer, footerLen);
589
590 return ret;
591 }
592
/*
 * Zero out an app's entire segment (size rounded up to include padding
 * and footer) in flash, in 256-byte chunks.  Writes are best-effort: a
 * failure on one chunk does not stop the remaining chunks; the return
 * value is false if any chunk failed.
 * NOTE(review): 'seg' can be NULL here and is passed to
 * osSegmentGetSize()/osSegmentGetState() before the NULL check — this
 * assumes those helpers tolerate NULL; confirm in their definitions.
 */
bool osAppWipeData(struct AppHdr *app)
{
    struct Segment *seg = osGetSegment(app);
    int32_t size = osSegmentGetSize(seg);
    uint8_t *p = (uint8_t*)app;
    uint32_t state = osSegmentGetState(seg);
    uint8_t buf[256];
    bool done = true;

    if (!seg || size == SEG_SIZE_INVALID || state == SEG_ST_EMPTY) {
        osLog(LOG_ERROR, "%s: can't erase segment: app=%p; seg=%p"
                         "; size=%" PRIu32
                         "; state=%" PRIu32
                         "\n",
                         __func__, app, seg, size, state);
        return false;
    }

    size = osSegmentSizeAlignedWithFooter(size);

    memset(buf, 0, sizeof(buf));
    while (size > 0) {
        uint32_t flashSz = size > sizeof(buf) ? sizeof(buf) : size;
        // keep trying to zero-out stuff even in case of intermittent failures.
        // flash write may occasionally fail on some byte, but it is not good enough
        // reason to not rewrite other bytes
        bool res = osWriteShared(p, buf, flashSz);
        done = done && res;
        size -= flashSz;
        p += flashSz;
    }

    return done;
}
627
osAppIsValid(const struct AppHdr * app)628 static inline bool osAppIsValid(const struct AppHdr *app)
629 {
630 return app->hdr.magic == APP_HDR_MAGIC &&
631 app->hdr.fwVer == APP_HDR_VER_CUR &&
632 (app->hdr.fwFlags & FL_APP_HDR_APPLICATION) != 0 &&
633 app->hdr.payInfoType == LAYOUT_APP;
634 }
635
osExtAppIsValid(const struct AppHdr * app,uint32_t len)636 static bool osExtAppIsValid(const struct AppHdr *app, uint32_t len)
637 {
638 //TODO: when CRC support is ready, add CRC check here
639 return osAppIsValid(app) &&
640 len >= sizeof(*app) &&
641 osAppSegmentGetState(app) == SEG_ST_VALID &&
642 !(app->hdr.fwFlags & FL_APP_HDR_INTERNAL);
643 }
644
osIntAppIsValid(const struct AppHdr * app)645 static bool osIntAppIsValid(const struct AppHdr *app)
646 {
647 return osAppIsValid(app) &&
648 osAppSegmentGetState(app) == SEG_STATE_INVALID &&
649 (app->hdr.fwFlags & FL_APP_HDR_INTERNAL) != 0;
650 }
651
osExtAppErase(const struct AppHdr * app)652 static inline bool osExtAppErase(const struct AppHdr *app)
653 {
654 return osAppSegmentSetState(app, SEG_ST_ERASED);
655 }
656
osLoadApp(const struct AppHdr * app)657 static struct Task *osLoadApp(const struct AppHdr *app) {
658 struct Task *task;
659
660 task = osAllocTask();
661 if (!task) {
662 osLog(LOG_WARN, "External app id %016" PRIX64 " @ %p cannot be used as too many apps already exist.\n", app->hdr.appId, app);
663 return NULL;
664 }
665 task->app = app;
666 bool done = (app->hdr.fwFlags & FL_APP_HDR_INTERNAL) ?
667 cpuInternalAppLoad(task->app, &task->platInfo) :
668 cpuAppLoad(task->app, &task->platInfo);
669
670 if (!done) {
671 osLog(LOG_WARN, "App @ %p ID %016" PRIX64 " failed to load\n", app, app->hdr.appId);
672 osFreeTask(task);
673 task = NULL;
674 }
675
676 return task;
677 }
678
osUnloadApp(struct Task * task)679 static void osUnloadApp(struct Task *task)
680 {
681 // this is called on task that has stopped running, or had never run
682 cpuAppUnload(task->app, &task->platInfo);
683 osFreeTask(task);
684 }
685
osStartApp(const struct AppHdr * app)686 static bool osStartApp(const struct AppHdr *app)
687 {
688 bool done = false;
689 struct Task *task;
690
691 if ((task = osLoadApp(app)) != NULL) {
692 task->subbedEvtListSz = MAX_EMBEDDED_EVT_SUBS;
693 task->subbedEvents = task->subbedEventsInt;
694 MAKE_NEW_TID(task);
695
696 done = osTaskInit(task);
697
698 if (!done) {
699 osLog(LOG_WARN, "App @ %p ID %016" PRIX64 "failed to init\n", task->app, task->app->hdr.appId);
700 osUnloadApp(task);
701 } else {
702 osAddTask(task);
703 }
704 }
705
706 return done;
707 }
708
/*
 * Stop a task.  If it still has outstanding I/O (events it originated
 * that are still in flight), it is notified via EVT_APP_STOP and the
 * actual teardown is deferred until EVT_APP_END drains through the
 * queue; otherwise it is ended and unloaded immediately.
 */
static bool osStopTask(struct Task *task)
{
    if (!task)
        return false;

    osTaskClrSetFlags(task, 0, FL_TASK_STOPPED);
    osRemoveTask(task);

    if (osTaskGetIoCount(task)) {
        osTaskHandle(task, EVT_APP_STOP, NULL);
        osEnqueueEvtOrFree(EVT_APP_END, task, NULL);
    } else {
        osTaskEnd(task);
        osUnloadApp(task);
    }

    return true;
}
727
/*
 * Advance 'it' to the next VALID segment whose app ID matches 'appId'.
 * Wildcard components (APP_VENDOR_ANY / APP_SEQ_ID_ANY) match anything.
 * Scanning stops early at the first EMPTY segment (end of used area).
 * Returns true when a match was found, false otherwise.
 */
static bool osExtAppFind(struct SegmentIterator *it, uint64_t appId)
{
    uint64_t vendor = APP_ID_GET_VENDOR(appId);
    uint64_t seqId = APP_ID_GET_SEQ_ID(appId);
    uint64_t curAppId;
    const struct AppHdr *app;
    const struct Segment *seg;

    while (osSegmentIteratorNext(it)) {
        seg = it->seg;
        if (seg->state == SEG_ST_EMPTY)
            break;
        if (seg->state != SEG_ST_VALID)
            continue;
        app = osSegmentGetData(seg);
        curAppId = app->hdr.appId;

        if ((vendor == APP_VENDOR_ANY || vendor == APP_ID_GET_VENDOR(curAppId)) &&
            (seqId == APP_SEQ_ID_ANY || seqId == APP_ID_GET_SEQ_ID(curAppId)))
            return true;
    }

    return false;
}
752
/*
 * Stop (and, when 'doErase', also erase) every loaded external app
 * whose ID matches 'appId' (wildcards allowed).  Returns a packed
 * MgmtStatus word with counts of: matching apps, tasks found, tasks
 * stopped, and segments erased.
 */
static uint32_t osExtAppStopEraseApps(uint64_t appId, bool doErase)
{
    const struct AppHdr *app;
    int32_t len;
    struct Task *task;
    struct SegmentIterator it;
    uint32_t stopCount = 0;
    uint32_t eraseCount = 0;
    uint32_t appCount = 0;
    uint32_t taskCount = 0;
    struct MgmtStatus stat = { .value = 0 };

    osSegmentIteratorInit(&it);
    while (osExtAppFind(&it, appId)) {
        app = osSegmentGetData(it.seg);
        len = osSegmentGetSize(it.seg);
        if (!osExtAppIsValid(app, len))
            continue;
        appCount++;
        task = osTaskFindByAppID(app->hdr.appId);
        if (task)
            taskCount++;
        /* only act when the running task was loaded from this exact image */
        if (task && task->app == app) {
            if (osStopTask(task))
                stopCount++;
            else
                continue;
            if (doErase && osExtAppErase(app))
                eraseCount++;
        }
    }
    SET_COUNTER(stat.app,   appCount);
    SET_COUNTER(stat.task,  taskCount);
    SET_COUNTER(stat.op,    stopCount);
    SET_COUNTER(stat.erase, eraseCount);

    return stat.value;
}
791
/* Stop all loaded external apps matching appId; returns packed MgmtStatus. */
uint32_t osExtAppStopApps(uint64_t appId)
{
    return osExtAppStopEraseApps(appId, false /* doErase */);
}
796
/* Stop and erase all loaded external apps matching appId; returns packed MgmtStatus. */
uint32_t osExtAppEraseApps(uint64_t appId)
{
    return osExtAppStopEraseApps(appId, true /* doErase */);
}
801
osScanExternal()802 static void osScanExternal()
803 {
804 struct SegmentIterator it;
805 osSegmentIteratorInit(&it);
806 while (osSegmentIteratorNext(&it)) {
807 switch (osSegmentGetState(it.seg)) {
808 case SEG_ST_EMPTY:
809 // everything looks good
810 osLog(LOG_INFO, "External area is good\n");
811 return;
812 case SEG_ST_ERASED:
813 case SEG_ST_VALID:
814 // this is valid stuff, ignore
815 break;
816 case SEG_ST_RESERVED:
817 default:
818 // something is wrong: erase everything
819 osLog(LOG_ERROR, "External area is damaged. Erasing\n");
820 osEraseShared();
821 return;
822 }
823 }
824 }
825
/*
 * Start every valid external app matching 'appId' (wildcards allowed).
 * For each match, older duplicate copies are erased so only the most
 * recent image runs; apps whose ID is already taken by a running task
 * (internal or external) are skipped.  Returns a packed MgmtStatus with
 * counts of apps seen, tasks already present, apps started, and
 * segments erased.
 */
uint32_t osExtAppStartApps(uint64_t appId)
{
    const struct AppHdr *app;
    int32_t len;
    struct SegmentIterator it;
    struct SegmentIterator checkIt;
    uint32_t startCount = 0;
    uint32_t eraseCount = 0;
    uint32_t appCount = 0;
    uint32_t taskCount = 0;
    struct MgmtStatus stat = { .value = 0 };

    osScanExternal();

    osSegmentIteratorInit(&it);
    while (osExtAppFind(&it, appId)) {
        app = osSegmentGetData(it.seg);
        len = osSegmentGetSize(it.seg);

        // skip erased or malformed apps
        if (!osExtAppIsValid(app, len))
            continue;

        appCount++;
        checkIt = it;
        // find the most recent copy
        while (osExtAppFind(&checkIt, app->hdr.appId)) {
            if (osExtAppErase(app)) // erase the old one, so we skip it next time
                eraseCount++;
            app = osSegmentGetData(checkIt.seg);
        }

        if (osTaskFindByAppID(app->hdr.appId)) {
            // this either the most recent external app with the same ID,
            // or internal app with the same id; in both cases we do nothing
            taskCount++;
            continue;
        }

        if (osStartApp(app))
            startCount++;
    }
    SET_COUNTER(stat.app,   appCount);
    SET_COUNTER(stat.task,  taskCount);
    SET_COUNTER(stat.op,    startCount);
    SET_COUNTER(stat.erase, eraseCount);

    return stat.value;
}
875
osStartTasks(void)876 static void osStartTasks(void)
877 {
878 const struct AppHdr *app;
879 uint32_t i, nApps;
880 struct Task* task;
881 uint32_t status = 0;
882 uint32_t taskCnt = 0;
883
884 osLog(LOG_DEBUG, "Initializing task pool...\n");
885 list_init(&mTasks);
886 list_init(&mFreeTasks);
887 for (i = 0; i < MAX_TASKS; ++i) {
888 task = &mTaskPool.data[i];
889 list_init(&task->list);
890 osFreeTask(task);
891 }
892
893 mSystemTask = osAllocTask(); // this is a dummy task; holder of TID 0; all system code will run with TID 0
894 osSetCurrentTask(mSystemTask);
895 osLog(LOG_DEBUG, "System task is: %p\n", mSystemTask);
896
897 /* first enum all internal apps, making sure to check for dupes */
898 osLog(LOG_DEBUG, "Starting internal apps...\n");
899 for (i = 0, app = platGetInternalAppList(&nApps); i < nApps; i++, app++) {
900 if (!osIntAppIsValid(app)) {
901 osLog(LOG_WARN, "Invalid internal app @ %p ID %016" PRIX64
902 "header version: %" PRIu16
903 "\n",
904 app, app->hdr.appId, app->hdr.fwVer);
905 continue;
906 }
907
908 if (!(app->hdr.fwFlags & FL_APP_HDR_INTERNAL)) {
909 osLog(LOG_WARN, "Internal app is not marked: [%p]: flags: 0x%04" PRIX16
910 "; ID: %016" PRIX64
911 "; ignored\n",
912 app, app->hdr.fwFlags, app->hdr.appId);
913 continue;
914 }
915 if ((task = osTaskFindByAppID(app->hdr.appId))) {
916 osLog(LOG_WARN, "Internal app ID %016" PRIX64
917 "@ %p attempting to update internal app @ %p; app @%p ignored.\n",
918 app->hdr.appId, app, task->app, app);
919 continue;
920 }
921 if (osStartApp(app))
922 taskCnt++;
923 }
924
925 osLog(LOG_DEBUG, "Starting external apps...\n");
926 status = osExtAppStartApps(APP_ID_ANY);
927 osLog(LOG_DEBUG, "Started %" PRIu32 " internal apps; EXT status: %08" PRIX32 "\n", taskCnt, status);
928 }
929
/*
 * Dispatcher for kernel-internal events (type < EVT_NO_FIRST_USER_EVENT).
 * Subscription changes are processed here — at the head of the event
 * queue — precisely so the subscription arrays need no locking (see the
 * comment block near the top of this file).
 */
static void osInternalEvtHandle(uint32_t evtType, void *evtData)
{
    union InternalThing *da = (union InternalThing*)evtData;
    struct Task *task;
    uint32_t i;

    switch (evtType) {
    case EVT_SUBSCRIBE_TO_EVT:
    case EVT_UNSUBSCRIBE_TO_EVT:
        /* get task */
        task = osTaskFindByTid(da->evtSub.tid);
        if (!task)
            break;

        /* find if subscribed to this evt */
        for (i = 0; i < task->subbedEvtCount && task->subbedEvents[i] != da->evtSub.evt; i++);

        /* if unsub & found -> unsub (swap-with-last removal) */
        if (evtType == EVT_UNSUBSCRIBE_TO_EVT && i != task->subbedEvtCount)
            task->subbedEvents[i] = task->subbedEvents[--task->subbedEvtCount];
        /* if sub & not found -> sub */
        else if (evtType == EVT_SUBSCRIBE_TO_EVT && i == task->subbedEvtCount) {
            if (task->subbedEvtListSz == task->subbedEvtCount) { /* enlarge the list */
                uint32_t newSz = (task->subbedEvtListSz * 3 + 1) / 2;
                uint32_t *newList = heapAlloc(sizeof(uint32_t[newSz])); /* grow by 50% */
                if (newList) {
                    memcpy(newList, task->subbedEvents, sizeof(uint32_t[task->subbedEvtListSz]));
                    /* the first list lives inside struct Task; never free that one */
                    if (task->subbedEvents != task->subbedEventsInt)
                        heapFree(task->subbedEvents);
                    task->subbedEvents = newList;
                    task->subbedEvtListSz = newSz;
                }
            }
            /* on alloc failure the subscription is silently dropped */
            if (task->subbedEvtListSz > task->subbedEvtCount) { /* have space ? */
                task->subbedEvents[task->subbedEvtCount++] = da->evtSub.evt;
            }
        }
        break;

    case EVT_APP_END:
        /* deferred teardown queued by osStopTask; evtData is the task itself */
        task = evtData;
        osTaskEnd(task);
        osUnloadApp(task);
        break;

    case EVT_DEFERRED_CALLBACK:
        da->deferred.callback(da->deferred.cookie);
        break;

    case EVT_PRIVATE_EVT:
        task = osTaskFindByTid(da->privateEvt.toTid);
        if (task) {
            //private events cannot be retained
            TaggedPtr *tmp = mCurEvtEventFreeingInfo;
            mCurEvtEventFreeingInfo = NULL;

            osTaskHandle(task, da->privateEvt.evtType, da->privateEvt.evtData);

            mCurEvtEventFreeingInfo = tmp;
        }

        /* the inner payload is freed here whether or not a recipient existed */
        handleEventFreeing(da->privateEvt.evtType, da->privateEvt.evtData, da->privateEvt.evtFreeInfo);
        break;
    }
}
995
abort(void)996 void abort(void)
997 {
998 /* this is necessary for va_* funcs... */
999 osLog(LOG_ERROR, "Abort called");
1000 while(1);
1001 }
1002
/*
 * Take over freeing of the event currently being dispatched: copies the
 * freeing info out to the caller and clears the pending-free flag.
 * Fails when no event is in flight or it was already retained.
 */
bool osRetainCurrentEvent(TaggedPtr *evtFreeingInfoP)
{
    if (!mCurEvtEventFreeingInfo)
        return false;

    *evtFreeingInfoP = *mCurEvtEventFreeingInfo;
    mCurEvtEventFreeingInfo = NULL;

    return true;
}
1012
/* Release an event previously taken over via osRetainCurrentEvent(). */
void osFreeRetainedEvent(uint32_t evtType, void *evtData, TaggedPtr *evtFreeingInfoP)
{
    handleEventFreeing(evtType, evtData, *evtFreeingInfoP);
}
1017
/*
 * One-time kernel bring-up: CPU, event queue/slab, timers, sensors,
 * syscalls, and the AP interrupt path.  Interrupts stay off until all
 * subsystems are initialized; finally all apps are started and
 * EVT_APP_START is broadcast to them.
 */
void osMainInit(void)
{
    cpuInit();
    cpuIntsOff();
    osInit();
    timInit();
    sensorsInit();
    syscallInit();
    osApiExport(mMiscInternalThingsSlab);
    apIntInit();
    cpuIntsOn();
    osStartTasks();

    //broadcast app start to all already-loaded apps
    (void)osEnqueueEvt(EVT_APP_START, NULL, NULL);
}
1034
osMainDequeueLoop(void)1035 void osMainDequeueLoop(void)
1036 {
1037 TaggedPtr evtFreeingInfo;
1038 uint32_t evtType, j;
1039 void *evtData;
1040 struct Task *task;
1041 uint16_t tid;
1042
1043 /* get an event */
1044 if (!evtQueueDequeue(mEvtsInternal, &evtType, &evtData, &evtFreeingInfo, true))
1045 return;
1046
1047 evtType = EVENT_GET_EVENT(evtType);
1048 tid = EVENT_GET_ORIGIN(evtType);
1049 task = osTaskFindByTid(tid);
1050 if (task)
1051 osTaskAddIoCount(task, -1);
1052
1053 /* by default we free them when we're done with them */
1054 mCurEvtEventFreeingInfo = &evtFreeingInfo;
1055
1056 if (evtType < EVT_NO_FIRST_USER_EVENT) {
1057 /* handle deferred actions and other reserved events here */
1058 osInternalEvtHandle(evtType, evtData);
1059 } else {
1060 /* send this event to all tasks who want it */
1061 for_each_task(&mTasks, task) {
1062 for (j = 0; j < task->subbedEvtCount; j++) {
1063 if (task->subbedEvents[j] == evtType) {
1064 osTaskHandle(task, evtType, evtData);
1065 break;
1066 }
1067 }
1068 }
1069 }
1070
1071 /* free it */
1072 if (mCurEvtEventFreeingInfo)
1073 handleEventFreeing(evtType, evtData, evtFreeingInfo);
1074
1075 /* avoid some possible errors */
1076 mCurEvtEventFreeingInfo = NULL;
1077 }
1078
/*
 * OS entry point: initialize once, then dispatch events forever.
 */
void __attribute__((noreturn)) osMain(void)
{
    osMainInit();

    for (;;)
        osMainDequeueLoop();
}
1088
osDeferredActionFreeF(void * event)1089 static void osDeferredActionFreeF(void* event)
1090 {
1091 slabAllocatorFree(mMiscInternalThingsSlab, event);
1092 }
1093
osEventSubscribeUnsubscribe(uint32_t tid,uint32_t evtType,bool sub)1094 static bool osEventSubscribeUnsubscribe(uint32_t tid, uint32_t evtType, bool sub)
1095 {
1096 union InternalThing *act = slabAllocatorAlloc(mMiscInternalThingsSlab);
1097
1098 if (!act)
1099 return false;
1100 act->evtSub.evt = evtType;
1101 act->evtSub.tid = tid;
1102
1103 return osEnqueueEvtOrFree(sub ? EVT_SUBSCRIBE_TO_EVT : EVT_UNSUBSCRIBE_TO_EVT, act, osDeferredActionFreeF);
1104 }
1105
/*
 * Subscribe the *current* task to evtType. The tid parameter is kept for
 * API compatibility but deliberately ignored; the caller's own TID is used.
 */
bool osEventSubscribe(uint32_t tid, uint32_t evtType)
{
    (void)tid;

    return osEventSubscribeUnsubscribe(osGetCurrentTid(), evtType, true);
}
1111
/*
 * Unsubscribe the *current* task from evtType. The tid parameter is kept
 * for API compatibility but deliberately ignored.
 */
bool osEventUnsubscribe(uint32_t tid, uint32_t evtType)
{
    (void)tid;

    return osEventSubscribeUnsubscribe(osGetCurrentTid(), evtType, false);
}
1117
/*
 * Shared enqueue path: stamp the event with the sender's TID and push it on
 * the main queue, tracking the sender's outstanding-IO count. A stopped task
 * may no longer emit events, so its payload is freed immediately and the call
 * reports success (the event is simply dropped).
 */
static bool osEnqueueEvtCommon(uint32_t evtType, void *evtData, TaggedPtr evtFreeInfo)
{
    struct Task *sender = osGetCurrentTask();
    bool queued;

    if (osTaskTestFlags(sender, FL_TASK_STOPPED)) {
        handleEventFreeing(evtType, evtData, evtFreeInfo);
        return true;
    }

    osTaskAddIoCount(sender, 1);
    queued = evtQueueEnqueue(mEvtsInternal,
                             EVENT_WITH_ORIGIN(evtType, osGetCurrentTid()),
                             evtData, evtFreeInfo, false);
    if (!queued)
        osTaskAddIoCount(sender, -1);  /* undo the credit on failure */

    return queued;
}
1136
/*
 * Enqueue a broadcast event whose payload is released via evtFreeF.
 * On failure the caller still owns evtData.
 */
bool osEnqueueEvt(uint32_t evtType, void *evtData, EventFreeF evtFreeF)
{
    TaggedPtr freeInfo = taggedPtrMakeFromPtr(evtFreeF);

    return osEnqueueEvtCommon(evtType, evtData, freeInfo);
}
1141
/*
 * Like osEnqueueEvt(), but ownership of evtData always transfers: if the
 * enqueue fails the payload is freed here via evtFreeF (when non-NULL).
 */
bool osEnqueueEvtOrFree(uint32_t evtType, void *evtData, EventFreeF evtFreeF)
{
    if (osEnqueueEvt(evtType, evtData, evtFreeF))
        return true;

    if (evtFreeF)
        evtFreeF(evtData);

    return false;
}
1151
osEnqueueEvtAsApp(uint32_t evtType,void * evtData,uint32_t fromAppTid)1152 bool osEnqueueEvtAsApp(uint32_t evtType, void *evtData, uint32_t fromAppTid)
1153 {
1154 // compatibility with existing external apps
1155 if (evtType & EVENT_TYPE_BIT_DISCARDABLE_COMPAT)
1156 evtType |= EVENT_TYPE_BIT_DISCARDABLE;
1157
1158 (void)fromAppTid;
1159 return osEnqueueEvtCommon(evtType, evtData, taggedPtrMakeFromUint(osGetCurrentTid()));
1160 }
1161
/*
 * Schedule callback(cookie) to run from the main dispatch loop. When urgent
 * is set the record jumps the queue. Returns false (and frees the record) if
 * allocation or enqueueing fails.
 */
bool osDefer(OsDeferCbkF callback, void *cookie, bool urgent)
{
    union InternalThing *rec = slabAllocatorAlloc(mMiscInternalThingsSlab);

    if (!rec)
        return false;

    rec->deferred.callback = callback;
    rec->deferred.cookie = cookie;

    if (!evtQueueEnqueue(mEvtsInternal, EVT_DEFERRED_CALLBACK, rec,
                         taggedPtrMakeFromPtr(osDeferredActionFreeF), urgent)) {
        slabAllocatorFree(mMiscInternalThingsSlab, rec);
        return false;
    }

    return true;
}
1177
/*
 * Wrap a directed (single-recipient) event in an EVT_PRIVATE_EVT envelope and
 * enqueue it. The envelope is slab-allocated and freed automatically; the
 * inner payload's freeing info travels inside it and is honored on delivery.
 */
static bool osEnqueuePrivateEvtEx(uint32_t evtType, void *evtData, TaggedPtr evtFreeInfo, uint32_t toTid)
{
    union InternalThing *envelope = slabAllocatorAlloc(mMiscInternalThingsSlab);

    if (!envelope)
        return false;

    envelope->privateEvt.evtType = evtType;
    envelope->privateEvt.evtData = evtData;
    envelope->privateEvt.evtFreeInfo = evtFreeInfo;
    envelope->privateEvt.toTid = toTid;

    return osEnqueueEvtOrFree(EVT_PRIVATE_EVT, envelope, osDeferredActionFreeF);
}
1191
/* Send a private event whose payload is released through evtFreeF. */
bool osEnqueuePrivateEvt(uint32_t evtType, void *evtData, EventFreeF evtFreeF, uint32_t toTid)
{
    TaggedPtr freeInfo = taggedPtrMakeFromPtr(evtFreeF);

    return osEnqueuePrivateEvtEx(evtType, evtData, freeInfo, toTid);
}
1196
/*
 * App-facing private-event send: freeing info carries the sender's TID so
 * cleanup is routed back to the owning app. fromAppTid is kept for API
 * compatibility and ignored.
 */
bool osEnqueuePrivateEvtAsApp(uint32_t evtType, void *evtData, uint32_t fromAppTid, uint32_t toTid)
{
    (void)fromAppTid;

    return osEnqueuePrivateEvtEx(evtType, evtData, taggedPtrMakeFromUint(osGetCurrentTid()), toTid);
}
1202
osTidById(uint64_t appId,uint32_t * tid)1203 bool osTidById(uint64_t appId, uint32_t *tid)
1204 {
1205 struct Task *task;
1206
1207 for_each_task(&mTasks, task) {
1208 if (task->app && task->app->hdr.appId == appId) {
1209 *tid = task->tid;
1210 return true;
1211 }
1212 }
1213
1214 return false;
1215 }
1216
osAppInfoById(uint64_t appId,uint32_t * appIdx,uint32_t * appVer,uint32_t * appSize)1217 bool osAppInfoById(uint64_t appId, uint32_t *appIdx, uint32_t *appVer, uint32_t *appSize)
1218 {
1219 uint32_t i = 0;
1220 struct Task *task;
1221
1222 for_each_task(&mTasks, task) {
1223 const struct AppHdr *app = task->app;
1224 if (app && app->hdr.appId == appId) {
1225 *appIdx = i;
1226 *appVer = app->hdr.appVer;
1227 *appSize = app->sect.rel_end;
1228 return true;
1229 }
1230 i++;
1231 }
1232
1233 return false;
1234 }
1235
osAppInfoByIndex(uint32_t appIdx,uint64_t * appId,uint32_t * appVer,uint32_t * appSize)1236 bool osAppInfoByIndex(uint32_t appIdx, uint64_t *appId, uint32_t *appVer, uint32_t *appSize)
1237 {
1238 struct Task *task;
1239 int i = 0;
1240
1241 for_each_task(&mTasks, task) {
1242 if (i != appIdx) {
1243 ++i;
1244 } else {
1245 const struct AppHdr *app = task->app;
1246 *appId = app->hdr.appId;
1247 *appVer = app->hdr.appVer;
1248 *appSize = app->sect.rel_end;
1249 return true;
1250 }
1251 }
1252
1253 return false;
1254 }
1255
/*
 * Core log formatter: obtains a platform log buffer, emits the level byte,
 * formats the message into it via cvprintf, and flushes it out.
 */
void osLogv(enum LogLevel level, const char *str, va_list vl)
{
    void *logHandle = platLogAllocUserData();

    platLogPutcharF(logHandle, level);
    cvprintf(platLogPutcharF, logHandle, str, vl);
    platLogFlush(logHandle);
}
1265
/* Varargs front-end for osLogv(). */
void osLog(enum LogLevel level, const char *str, ...)
{
    va_list args;

    va_start(args, str);
    osLogv(level, str, args);
    va_end(args);
}
1274
1275
1276
1277
//Google's public key for Google's apps' signing
//(2048-bit RSA modulus, raw little-endian bytes; placed in the .pubkeys
// section so the loader can enumerate trusted keys)
const uint8_t __attribute__ ((section (".pubkeys"))) _RSA_KEY_GOOGLE[] = {
    0xd9, 0xcd, 0x83, 0xae, 0xb5, 0x9e, 0xe4, 0x63, 0xf1, 0x4c, 0x26, 0x6a, 0x1c, 0xeb, 0x4c, 0x12,
    0x5b, 0xa6, 0x71, 0x7f, 0xa2, 0x4e, 0x7b, 0xa2, 0xee, 0x02, 0x86, 0xfc, 0x0d, 0x31, 0x26, 0x74,
    0x1e, 0x9c, 0x41, 0x43, 0xba, 0x16, 0xe9, 0x23, 0x4d, 0xfc, 0xc4, 0xca, 0xcc, 0xd5, 0x27, 0x2f,
    0x16, 0x4c, 0xe2, 0x85, 0x39, 0xb3, 0x0b, 0xcb, 0x73, 0xb6, 0x56, 0xc2, 0x98, 0x83, 0xf6, 0xfa,
    0x7a, 0x6e, 0xa0, 0x9a, 0xcc, 0x83, 0x97, 0x9d, 0xde, 0x89, 0xb2, 0xa3, 0x05, 0x46, 0x0c, 0x12,
    0xae, 0x01, 0xf8, 0x0c, 0xf5, 0x39, 0x32, 0xe5, 0x94, 0xb9, 0xa0, 0x8f, 0x19, 0xe4, 0x39, 0x54,
    0xad, 0xdb, 0x81, 0x60, 0x74, 0x63, 0xd5, 0x80, 0x3b, 0xd2, 0x88, 0xf4, 0xcb, 0x6b, 0x47, 0x28,
    0x80, 0xb0, 0xd1, 0x89, 0x6d, 0xd9, 0x62, 0x88, 0x81, 0xd6, 0xc0, 0x13, 0x88, 0x91, 0xfb, 0x7d,
    0xa3, 0x7f, 0xa5, 0x40, 0x12, 0xfb, 0x77, 0x77, 0x4c, 0x98, 0xe4, 0xd3, 0x62, 0x39, 0xcc, 0x63,
    0x34, 0x76, 0xb9, 0x12, 0x67, 0xfe, 0x83, 0x23, 0x5d, 0x40, 0x6b, 0x77, 0x93, 0xd6, 0xc0, 0x86,
    0x6c, 0x03, 0x14, 0xdf, 0x78, 0x2d, 0xe0, 0x9b, 0x5e, 0x05, 0xf0, 0x93, 0xbd, 0x03, 0x1d, 0x17,
    0x56, 0x88, 0x58, 0x25, 0xa6, 0xae, 0x63, 0xd2, 0x01, 0x43, 0xbb, 0x7e, 0x7a, 0xa5, 0x62, 0xdf,
    0x8a, 0x31, 0xbd, 0x24, 0x1b, 0x1b, 0xeb, 0xfe, 0xdf, 0xd1, 0x31, 0x61, 0x4a, 0xfa, 0xdd, 0x6e,
    0x62, 0x0c, 0xa9, 0xcd, 0x08, 0x0c, 0xa1, 0x1b, 0xe7, 0xf2, 0xed, 0x36, 0x22, 0xd0, 0x5d, 0x80,
    0x78, 0xeb, 0x6f, 0x5a, 0x58, 0x18, 0xb5, 0xaf, 0x82, 0x77, 0x4c, 0x95, 0xce, 0xc6, 0x4d, 0xda,
    0xca, 0xef, 0x68, 0xa6, 0x6d, 0x71, 0x4d, 0xf1, 0x14, 0xaf, 0x68, 0x25, 0xb8, 0xf3, 0xff, 0xbe,
};
1297
1298
#ifdef DEBUG

//debug key whose privatekey is checked in as misc/debug.privkey
//(DEBUG builds only; allows locally-signed apps to load. Same format as
// _RSA_KEY_GOOGLE above: raw RSA modulus bytes in the .pubkeys section.)
const uint8_t __attribute__ ((section (".pubkeys"))) _RSA_KEY_GOOGLE_DEBUG[] = {
    0x2d, 0xff, 0xa6, 0xb5, 0x65, 0x87, 0xbe, 0x61, 0xd1, 0xe1, 0x67, 0x10, 0xa1, 0x9b, 0xc6, 0xca,
    0xc8, 0xb1, 0xf0, 0xaa, 0x88, 0x60, 0x9f, 0xa1, 0x00, 0xa1, 0x41, 0x9a, 0xd8, 0xb4, 0xd1, 0x74,
    0x9f, 0x23, 0x28, 0x0d, 0xc2, 0xc4, 0x37, 0x15, 0xb1, 0x4a, 0x80, 0xca, 0xab, 0xb9, 0xba, 0x09,
    0x7d, 0xf8, 0x44, 0xd6, 0xa2, 0x72, 0x28, 0x12, 0x91, 0xf6, 0xa5, 0xea, 0xbd, 0xf8, 0x81, 0x6b,
    0xd2, 0x3c, 0x50, 0xa2, 0xc6, 0x19, 0x54, 0x48, 0x45, 0x8d, 0x92, 0xac, 0x01, 0xda, 0x14, 0x32,
    0xdb, 0x05, 0x82, 0x06, 0x30, 0x25, 0x09, 0x7f, 0x5a, 0xbb, 0x86, 0x64, 0x70, 0x98, 0x64, 0x1e,
    0xe6, 0xca, 0x1d, 0xc1, 0xcb, 0xb6, 0x23, 0xd2, 0x62, 0x00, 0x46, 0x97, 0xd5, 0xcc, 0xe6, 0x36,
    0x72, 0xec, 0x2e, 0x43, 0x1f, 0x0a, 0xaf, 0xf2, 0x51, 0xe1, 0xcd, 0xd2, 0x98, 0x5d, 0x7b, 0x64,
    0xeb, 0xd1, 0x35, 0x4d, 0x59, 0x13, 0x82, 0x6c, 0xbd, 0xc4, 0xa2, 0xfc, 0xad, 0x64, 0x73, 0xe2,
    0x71, 0xb5, 0xf4, 0x45, 0x53, 0x6b, 0xc3, 0x56, 0xb9, 0x8b, 0x3d, 0xeb, 0x00, 0x48, 0x6e, 0x29,
    0xb1, 0xb4, 0x8e, 0x2e, 0x43, 0x39, 0xef, 0x45, 0xa0, 0xb8, 0x8b, 0x5f, 0x80, 0xb5, 0x0c, 0xc3,
    0x03, 0xe3, 0xda, 0x51, 0xdc, 0xec, 0x80, 0x2c, 0x0c, 0xdc, 0xe2, 0x71, 0x0a, 0x14, 0x4f, 0x2c,
    0x22, 0x2b, 0x0e, 0xd1, 0x8b, 0x8f, 0x93, 0xd2, 0xf3, 0xec, 0x3a, 0x5a, 0x1c, 0xba, 0x80, 0x54,
    0x23, 0x7f, 0xb0, 0x54, 0x8b, 0xe3, 0x98, 0x22, 0xbb, 0x4b, 0xd0, 0x29, 0x5f, 0xce, 0xf2, 0xaa,
    0x99, 0x89, 0xf2, 0xb7, 0x5d, 0x8d, 0xb2, 0x72, 0x0b, 0x52, 0x02, 0xb8, 0xa4, 0x37, 0xa0, 0x3b,
    0xfe, 0x0a, 0xbc, 0xb3, 0xb3, 0xed, 0x8f, 0x8c, 0x42, 0x59, 0xbe, 0x4e, 0x31, 0xed, 0x11, 0x9b,
};

#endif
1322