/*
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved
 * Copyright (c) 2013, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <lib/backtrace/backtrace.h>
#include <lib/trusty/elf.h>
#include <lib/trusty/trusty_app.h>

#include <arch.h>
#include <assert.h>
#include <compiler.h>
#include <debug.h>
#include <err.h>
#include <inttypes.h>
#include <kernel/event.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
#include <lib/app_manifest/app_manifest.h>
#include <lib/rand/rand.h>
#include <lib/trusty/ipc.h>
#include <lk/init.h>
#include <malloc.h>
#include <platform.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>
#include <uapi/mm.h>
#include <version.h>

#define LOCAL_TRACE 0

#define DEFAULT_MGMT_FLAGS APP_MANIFEST_MGMT_FLAGS_NONE

#define TRUSTY_APP_RESTART_TIMEOUT_SUCCESS (10ULL * 1000ULL * 1000ULL)
#define TRUSTY_APP_RESTART_TIMEOUT_FAILURE (5ULL * 1000ULL * 1000ULL * 1000ULL)

#ifdef TRUSTY_APP_STACK_TOP
#error "TRUSTY_APP_STACK_TOP is no longer respected"
#endif

/* Don't allow NULL to be a valid userspace address */
STATIC_ASSERT(USER_ASPACE_BASE != 0);

#ifndef DEFAULT_HEAP_SIZE
#define DEFAULT_HEAP_SIZE (4 * PAGE_SIZE)
#endif

#define PAGE_MASK (PAGE_SIZE - 1)

#undef ELF_64BIT
#if !IS_64BIT || USER_32BIT
#define ELF_64BIT 0
#else
#define ELF_64BIT 1
#endif

#if ELF_64BIT
#define ELF_NHDR Elf64_Nhdr
#define ELF_SHDR Elf64_Shdr
#define ELF_EHDR Elf64_Ehdr
#define ELF_PHDR Elf64_Phdr
#define Elf_Addr Elf64_Addr
#define Elf_Off Elf64_Off
#define Elf_Word Elf64_Word

#define PRIxELF_Off PRIx64
#define PRIuELF_Size PRIu64
#define PRIxELF_Size PRIx64
#define PRIxELF_Addr PRIx64
#define PRIxELF_Flags PRIx64
#else
#define ELF_NHDR Elf32_Nhdr
#define ELF_SHDR Elf32_Shdr
#define ELF_EHDR Elf32_Ehdr
#define ELF_PHDR Elf32_Phdr
#define Elf_Addr Elf32_Addr
#define Elf_Off Elf32_Off
#define Elf_Word Elf32_Word

#define PRIxELF_Off PRIx32
#define PRIuELF_Size PRIu32
#define PRIxELF_Size PRIx32
#define PRIxELF_Addr PRIx32
#define PRIxELF_Flags PRIx32
#endif

static u_int trusty_next_app_id;
static struct list_node trusty_app_list = LIST_INITIAL_VALUE(trusty_app_list);

struct trusty_builtin_app_img {
    intptr_t manifest_start;
    intptr_t manifest_end;
    intptr_t img_start;
    intptr_t img_end;
};

/* These symbols are linker defined and are declared as unsized arrays to
 * prevent compiler (clang) optimizations that break when the list is empty
 * and the symbols alias.
 */
extern struct trusty_builtin_app_img __trusty_app_list_start[];
extern struct trusty_builtin_app_img __trusty_app_list_end[];

static bool apps_started;
static mutex_t apps_lock = MUTEX_INITIAL_VALUE(apps_lock);
static struct list_node app_notifier_list =
        LIST_INITIAL_VALUE(app_notifier_list);
uint als_slot_cnt;
static event_t app_mgr_event =
        EVENT_INITIAL_VALUE(app_mgr_event, 0, EVENT_FLAG_AUTOUNSIGNAL);

static struct list_node allowed_mmio_ranges_list =
        LIST_INITIAL_VALUE(allowed_mmio_ranges_list);

#define PRINT_TRUSTY_APP_UUID(level, tid, u)                                                       \
    dprintf((level),                                                                               \
            "trusty_app %d uuid: 0x%08x 0x%04x 0x%04x 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n",    \
            tid, (u)->time_low, (u)->time_mid, (u)->time_hi_and_version,                           \
            (u)->clock_seq_and_node[0], (u)->clock_seq_and_node[1],                                \
            (u)->clock_seq_and_node[2], (u)->clock_seq_and_node[3],                                \
            (u)->clock_seq_and_node[4], (u)->clock_seq_and_node[5],                                \
            (u)->clock_seq_and_node[6], (u)->clock_seq_and_node[7]);

static bool address_range_within_bounds(const void* range_start,
                                        size_t range_size,
                                        const void* lower_bound,
                                        const void* upper_bound) {
    const void* range_end = range_start + range_size;

    if (upper_bound < lower_bound) {
        LTRACEF("upper bound(%p) is below lower bound(%p)\n", upper_bound,
                lower_bound);
        return false;
    }

    if (range_end < range_start) {
        LTRACEF("Range overflows. start:%p size:%zd end:%p\n", range_start,
                range_size, range_end);
        return false;
    }

    if (range_start < lower_bound) {
        LTRACEF("Range starts(%p) before lower bound(%p)\n", range_start,
                lower_bound);
        return false;
    }

    if (range_end > upper_bound) {
        LTRACEF("Range ends(%p) past upper bound(%p)\n", range_end,
                upper_bound);
        return false;
    }

    return true;
}

static inline bool address_range_within_img(
        const void* range_start,
        size_t range_size,
        const struct trusty_app_img* appimg) {
    return address_range_within_bounds(range_start, range_size,
                                       (const void*)appimg->img_start,
                                       (const void*)appimg->img_end);
}

void trusty_app_allow_mmio_range(struct trusty_app_mmio_allowed_range* range) {
    DEBUG_ASSERT(range);

    if (!range->size) {
        dprintf(CRITICAL, "Allowed mmio range is empty\n");
        return;
    }

    mutex_acquire(&apps_lock);
    list_add_tail(&allowed_mmio_ranges_list, &range->node);
    mutex_release(&apps_lock);
}

/**
 * app_mmio_is_allowed() - Check whether an app is allowed to map a given
 *                         physical memory range.
 * @trusty_app: The application to check.
 * @mmio_start: The start of the physical memory range to map.
 * @mmio_size:  The size of the physical memory range.
 *
 * For security reasons, we do not want to allow any loadable app to map
 * physical memory by default. However, some specific apps need to
 * map device memory, so we maintain an allowlist of per-app ranges that
 * can be mapped. This function checks a given physical memory range for the
 * loadable app @trusty_app against the allowlist. Each project can add its
 * own allowlist entries using trusty_app_allow_mmio_range().
 */
static bool app_mmio_is_allowed(struct trusty_app* trusty_app,
                                paddr_t mmio_start,
                                size_t mmio_size) {
    if (!(trusty_app->flags & APP_FLAGS_LOADABLE)) {
        return true;
    }

    DEBUG_ASSERT(mmio_size);
    DEBUG_ASSERT(is_mutex_held(&apps_lock));

    paddr_t mmio_end = mmio_start + (mmio_size - 1);
    const struct trusty_app_mmio_allowed_range* range;
    list_for_every_entry(&allowed_mmio_ranges_list, range,
                         struct trusty_app_mmio_allowed_range, node) {
        DEBUG_ASSERT(range->size);
        paddr_t range_end = range->start + (range->size - 1);
        if (!memcmp(&range->uuid, &trusty_app->props.uuid, sizeof(uuid_t)) &&
            mmio_start >= range->start && mmio_end <= range_end) {
            return true;
        }
    }

    return false;
}
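
/*
 * Example (an illustrative sketch, not part of this file): a project would
 * typically register an allowlist entry from platform init code, using a
 * statically allocated range descriptor. The uuid and addresses below are
 * hypothetical placeholders.
 *
 *   static struct trusty_app_mmio_allowed_range example_range = {
 *       .node = LIST_INITIAL_CLEARED_VALUE,
 *       .uuid = EXAMPLE_APP_UUID,  // uuid of the loadable app
 *       .start = 0x80000000,       // physical base of the device registers
 *       .size = 0x1000,            // one page
 *   };
 *
 *   trusty_app_allow_mmio_range(&example_range);
 *
 * After this call, a mem_map manifest entry from the app with
 * EXAMPLE_APP_UUID that falls entirely inside [start, start + size) passes
 * the app_mmio_is_allowed() check above.
 */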

/**
 * struct trusty_app_dma_allowed_range - Prepared dma range for a trusty app.
 * @node:  Internal list node, should be initialized to
 *         %LIST_INITIAL_CLEARED_VALUE
 * @slice: Represents a virtual memory range prepared for dma. Can be used to
 *         get the physical pages used for dma
 * @vaddr: Virtual memory address. Used to tear down all mappings from a single
 *         call to prepare_dma
 * @flags: Flags used to map the dma range
 */
struct trusty_app_dma_allowed_range {
    struct list_node node;
    struct vmm_obj_slice slice;
    vaddr_t vaddr;
    uint32_t flags;
};

status_t trusty_app_allow_dma_range(struct trusty_app* app,
                                    struct vmm_obj* obj,
                                    size_t offset,
                                    size_t size,
                                    vaddr_t vaddr,
                                    uint32_t flags) {
    DEBUG_ASSERT(obj);
    DEBUG_ASSERT(app);
    DEBUG_ASSERT(size);

    /* check that dma range hasn't already been mapped at vaddr */
    const struct trusty_app_dma_allowed_range* check_range;
    list_for_every_entry(&app->props.dma_entry_list, check_range,
                         struct trusty_app_dma_allowed_range, node) {
        if (check_range->vaddr == vaddr)
            return ERR_INVALID_ARGS;
    }

    struct trusty_app_dma_allowed_range* range_list_entry =
            (struct trusty_app_dma_allowed_range*)calloc(
                    1, sizeof(struct trusty_app_dma_allowed_range));
    if (!range_list_entry) {
        return ERR_NO_MEMORY;
    }
    /* range_list_entry->node is already zero-initialized */
    vmm_obj_slice_init(&range_list_entry->slice);
    vmm_obj_slice_bind(&range_list_entry->slice, obj, offset, size);
    range_list_entry->vaddr = vaddr;
    range_list_entry->flags = flags;

    mutex_acquire(&apps_lock);
    list_add_tail(&app->props.dma_entry_list, &range_list_entry->node);
    mutex_release(&apps_lock);

    return NO_ERROR;
}

status_t trusty_app_destroy_dma_range(vaddr_t vaddr, size_t size) {
    status_t ret = ERR_NOT_FOUND;
    struct trusty_app_dma_allowed_range* range;
    struct trusty_app_dma_allowed_range* next_range;
    struct trusty_app* app = current_trusty_app();

    mutex_acquire(&apps_lock);
    list_for_every_entry_safe(&app->props.dma_entry_list, range, next_range,
                              struct trusty_app_dma_allowed_range, node) {
        DEBUG_ASSERT(range->slice.size);
        if (range->vaddr == vaddr && range->slice.size == size) {
            list_delete(&range->node);
            vmm_obj_slice_release(&range->slice);
            free(range);
            ret = NO_ERROR;
            break;
        }
    }

    mutex_release(&apps_lock);

    return ret;
}
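
/*
 * Illustrative lifecycle (a sketch; the prepare/finish entry points are
 * hypothetical names, only the two functions above are real): a dma syscall
 * handler records a range when it prepares a buffer for dma and removes the
 * record when the mapping is torn down. Pairing the two calls keeps this
 * bookkeeping in sync with the app's actual dma mappings.
 *
 *   // on prepare_dma(vaddr, size):
 *   ret = trusty_app_allow_dma_range(app, obj, offset, size, vaddr, flags);
 *
 *   // on finish_dma(vaddr, size):
 *   ret = trusty_app_destroy_dma_range(vaddr, size);
 */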

/* Must be called with the apps_lock held */
static bool trusty_app_dma_is_allowed_locked(const struct trusty_app* app,
                                             paddr_t paddr) {
    int ret;
    size_t offset;
    const struct trusty_app_dma_allowed_range* range;

    DEBUG_ASSERT(app);
    DEBUG_ASSERT(is_mutex_held(&apps_lock));
    list_for_every_entry(&app->props.dma_entry_list, range,
                         struct trusty_app_dma_allowed_range, node) {
        DEBUG_ASSERT(range->slice.size);
        offset = 0;
        do {
            paddr_t prepared_paddr;
            size_t prepared_paddr_size;
            ret = range->slice.obj->ops->get_page(
                    range->slice.obj, range->slice.offset + offset,
                    &prepared_paddr, &prepared_paddr_size);
            if (ret != NO_ERROR) {
                TRACEF("failed to get pages for paddr 0x%" PRIxPADDR "\n",
                       paddr);
                return false;
            }
            paddr_t prepared_paddr_end =
                    prepared_paddr + (prepared_paddr_size - 1);
            if (paddr >= prepared_paddr && paddr <= prepared_paddr_end) {
                return true;
            }
            offset += MIN(range->slice.size - offset, prepared_paddr_size);
        } while (offset < range->slice.size &&
                 (range->flags & DMA_FLAG_MULTI_PMEM));
    }

    TRACEF("paddr 0x%" PRIxPADDR " is not valid for dma\n", paddr);
    return false;
}

bool trusty_app_dma_is_allowed(const struct trusty_app* app, paddr_t paddr) {
    bool res;
    mutex_acquire(&apps_lock);
    res = trusty_app_dma_is_allowed_locked(app, paddr);
    mutex_release(&apps_lock);
    return res;
}

static void finalize_registration(void) {
    mutex_acquire(&apps_lock);
    apps_started = true;
    mutex_release(&apps_lock);
}

status_t trusty_register_app_notifier(struct trusty_app_notifier* n) {
    status_t ret = NO_ERROR;

    mutex_acquire(&apps_lock);
    if (!apps_started)
        list_add_tail(&app_notifier_list, &n->node);
    else
        ret = ERR_ALREADY_STARTED;
    mutex_release(&apps_lock);
    return ret;
}

int trusty_als_alloc_slot(void) {
    int ret;

    mutex_acquire(&apps_lock);
    if (!apps_started)
        ret = ++als_slot_cnt;
    else
        ret = ERR_ALREADY_STARTED;
    mutex_release(&apps_lock);
    return ret;
}
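
/*
 * Illustrative usage (a sketch; the notifier callback fields are assumed
 * from how notifiers are used elsewhere, and the hook name is hypothetical):
 * kernel services call both functions from an LK init hook that runs before
 * apps start, since both fail with ERR_ALREADY_STARTED afterwards.
 *
 *   static struct trusty_app_notifier example_notifier = {
 *       .startup = example_startup_cb,
 *       .shutdown = example_shutdown_cb,
 *   };
 *
 *   static void example_init(uint level) {
 *       int slot = trusty_als_alloc_slot();
 *       ASSERT(slot > 0);
 *       trusty_register_app_notifier(&example_notifier);
 *   }
 *   LK_INIT_HOOK(example, example_init, LK_INIT_LEVEL_APPS - 1);
 */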

#if ELF_64BIT
#define ENTER_USPACE_FLAGS 0
#else
#define ENTER_USPACE_FLAGS ARCH_ENTER_USPACE_FLAG_32BIT
#endif

/*
 * Allocate space on the user stack.
 */
static user_addr_t user_stack_alloc(struct trusty_thread* trusty_thread,
                                    user_size_t data_len,
                                    user_size_t align,
                                    user_addr_t* stack_ptr) {
    user_addr_t ptr = round_down(*stack_ptr - data_len, align);
    if (ptr < trusty_thread->stack_start - trusty_thread->stack_size) {
        panic("stack underflow while initializing user space\n");
    }
    *stack_ptr = ptr;
    return ptr;
}

/*
 * Copy data to a preallocated spot on the user stack. This should not fail.
 */
static void copy_to_user_stack(user_addr_t dst_ptr,
                               const void* data,
                               user_size_t data_len) {
    int ret = copy_to_user(dst_ptr, data, data_len);
    if (ret) {
        panic("copy_to_user failed %d\n", ret);
    }
}

/*
 * Allocate space on the user stack and fill it with data.
 */
static user_addr_t add_to_user_stack(struct trusty_thread* trusty_thread,
                                     const void* data,
                                     user_size_t data_len,
                                     user_size_t align,
                                     user_addr_t* stack_ptr) {
    user_addr_t ptr =
            user_stack_alloc(trusty_thread, data_len, align, stack_ptr);
    copy_to_user_stack(ptr, data, data_len);
    return ptr;
}

/* TODO share a common header file. */
#define AT_PAGESZ 6
#define AT_BASE 7
#define AT_RANDOM 25
#define AT_HWCAP2 26
#define HWCAP2_MTE (1 << 18)

/*
 * Pass data to libc on the user stack.
 * Prevent inlining so that the stack allocations inside this function don't
 * get trapped on the kernel stack.
 */
static __NO_INLINE user_addr_t
trusty_thread_write_elf_tables(struct trusty_thread* trusty_thread,
                               user_addr_t* stack_ptr,
                               vaddr_t load_bias) {
    /* Construct the elf tables in reverse order - the stack grows down. */

    /*
     * sixteen random bytes
     */
    uint8_t rand_bytes[16] = {0};
    rand_get_bytes(rand_bytes, sizeof(rand_bytes));
    user_addr_t rand_bytes_addr = add_to_user_stack(
            trusty_thread, rand_bytes, sizeof(rand_bytes), 1, stack_ptr);

    const char* app_name = trusty_thread->app->props.app_name;
    user_addr_t app_name_addr =
            add_to_user_stack(trusty_thread, app_name, strlen(app_name) + 1,
                              sizeof(user_addr_t), stack_ptr);

    bool mte = arch_tagging_enabled();
    /* auxv */
    user_addr_t auxv[] = {
            AT_PAGESZ, PAGE_SIZE,       AT_BASE,   load_bias,
            AT_RANDOM, rand_bytes_addr, AT_HWCAP2, mte ? HWCAP2_MTE : 0,
            0};
    add_to_user_stack(trusty_thread, auxv, sizeof(auxv), sizeof(user_addr_t),
                      stack_ptr);

    /* envp - for layout compatibility, unused */
    user_addr_t envp[] = {
            0,
    };
    add_to_user_stack(trusty_thread, envp, sizeof(envp), sizeof(user_addr_t),
                      stack_ptr);

    /* argv. Only argv[0] and argv[1] (terminator) are set. */
    user_addr_t argv[] = {
            app_name_addr,
            0,
    };
    add_to_user_stack(trusty_thread, argv, sizeof(argv), sizeof(user_addr_t),
                      stack_ptr);

    /* argc. The null terminator is not counted. */
    user_addr_t argc = countof(argv) - 1;
    user_addr_t argc_ptr = add_to_user_stack(trusty_thread, &argc, sizeof(argc),
                                             sizeof(user_addr_t), stack_ptr);

    return argc_ptr;
}
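
/*
 * Resulting initial stack layout (a sketch derived from the pushes above;
 * the stack grows down):
 *
 *   higher addresses
 *   +----------------------------+  <- stack_start
 *   | 16 random bytes            |  <- AT_RANDOM points here
 *   | app_name string            |  <- argv[0] points here
 *   | auxv[]: PAGESZ, BASE,      |
 *   |         RANDOM, HWCAP2, 0  |
 *   | envp[]: { 0 }              |
 *   | argv[]: { app_name, 0 }    |
 *   | argc                       |  <- returned pointer (elf tables)
 *   +----------------------------+
 *   lower addresses
 *
 * This follows the System V style argc/argv/envp/auxv contract that libc
 * expects at process entry.
 */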

static int trusty_thread_startup(void* arg) {
    struct trusty_thread* trusty_thread = current_trusty_thread();

    vmm_set_active_aspace(trusty_thread->app->aspace);

    user_addr_t stack_ptr = trusty_thread->stack_start;
    user_addr_t elf_tables = trusty_thread_write_elf_tables(
            trusty_thread, &stack_ptr, trusty_thread->app->load_bias);

    thread_sleep_until_ns(trusty_thread->app->min_start_time);

    user_addr_t shadow_stack_base = 0;
#if USER_SCS_SUPPORTED
    shadow_stack_base = trusty_thread->shadow_stack_base;
#endif

    arch_enter_uspace(trusty_thread->entry, stack_ptr, shadow_stack_base,
                      ENTER_USPACE_FLAGS, elf_tables);

    __UNREACHABLE;
}

static status_t trusty_thread_start(struct trusty_thread* trusty_thread) {
    DEBUG_ASSERT(trusty_thread && trusty_thread->thread);

    return thread_resume(trusty_thread->thread);
}

void __NO_RETURN trusty_thread_exit(int retcode) {
    struct trusty_thread* trusty_thread = current_trusty_thread();
    vaddr_t stack_bot;

    ASSERT(trusty_thread);

    stack_bot = trusty_thread->stack_start - trusty_thread->stack_size;

    vmm_free_region(trusty_thread->app->aspace, stack_bot);

#if USER_SCS_SUPPORTED
    if (trusty_thread->shadow_stack_base) {
        /*
         * revert the adjustment of shadow_stack_base to reconstruct pointer
         * returned by vmm_alloc.
         */
        size_t size = trusty_thread->shadow_stack_size;
        size_t adjustment = round_up(size, PAGE_SIZE) - size;
        vmm_free_region(trusty_thread->app->aspace,
                        trusty_thread->shadow_stack_base - adjustment);
    } else {
        DEBUG_ASSERT(trusty_thread->app->props.min_shadow_stack_size == 0);
    }
#endif

    thread_exit(retcode);
}

static struct trusty_thread* trusty_thread_create(
        const char* name,
        vaddr_t entry,
        int priority,
        size_t stack_size,
        size_t shadow_stack_size,
        struct trusty_app* trusty_app) {
    struct trusty_thread* trusty_thread;
    status_t err;
    vaddr_t stack_bot = 0;
    stack_size = round_up(stack_size, PAGE_SIZE);

    trusty_thread = calloc(1, sizeof(struct trusty_thread));
    if (!trusty_thread)
        return NULL;

    err = vmm_alloc(trusty_app->aspace, "stack", stack_size, (void**)&stack_bot,
                    PAGE_SIZE_SHIFT, 0,
                    ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (err != NO_ERROR) {
        dprintf(CRITICAL,
                "failed(%d) to create thread stack(0x%" PRIxVADDR
                ") for app %u, %s\n",
                err, stack_bot, trusty_app->app_id, trusty_app->props.app_name);
        goto err_stack;
    }

#if USER_SCS_SUPPORTED
    vaddr_t shadow_stack_base = 0;
    if (shadow_stack_size) {
        err = vmm_alloc(
                trusty_app->aspace, "shadow stack", shadow_stack_size,
                (void**)&shadow_stack_base, PAGE_SIZE_SHIFT, 0,
                ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        if (err != NO_ERROR) {
            dprintf(CRITICAL,
                    "failed(%d) to allocate shadow stack(0x%" PRIxVADDR
                    ") for app %u\n",
                    err, shadow_stack_base, trusty_app->app_id);
            goto err_shadow_stack;
        }
    }
#endif

    trusty_thread->thread = thread_create(name, trusty_thread_startup, NULL,
                                          priority, DEFAULT_STACK_SIZE);
    if (!trusty_thread->thread)
        goto err_thread;

    trusty_thread->app = trusty_app;
    trusty_thread->entry = entry;
    trusty_thread->stack_start = stack_bot + stack_size; /* stack grows down */
    trusty_thread->stack_size = stack_size;
#if USER_SCS_SUPPORTED
    /* make shadow stack hit guard page if too small */
    size_t adjustment =
            round_up(shadow_stack_size, PAGE_SIZE) - shadow_stack_size;

    /* an adjustment is only made if the app has shadow call stacks enabled */
    DEBUG_ASSERT(shadow_stack_size > 0 || adjustment == 0);

    /* shadow stack grows up */
    trusty_thread->shadow_stack_base = shadow_stack_base + adjustment;
    trusty_thread->shadow_stack_size = shadow_stack_size;
#endif
    thread_tls_set(trusty_thread->thread, TLS_ENTRY_TRUSTY,
                   (uintptr_t)trusty_thread);

    int pinned_cpu = trusty_app->props.pinned_cpu;
    if (pinned_cpu != APP_MANIFEST_PINNED_CPU_NONE) {
        thread_set_pinned_cpu(trusty_thread->thread, pinned_cpu);
        dprintf(SPEW, "trusty_app %d, %s pinned to CPU: %u\n",
                trusty_app->app_id, trusty_app->props.app_name, pinned_cpu);
    }

    return trusty_thread;

err_thread:
#if USER_SCS_SUPPORTED
    if (shadow_stack_size) {
        vmm_free_region(trusty_app->aspace, shadow_stack_base);
    }
err_shadow_stack:
#endif
    vmm_free_region(trusty_app->aspace, stack_bot);
err_stack:
    free(trusty_thread);
    return NULL;
}

/* Must be called with the apps_lock held */
static struct manifest_port_entry* find_manifest_port_entry_locked(
        const char* port_path,
        struct trusty_app** app_out) {
    struct trusty_app* app;
    struct manifest_port_entry* entry;

    DEBUG_ASSERT(is_mutex_held(&apps_lock));

    list_for_every_entry(&trusty_app_list, app, struct trusty_app, node) {
        list_for_every_entry(&app->props.port_entry_list, entry,
                             struct manifest_port_entry, node) {
            if (!strncmp(port_path, entry->path, entry->path_len)) {
                if (app_out)
                    *app_out = app;

                return entry;
            }
        }
    }

    return NULL;
}

/* Must be called with the apps_lock held */
static struct trusty_app* trusty_app_find_by_uuid_locked(uuid_t* uuid) {
    struct trusty_app* app;

    DEBUG_ASSERT(is_mutex_held(&apps_lock));

    list_for_every_entry(&trusty_app_list, app, struct trusty_app, node) {
        if (!memcmp(&app->props.uuid, uuid, sizeof(uuid_t)))
            return app;
    }

    return NULL;
}

bool trusty_uuid_dma_is_allowed(const struct uuid* uuid, paddr_t paddr) {
    bool res;
    const struct trusty_app* app;
    mutex_acquire(&apps_lock);
    app = trusty_app_find_by_uuid_locked((struct uuid*)uuid);
    res = trusty_app_dma_is_allowed_locked(app, paddr);
    mutex_release(&apps_lock);
    return res;
}

static status_t get_app_manifest_config_data(struct trusty_app* trusty_app,
                                             char** manifest_data,
                                             size_t* size) {
    struct trusty_app_img* app_img;

    app_img = &trusty_app->app_img;
    if (!app_img->manifest_start) {
        dprintf(CRITICAL, "manifest section header not found\n");
        return ERR_NOT_VALID;
    }

    /* manifest data is embedded in kernel */
    dprintf(SPEW,
            "trusty app manifest: start %p size 0x%08" PRIxPTR " end %p\n",
            (void*)app_img->manifest_start,
            app_img->manifest_end - app_img->manifest_start,
            (void*)app_img->manifest_end);

    *size = app_img->manifest_end - app_img->manifest_start;
    *manifest_data = (char*)app_img->manifest_start;

    return NO_ERROR;
}

static void destroy_app_phys_mem(struct phys_mem_obj* obj) {
    struct manifest_mmio_entry* mmio_entry;
    mmio_entry = containerof(obj, struct manifest_mmio_entry, phys_mem_obj);
    assert(!list_in_list(&mmio_entry->node));
    free(mmio_entry);
}

/**
 * load_app_elf_gnu_property_array() - Load app properties from an ELF GNU
 * property array.
 * @trusty_app:  Trusty application, providing both the ELF image and the
 *               props to update.
 * @offset:      Byte offset within the ELF image of the GNU property array.
 * @length:      Length in bytes of the ELF GNU property array.
 *
 * Return: NO_ERROR on success, or ERR_NOT_VALID if the ELF is malformed.
 */
static status_t load_app_elf_gnu_property_array(struct trusty_app* trusty_app,
                                                Elf_Off offset,
                                                size_t length) {
    const void* elf_start = (void*)trusty_app->app_img.img_start;

    /* Check property array is within the ELF image */
    if (!address_range_within_img(elf_start + offset, length,
                                  &trusty_app->app_img)) {
        return ERR_NOT_VALID;
    }

    /* Walk through the variable length properties */
    while (length >= sizeof(ELF_GnuProp)) {
        const ELF_GnuProp* gp = elf_start + offset;
        Elf_Word gp_size = sizeof(ELF_GnuProp);

        /* Check header is within bounds */
        if (!address_range_within_img(gp, gp_size, &trusty_app->app_img)) {
            return ERR_NOT_VALID;
        }

        /* Update full size and round to either 4 or 8 byte alignment */
        gp_size += gp->pr_datasz;
        gp_size += sizeof(Elf_Word) - 1;
        gp_size &= ~(sizeof(Elf_Word) - 1);

        /* Check access to the full property */
        if (gp_size < sizeof(ELF_GnuProp) ||
            !address_range_within_img(gp, gp_size, &trusty_app->app_img)) {
            return ERR_NOT_VALID;
        }

#ifdef ARCH_ARM64
        /* TODO(mikemcternan): Split into an arch specific function */
        if (gp && gp->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
            /* This property should always have a 32-bit value */
            if (gp->pr_datasz != sizeof(Elf32_Word)) {
                return ERR_NOT_VALID;
            }

            switch (gp->pr_data[0]) {
            case GNU_PROPERTY_AARCH64_FEATURE_1_BTI:
                trusty_app->props.feature_bti = true;
                break;
            default:
                break;
            }
        }
#endif
        if (length <= gp_size) {
            length = 0;
        } else {
            length -= gp_size;
            offset += gp_size;
        }
    }

    return NO_ERROR;
}
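
/*
 * For reference, the note parsed above has this on-disk shape (a sketch of
 * the standard .note.gnu.property layout; field names follow the ELF gABI):
 *
 *   Elf_Nhdr {
 *       n_namesz = 4;                      // "GNU\0"
 *       n_descsz = <size of property array>;
 *       n_type   = NT_GNU_PROPERTY_TYPE_0;
 *   }
 *   "GNU\0"
 *   ELF_GnuProp {                          // repeated; each entry padded to
 *       pr_type;                           // 4 (ELF32) or 8 (ELF64) bytes
 *       pr_datasz;
 *       pr_data[];                         // e.g. AArch64 feature bitmask
 *   } ...
 *
 * GNU_PROPERTY_AARCH64_FEATURE_1_AND carries a 32-bit bitmask; the BTI bit
 * is the only one this code currently consumes.
 */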

static status_t load_app_elf_options(struct trusty_app* trusty_app) {
    const struct trusty_app_img* app_img = &trusty_app->app_img;
    const ELF_EHDR* elf = (ELF_EHDR*)app_img->img_start;

    /* Iterate ELF program headers to find PT_GNU_PROPERTY section */
    for (int i = 0; i < elf->e_phnum; i++) {
        const ELF_PHDR* phdr =
                (const void*)elf + elf->e_phoff + (elf->e_phentsize * i);

        if (!address_range_within_img(phdr, sizeof(ELF_PHDR),
                                      &trusty_app->app_img)) {
            return ERR_NOT_VALID;
        }

        /* Check for a GNU property section */
        if (phdr->p_type == PT_GNU_PROPERTY) {
            const ELF_NHDR* nhdr = (const void*)elf + phdr->p_offset;
            const int nhdr_len = sizeof(ELF_NHDR) + sizeof("GNU");

            if (!address_range_within_img(nhdr, nhdr_len,
                                          &trusty_app->app_img)) {
                return ERR_NOT_VALID;
            }

            if (nhdr->n_namesz == sizeof("GNU") &&
                nhdr->n_type == NT_GNU_PROPERTY_TYPE_0 &&
                strcmp("GNU", (const char*)nhdr + sizeof(ELF_NHDR)) == 0) {
                const Elf_Off n_desc = phdr->p_offset + nhdr_len;

                status_t ret = load_app_elf_gnu_property_array(
                        trusty_app, n_desc, nhdr->n_descsz);
                if (ret != NO_ERROR) {
                    return ret;
                }
            }
        }
    }

    return NO_ERROR;
}

static status_t load_app_config_options(struct trusty_app* trusty_app) {
    char* manifest_data;
    size_t manifest_size;
    uint32_t mmio_arch_mmu_flags;
    uint64_t mmio_size;
    struct manifest_mmio_entry* mmio_entry;
    paddr_t tmp_paddr;
    status_t ret;
    struct manifest_port_entry* entry;
    struct app_manifest_iterator manifest_iter;
    struct app_manifest_config_entry manifest_entry;
    const char* unknown_app_name = "<unknown>";

    /* init default config options before parsing manifest */
    trusty_app->props.app_name = unknown_app_name;
    trusty_app->props.min_heap_size = DEFAULT_HEAP_SIZE;
    trusty_app->props.min_stack_size = DEFAULT_STACK_SIZE;
    /* binary manifest must specify the min shadow stack size */
    trusty_app->props.min_shadow_stack_size = 0;
    trusty_app->props.mgmt_flags = DEFAULT_MGMT_FLAGS;
    trusty_app->props.pinned_cpu = APP_MANIFEST_PINNED_CPU_NONE;
    trusty_app->props.priority = DEFAULT_PRIORITY;

    ret = load_app_elf_options(trusty_app);
    if (ret != NO_ERROR) {
        return ERR_NOT_VALID;
    }

    manifest_data = NULL;
    manifest_size = 0;
    ret = get_app_manifest_config_data(trusty_app, &manifest_data,
                                       &manifest_size);
    if (ret != NO_ERROR) {
        return ERR_NOT_VALID;
    }

    /*
     * Step through the configuration blob.
     *
     * Save off some configuration data while we are here but
     * defer processing of other data until it is needed later.
     */
    ret = app_manifest_iterator_reset(&manifest_iter, manifest_data,
                                      manifest_size);
    if (ret != NO_ERROR) {
        dprintf(CRITICAL, "error parsing manifest for app %u\n",
                trusty_app->app_id);
        return ret;
    }
    while (app_manifest_iterator_next(&manifest_iter, &manifest_entry, &ret)) {
        switch (manifest_entry.key) {
        case APP_MANIFEST_CONFIG_KEY_MIN_STACK_SIZE:
            trusty_app->props.min_stack_size =
                    manifest_entry.value.min_stack_size;
            if (trusty_app->props.min_stack_size == 0) {
                dprintf(CRITICAL,
                        "manifest MIN_STACK_SIZE is 0 of app %u, %s\n",
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }
            break;
        case APP_MANIFEST_CONFIG_KEY_MIN_HEAP_SIZE:
            trusty_app->props.min_heap_size =
                    manifest_entry.value.min_heap_size;
            break;
        case APP_MANIFEST_CONFIG_KEY_MAP_MEM:
            mmio_arch_mmu_flags = manifest_entry.value.mem_map.arch_mmu_flags;
            mmio_size = round_up(manifest_entry.value.mem_map.size, PAGE_SIZE);
            trusty_app->props.map_io_mem_cnt++;

            if (!IS_PAGE_ALIGNED(manifest_entry.value.mem_map.offset)) {
                dprintf(CRITICAL, "mmio_id %u not page aligned of app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            if ((paddr_t)manifest_entry.value.mem_map.offset !=
                        manifest_entry.value.mem_map.offset ||
                (size_t)mmio_size != mmio_size) {
                dprintf(CRITICAL,
                        "mmio_id %d address/size too large of app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            if (!mmio_size ||
                __builtin_add_overflow(manifest_entry.value.mem_map.offset,
                                       mmio_size - 1, &tmp_paddr)) {
                dprintf(CRITICAL, "mmio_id %u bad size of app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            if (manifest_entry.value.mem_map.arch_mmu_flags &
                        ~(ARCH_MMU_FLAG_CACHE_MASK | ARCH_MMU_FLAG_NS) ||
                ((manifest_entry.value.mem_map.arch_mmu_flags &
                  ARCH_MMU_FLAG_CACHE_MASK) != ARCH_MMU_FLAG_CACHED &&
                 (manifest_entry.value.mem_map.arch_mmu_flags &
                  ARCH_MMU_FLAG_CACHE_MASK) != ARCH_MMU_FLAG_UNCACHED &&
                 (manifest_entry.value.mem_map.arch_mmu_flags &
                  ARCH_MMU_FLAG_CACHE_MASK) != ARCH_MMU_FLAG_UNCACHED_DEVICE)) {
                dprintf(CRITICAL,
                        "mmio_id %u bad arch_mmu_flags 0x%x of app %u, %s\n",
                        manifest_entry.value.mem_map.id,
                        manifest_entry.value.mem_map.arch_mmu_flags,
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }
            mmio_arch_mmu_flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;

            if (!app_mmio_is_allowed(
                        trusty_app,
                        (paddr_t)manifest_entry.value.mem_map.offset,
                        mmio_size)) {
                dprintf(CRITICAL,
                        "mmio_id %u not allowed for loadable app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            mmio_entry = calloc(1, sizeof(struct manifest_mmio_entry));
            if (!mmio_entry) {
                dprintf(CRITICAL,
                        "Failed to allocate memory for manifest mmio %d of app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NO_MEMORY;
            }

            phys_mem_obj_dynamic_initialize(&mmio_entry->phys_mem_obj,
                                            &mmio_entry->phys_mem_obj_self_ref,
                                            manifest_entry.value.mem_map.offset,
                                            mmio_size, mmio_arch_mmu_flags,
                                            destroy_app_phys_mem);
            mmio_entry->id = manifest_entry.value.mem_map.id;
            list_add_tail(&trusty_app->props.mmio_entry_list,
                          &mmio_entry->node);

            break;
        case APP_MANIFEST_CONFIG_KEY_MGMT_FLAGS:
            trusty_app->props.mgmt_flags = manifest_entry.value.mgmt_flags;
            break;
        case APP_MANIFEST_CONFIG_KEY_START_PORT:
            if (manifest_entry.value.start_port.name_size > IPC_PORT_PATH_MAX) {
                dprintf(CRITICAL,
                        "manifest port name %s too long:%#" PRIx32
                        " of app %u, %s\n",
                        manifest_entry.value.start_port.name,
                        manifest_entry.value.start_port.name_size,
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            entry = find_manifest_port_entry_locked(
                    manifest_entry.value.start_port.name, NULL);
            if (entry) {
                dprintf(CRITICAL, "Port %s is already registered\n",
                        manifest_entry.value.start_port.name);
                return ERR_ALREADY_EXISTS;
            }

            entry = calloc(1, sizeof(struct manifest_port_entry));
            if (!entry) {
                dprintf(CRITICAL,
                        "Failed to allocate memory for manifest port %s of app %u, %s\n",
                        manifest_entry.value.start_port.name,
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NO_MEMORY;
            }

            entry->flags = manifest_entry.value.start_port.flags;
            entry->path_len = manifest_entry.value.start_port.name_size;
            entry->path = manifest_entry.value.start_port.name;

            list_add_tail(&trusty_app->props.port_entry_list, &entry->node);

            break;
        case APP_MANIFEST_CONFIG_KEY_PINNED_CPU:
            if (manifest_entry.value.pinned_cpu >= SMP_MAX_CPUS) {
                dprintf(CRITICAL,
                        "pinned CPU index %u out of range, app %u, %s\n",
                        manifest_entry.value.pinned_cpu, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            trusty_app->props.pinned_cpu = manifest_entry.value.pinned_cpu;
            break;
        case APP_MANIFEST_CONFIG_KEY_PRIORITY:
            if (manifest_entry.value.priority < (LOWEST_PRIORITY + 2) ||
                manifest_entry.value.priority > (HIGHEST_PRIORITY - 1)) {
                dprintf(CRITICAL,
                        "priority value %u out of range, app %u, %s\n",
                        manifest_entry.value.priority, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }
            trusty_app->props.priority = manifest_entry.value.priority;
            break;
        case APP_MANIFEST_CONFIG_KEY_MIN_SHADOW_STACK_SIZE:
#if !USER_SCS_SUPPORTED
            if (manifest_entry.value.min_shadow_stack_size) {
                dprintf(CRITICAL,
                        "Shadow call stack requested by app %u, %s. Kernel "
                        "was not built to support user shadow call stacks\n",
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }
#endif
            trusty_app->props.min_shadow_stack_size =
                    manifest_entry.value.min_shadow_stack_size;
            /* min_shadow_stack_size == 0 means app opted out of shadow stack */
            break;
        case APP_MANIFEST_CONFIG_KEY_UUID:
            memcpy(&trusty_app->props.uuid, &manifest_entry.value.uuid,
                   sizeof(uuid_t));
            break;
        case APP_MANIFEST_CONFIG_KEY_APP_NAME:
            trusty_app->props.app_name = manifest_entry.value.app_name;
            break;
        case APP_MANIFEST_CONFIG_KEY_VERSION:
        case APP_MANIFEST_CONFIG_KEY_MIN_VERSION:
        case APP_MANIFEST_CONFIG_KEY_APPLOADER_FLAGS:
            /* Handled by apploader */
            break;
        }
    }
    if (ret != NO_ERROR) {
        dprintf(CRITICAL, "error parsing manifest for app %u\n",
                trusty_app->app_id);
        return ret;
    }
    if (trusty_app->props.app_name == unknown_app_name) {
        dprintf(CRITICAL, "app-name missing for app %u\n", trusty_app->app_id);
        return ERR_NOT_VALID;
    }

    if (trusty_app_find_by_uuid_locked(&trusty_app->props.uuid)) {
        PRINT_TRUSTY_APP_UUID(CRITICAL, trusty_app->app_id,
                              &trusty_app->props.uuid);
        dprintf(CRITICAL, "app already registered\n");
        return ERR_ALREADY_EXISTS;
    }

    PRINT_TRUSTY_APP_UUID(SPEW, trusty_app->app_id, &trusty_app->props.uuid);
    dprintf(SPEW, "trusty_app %u name: %s priority: %u\n", trusty_app->app_id,
            trusty_app->props.app_name, trusty_app->props.priority);

    if (trusty_app->props.feature_bti) {
        const char* status;
#ifndef USER_BTI_DISABLED
        status = arch_bti_supported() ? "enabled"
                                      : "ignored (unsupported by hw)";
#else
        status = "ignored (disabled in kernel)";
#endif
        dprintf(SPEW, "trusty_app %u bti: %s\n", trusty_app->app_id, status);
    }

    LTRACEF("trusty_app %p: stack_sz=0x%x\n", trusty_app,
            trusty_app->props.min_stack_size);
    LTRACEF("trusty_app %p: heap_sz=0x%x\n", trusty_app,
            trusty_app->props.min_heap_size);
    LTRACEF("trusty_app %p: num_io_mem=%d\n", trusty_app,
            trusty_app->props.map_io_mem_cnt);

    return NO_ERROR;
}

static status_t init_brk(struct trusty_app* trusty_app) {
    status_t status;
    vaddr_t start_brk;
    vaddr_t brk_size;

    /*
     * Make sure the heap is page aligned and page sized.
     * Most user space allocators assume this. Historically, we tried to
     * scavenge space at the end of .bss for the heap but this misaligned the
     * heap and caused userspace allocators to behave in subtly unpredictable
     * ways.
     */
    start_brk = 0;
    brk_size = round_up(trusty_app->props.min_heap_size, PAGE_SIZE);

    /* Allocate if needed. */
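    /*
     * Note (an assumption suggested by the reservation name "brk_heap_res"):
     * vmm_alloc_no_physical only reserves address space for the heap here;
     * physical pages are expected to be committed later, once the app
     * actually grows its brk.
     */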
    if (brk_size > 0) {
        status = vmm_alloc_no_physical(
                trusty_app->aspace, "brk_heap_res", brk_size,
                (void**)&start_brk, PAGE_SIZE_SHIFT, 0,
                ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);

        if (status != NO_ERROR) {
            dprintf(CRITICAL,
                    "failed(%d) to create heap(0x%" PRIxPTR
                    ") for app %u, %s\n",
                    status, start_brk, trusty_app->app_id,
                    trusty_app->props.app_name);
            return ERR_NO_MEMORY;
        }
    }

    /* Record the location. */
    trusty_app->used_brk = false;
    trusty_app->start_brk = start_brk;
    trusty_app->cur_brk = start_brk;
    trusty_app->end_brk = start_brk + brk_size;

    return NO_ERROR;
}

/**
 * select_load_bias() - Pick a load bias for an ELF
 * @phdr:      Pre-validated program header array base
 * @num_phdrs: Number of program headers
 * @aspace:    The address space the bias needs to be valid in
 * @out:       Out pointer to write the selected bias to. Only valid if the
 *             function returned 0.
 *
 * This function calculates an offset that can be added to every loadable ELF
 * segment in the image and still result in a legal load address.
 *
 * Return: A status code indicating whether a bias was located. If nonzero,
 *         the bias output may be invalid.
 */
static status_t select_load_bias(ELF_PHDR* phdr,
                                 size_t num_phdrs,
                                 vmm_aspace_t* aspace,
                                 vaddr_t* out) {
    DEBUG_ASSERT(out);
#if ASLR
    vaddr_t low = VADDR_MAX;
    vaddr_t high = 0;
    for (size_t i = 0; i < num_phdrs; i++, phdr++) {
        low = MIN(low, phdr->p_vaddr);
        vaddr_t candidate_high;
        if (!__builtin_add_overflow(phdr->p_vaddr, phdr->p_memsz,
                                    &candidate_high)) {
            high = MAX(high, candidate_high);
        } else {
            dprintf(CRITICAL, "Segment %zu overflows virtual address space\n",
                    i);
            return ERR_NOT_VALID;
        }
    }
    LTRACEF("ELF Segment range: %" PRIxVADDR "->%" PRIxVADDR "\n", low, high);

    DEBUG_ASSERT(high >= low);
    size_t size = round_up(high - low, PAGE_SIZE);
    LTRACEF("Spot size: %zu\n", size);

    vaddr_t spot;
    if (!vmm_find_spot(aspace, size, &spot)) {
        return ERR_NO_MEMORY;
    }
    LTRACEF("Load target: %" PRIxVADDR "\n", spot);

    /*
     * Overflow is acceptable here, since adding the delta to the lowest
     * ELF load address will still return to spot, which was the goal.
     */
    __builtin_sub_overflow(spot, low, out);
#else
    /* If ASLR is disabled, the app is not PIE, use a load bias of 0 */
    *out = 0;
#endif

    LTRACEF("Load bias: %" PRIxVADDR "\n", *out);

    return NO_ERROR;
}
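
/*
 * Worked example (illustrative numbers): if the ELF's segments span
 * low = 0x1000 .. high = 0x5000 and vmm_find_spot() returns
 * spot = 0x8000f000, the bias is
 *
 *   bias = spot - low = 0x8000f000 - 0x1000 = 0x8000e000
 *
 * so the lowest segment maps at 0x1000 + bias = spot. If spot < low, the
 * subtraction wraps, but adding the wrapped bias to low still lands on
 * spot modulo the address-space width, which is why the overflow above is
 * acceptable.
 */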

static bool elf_vaddr_mapped(struct trusty_app* trusty_app,
                             size_t vaddr,
                             ssize_t offset) {
    ELF_EHDR* elf_hdr = (ELF_EHDR*)trusty_app->app_img.img_start;
    void* trusty_app_image = (void*)trusty_app->app_img.img_start;
    ELF_PHDR* prg_hdr = (ELF_PHDR*)(trusty_app_image + elf_hdr->e_phoff);
    if (__builtin_add_overflow(vaddr, offset, &vaddr)) {
        return false;
    }
    for (size_t i = 0; i < elf_hdr->e_phnum; i++, prg_hdr++) {
        Elf_Addr end;
        __builtin_add_overflow(prg_hdr->p_vaddr, prg_hdr->p_memsz, &end);
        if (prg_hdr->p_type == PT_LOAD &&
            vaddr >= round_down(prg_hdr->p_vaddr, PAGE_SIZE) &&
            vaddr < round_up(end, PAGE_SIZE)) {
            return true;
        }
    }
    return false;
}

static status_t alloc_address_map(struct trusty_app* trusty_app) {
    ELF_EHDR* elf_hdr = (ELF_EHDR*)trusty_app->app_img.img_start;
    void* trusty_app_image;
    ELF_PHDR* prg_hdr;
    u_int i;
    status_t ret;
    trusty_app_image = (void*)trusty_app->app_img.img_start;

    prg_hdr = (ELF_PHDR*)(trusty_app_image + elf_hdr->e_phoff);

    if (!address_range_within_img(prg_hdr, sizeof(ELF_PHDR) * elf_hdr->e_phnum,
                                  &trusty_app->app_img)) {
        dprintf(CRITICAL, "ELF program headers table out of bounds\n");
        return ERR_NOT_VALID;
    }

    status_t bias_result =
            select_load_bias(prg_hdr, elf_hdr->e_phnum, trusty_app->aspace,
                             &trusty_app->load_bias);
    if (bias_result) {
        return bias_result;
    }

    size_t has_guard_low = 0;
    size_t has_guard_high = 0;

    /* create mappings for PT_LOAD sections */
    for (i = 0; i < elf_hdr->e_phnum; i++, prg_hdr++) {
        /* load_bias uses overflow to lower vaddr if needed */
        Elf_Addr p_vaddr;
        __builtin_add_overflow(prg_hdr->p_vaddr, trusty_app->load_bias,
                               &p_vaddr);

        LTRACEF("trusty_app %d, %s: ELF type 0x%x"
                ", vaddr 0x%08" PRIxELF_Addr ", paddr 0x%08" PRIxELF_Addr
                ", rsize 0x%08" PRIxELF_Size ", msize 0x%08" PRIxELF_Size
                ", flags 0x%08x\n",
                trusty_app->app_id, trusty_app->props.app_name, prg_hdr->p_type,
                p_vaddr, prg_hdr->p_paddr, prg_hdr->p_filesz, prg_hdr->p_memsz,
                prg_hdr->p_flags);

        if (prg_hdr->p_type != PT_LOAD)
            continue;

        if (p_vaddr < USER_ASPACE_BASE) {
            TRACEF("Attempted to load segment beneath user address space\n");
            return ERR_NOT_VALID;
        }

        vaddr_t vaddr = p_vaddr;
        vaddr_t img_kvaddr = (vaddr_t)(trusty_app_image + prg_hdr->p_offset);
        size_t mapping_size;

        if (vaddr & PAGE_MASK) {
            dprintf(CRITICAL,
                    "segment %u load address 0x%" PRIxVADDR
                    " is not page aligned for app %u, %s\n",
                    i, vaddr, trusty_app->app_id, trusty_app->props.app_name);
            return ERR_NOT_VALID;
        }

        if (img_kvaddr & PAGE_MASK) {
            dprintf(CRITICAL,
                    "segment %u image address 0x%" PRIxVADDR
                    " is not page aligned for app %u, %s\n",
                    i, img_kvaddr, trusty_app->app_id,
                    trusty_app->props.app_name);
            return ERR_NOT_VALID;
        }
1306 
1307         uint vmm_flags = VMM_FLAG_VALLOC_SPECIFIC;
1308         if (elf_vaddr_mapped(trusty_app, prg_hdr->p_vaddr,
1309                              -(ssize_t)PAGE_SIZE)) {
1310             vmm_flags |= VMM_FLAG_NO_START_GUARD;
1311         } else {
1312             has_guard_low++;
1313         }
1314         if (elf_vaddr_mapped(trusty_app, prg_hdr->p_vaddr + prg_hdr->p_memsz,
1315                              PAGE_SIZE)) {
1316             vmm_flags |= VMM_FLAG_NO_END_GUARD;
1317         } else {
1318             has_guard_high++;
1319         }
1320 
1321         uint arch_mmu_flags = ARCH_MMU_FLAG_PERM_USER;
1322         if (!(prg_hdr->p_flags & PF_X)) {
1323             arch_mmu_flags += ARCH_MMU_FLAG_PERM_NO_EXECUTE;
1324         }
1325 
1326         if (prg_hdr->p_flags & PF_W) {
1327             paddr_t upaddr;
1328             void* load_kvaddr;
1329             size_t copy_size;
1330             size_t file_size;
1331             mapping_size = round_up(prg_hdr->p_memsz, PAGE_SIZE);
1332 
1333             if (!address_range_within_img((void*)img_kvaddr, prg_hdr->p_filesz,
1334                                           &trusty_app->app_img)) {
1335                 dprintf(CRITICAL, "ELF Program segment %u out of bounds\n", i);
1336                 return ERR_NOT_VALID;
1337             }
1338 
1339             ret = vmm_alloc(trusty_app->aspace, "elfseg", mapping_size,
1340                             (void**)&vaddr, PAGE_SIZE_SHIFT, vmm_flags,
1341                             arch_mmu_flags);
1342 
1343             if (ret != NO_ERROR) {
1344                 dprintf(CRITICAL,
1345                         "failed(%d) to allocate data segment(0x%" PRIxVADDR
1346                         ") %u for app %u, %s\n",
1347                         ret, vaddr, i, trusty_app->app_id,
1348                         trusty_app->props.app_name);
1349                 return ret;
1350             }
1351 
1352             ASSERT(vaddr == p_vaddr);
1353 
1354             file_size = prg_hdr->p_filesz;
1355             while (file_size > 0) {
1356                 ret = arch_mmu_query(&trusty_app->aspace->arch_aspace, vaddr,
1357                                      &upaddr, NULL);
1358                 if (ret != NO_ERROR) {
1359                     dprintf(CRITICAL, "Could not copy data segment: %d\n", ret);
1360                     return ret;
1361                 }
1362 
1363                 load_kvaddr = paddr_to_kvaddr(upaddr);
1364                 ASSERT(load_kvaddr);
1365                 copy_size = MIN(file_size, PAGE_SIZE);
1366                 memcpy(load_kvaddr, (void*)img_kvaddr, copy_size);
1367                 file_size -= copy_size;
1368                 vaddr += copy_size;
1369                 img_kvaddr += copy_size;
1370             }
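            /*
             * The memsz - filesz tail (typically .bss) is left untouched
             * here; this assumes vmm_alloc returned zero-filled pages, so no
             * explicit clearing is needed.
             */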
1371 
1372         } else {
1373             mapping_size = round_up(prg_hdr->p_filesz, PAGE_SIZE);
1374 
1375             if (!address_range_within_img((void*)img_kvaddr, mapping_size,
1376                                           &trusty_app->app_img)) {
1377                 dprintf(CRITICAL, "ELF Program segment %u out of bounds\n", i);
1378                 return ERR_NOT_VALID;
1379             }
1380             if (mapping_size != round_up(prg_hdr->p_memsz, PAGE_SIZE)) {
1381                 dprintf(CRITICAL, "ELF Program segment %u bad memsz\n", i);
1382                 return ERR_NOT_VALID;
1383             }
1384 
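            /*
             * Read-only segments are mapped directly onto the image's
             * physical pages instead of being copied. This only works when
             * there is no zero-filled tail to materialize, hence the check
             * above that filesz and memsz round up to the same size.
             */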
1385             paddr_t* paddr_arr =
1386                     calloc(mapping_size / PAGE_SIZE, sizeof(paddr_t));
1387             if (!paddr_arr) {
1388                 dprintf(CRITICAL,
1389                         "Failed to allocate physical address array\n");
1390                 return ERR_NO_MEMORY;
1391             }
1392 
1393             for (size_t j = 0; j < mapping_size / PAGE_SIZE; j++) {
1394                 paddr_arr[j] =
1395                         vaddr_to_paddr((void*)(img_kvaddr + PAGE_SIZE * j));
1396                 DEBUG_ASSERT(paddr_arr[j] && !(paddr_arr[j] & PAGE_MASK));
1397             }
1398 
1399             arch_mmu_flags |= ARCH_MMU_FLAG_PERM_RO;
1400             ret = vmm_alloc_physical_etc(
1401                     trusty_app->aspace, "elfseg", mapping_size, (void**)&vaddr,
1402                     PAGE_SIZE_SHIFT, paddr_arr, mapping_size / PAGE_SIZE,
1403                     vmm_flags, arch_mmu_flags);
1404             if (ret != NO_ERROR) {
1405                 dprintf(CRITICAL,
1406                         "failed(%d) to map RO segment(0x%" PRIxVADDR
1407                         ") %u for app %u, %s\n",
1408                         ret, vaddr, i, trusty_app->app_id,
1409                         trusty_app->props.app_name);
1410                 free(paddr_arr);
1411                 return ret;
1412             }
1413 
1414             ASSERT(vaddr == p_vaddr);
1415             free(paddr_arr);
1416         }
1417 
1418         LTRACEF("trusty_app %d, %s: load vaddr 0x%08" PRIxVADDR
1419                 ", paddr 0x%08" PRIxVADDR
1420                 ", rsize 0x%08zx, msize 0x%08" PRIxELF_Size
1421                 ", access r%c%c, flags 0x%x\n",
1422                 trusty_app->app_id, trusty_app->props.app_name, vaddr,
1423                 vaddr_to_paddr((void*)vaddr), mapping_size, prg_hdr->p_memsz,
1424                 arch_mmu_flags & ARCH_MMU_FLAG_PERM_RO ? '-' : 'w',
1425                 arch_mmu_flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE ? '-' : 'x',
1426                 arch_mmu_flags);
1427     }
1428 
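    /*
     * Assuming PT_LOAD segments appear in ascending address order, each
     * contiguous run of segments should contribute exactly one leading and
     * one trailing guard page, so both counters must be non-zero and equal.
     */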
1429     ASSERT(has_guard_low);
1430     ASSERT(has_guard_high);
1431     ASSERT(has_guard_low == has_guard_high);
1432 
1433     ret = init_brk(trusty_app);
1434     if (ret != NO_ERROR) {
1435         dprintf(CRITICAL,
1436                 "failed to load trusty_app: trusty_app heap creation error\n");
1437         return ret;
1438     }
1439 
1440     dprintf(SPEW,
1441             "trusty_app %d, %s: brk:  start 0x%08" PRIxPTR " end 0x%08" PRIxPTR
1442             "\n",
1443             trusty_app->app_id, trusty_app->props.app_name,
1444             trusty_app->start_brk, trusty_app->end_brk);
1445     dprintf(SPEW, "trusty_app %d, %s: entry 0x%08" PRIxELF_Addr "\n",
1446             trusty_app->app_id, trusty_app->props.app_name, elf_hdr->e_entry);
1447 
1448     return NO_ERROR;
1449 }
1450 
1451 static bool has_waiting_connection(struct trusty_app* app) {
1452     struct manifest_port_entry* entry;
1453 
1454     /*
1455      * Don't hold the apps lock when calling into other subsystems with calls
1456      * that may grab additional locks.
1457      */
1458     DEBUG_ASSERT(!is_mutex_held(&apps_lock));
1459 
1460     list_for_every_entry(&app->props.port_entry_list, entry,
1461                          struct manifest_port_entry, node) {
1462         if (ipc_connection_waiting_for_port(entry->path, entry->flags)) {
1463             return true;
1464         }
1465     }
1466 
1467     return false;
1468 }
1469 
1470 static void kill_waiting_connections(struct trusty_app* app) {
1471     struct manifest_port_entry* entry;
1472 
1473     /*
1474      * Don't hold the apps lock when calling into other subsystems with calls
1475      * that may grab additional locks.
1476      */
1477     DEBUG_ASSERT(!is_mutex_held(&apps_lock));
1478 
1479     list_for_every_entry(&app->props.port_entry_list, entry,
1480                          struct manifest_port_entry, node) {
1481         ipc_remove_connection_waiting_for_port(entry->path, entry->flags);
1482     }
1483 }
1484 
1485 /* Must be called with the apps_lock held */
1486 static status_t request_app_start_locked(struct trusty_app* app) {
1487     DEBUG_ASSERT(is_mutex_held(&apps_lock));
1488 
1489     switch (app->state) {
1490     case APP_NOT_RUNNING:
1491         app->state = APP_STARTING;
1492         event_signal(&app_mgr_event, false);
1493         return NO_ERROR;
1494     case APP_STARTING:
1495     case APP_RUNNING:
1496     case APP_RESTARTING:
1497         return ERR_ALREADY_STARTED;
1498     case APP_TERMINATING:
1499         /*
1500          * We got a new connection while terminating, change the state so
1501          * app_mgr_handle_terminating can restart the app.
1502          */
1503         app->state = APP_RESTARTING;
1504         return ERR_ALREADY_STARTED;
1505     case APP_FAILED_TO_START:
1506         /* The app failed to start so it shouldn't accept new connections. */
1507         return ERR_CANCELLED;
1508         /*
1509          * There is no default case here because we want the compiler to warn us
1510          * if we forget a state (controlled by the -Wswitch option which is
1511          * included in -Wall). Whenever someone adds a new state without
1512          * handling it here, they should get a compiler error.
1513          */
1514     }
1515 }
1516 
1517 /*
1518  * Create a trusty_app from its memory image and add it to the global list of
1519  * apps. If out_trusty_app is not NULL, the created app is returned through it.
1520  */
1521 static status_t trusty_app_create(struct trusty_app_img* app_img,
1522                                   struct trusty_app** out_trusty_app,
1523                                   uint32_t flags) {
1524     ELF_EHDR* ehdr;
1525     struct trusty_app* trusty_app;
1526     status_t ret;
1527     struct manifest_port_entry* port_entry;
1528     struct manifest_port_entry* tmp_port_entry;
1529     struct manifest_mmio_entry* mmio_entry;
1530     struct manifest_mmio_entry* tmp_mmio_entry;
1531 
1532     DEBUG_ASSERT(!(flags & ~(uint32_t)APP_FLAGS_CREATION_MASK));
1533 
1534     if (app_img->img_start & PAGE_MASK || app_img->img_end & PAGE_MASK) {
1535         dprintf(CRITICAL,
1536                 "app image is not page aligned start 0x%" PRIxPTR
1537                 " end 0x%" PRIxPTR "\n",
1538                 app_img->img_start, app_img->img_end);
1539         return ERR_NOT_VALID;
1540     }
1541 
1542     dprintf(SPEW, "trusty_app: start %p size 0x%08" PRIxPTR " end %p\n",
1543             (void*)app_img->img_start, app_img->img_end - app_img->img_start,
1544             (void*)app_img->img_end);
1545 
1546     trusty_app = (struct trusty_app*)calloc(1, sizeof(struct trusty_app));
1547     if (!trusty_app) {
1548         dprintf(CRITICAL,
1549                 "trusty_app: failed to allocate memory for trusty app\n");
1550         return ERR_NO_MEMORY;
1551     }
1552     list_initialize(&trusty_app->props.port_entry_list);
1553     list_initialize(&trusty_app->props.mmio_entry_list);
1554     list_initialize(&trusty_app->props.dma_entry_list);
1555 
1556     ehdr = (ELF_EHDR*)app_img->img_start;
1557     if (!address_range_within_img(ehdr, sizeof(ELF_EHDR), app_img)) {
1558         dprintf(CRITICAL, "trusty_app_create: ELF header out of bounds\n");
1559         ret = ERR_NOT_VALID;
1560         goto err_hdr;
1561     }
1562 
1563     if (strncmp((char*)ehdr->e_ident, ELFMAG, SELFMAG)) {
1564         dprintf(CRITICAL, "trusty_app_create: ELF header not found\n");
1565         ret = ERR_NOT_VALID;
1566         goto err_hdr;
1567     }
1568 
1569     trusty_app->app_id = trusty_next_app_id++;
1570     trusty_app->app_img = *app_img;
1571     trusty_app->state = APP_NOT_RUNNING;
1572     trusty_app->flags |= flags;
1573 
1574     mutex_acquire(&apps_lock);
1575 
1576     ret = load_app_config_options(trusty_app);
1577     if (ret == NO_ERROR) {
1578         list_add_tail(&trusty_app_list, &trusty_app->node);
1579     }
1580 
1581     mutex_release(&apps_lock);
1582 
1583     if (ret == NO_ERROR) {
1584         if (out_trusty_app) {
1585             /*
1586              * TODO: this returns an app pointer without holding the lock; the
1587              * app might get unloaded while the caller holds this pointer, so
1588              * we need to handle this case correctly
1589              */
1590             *out_trusty_app = trusty_app;
1591         }
1592 
1593         return ret;
1594     }
1595 
1596     dprintf(CRITICAL, "manifest processing failed(%d)\n", ret);
1597 
1598     /* fall through: undo any partially registered manifest entries */
1599     list_for_every_entry_safe(&trusty_app->props.port_entry_list, port_entry,
1600                               tmp_port_entry, struct manifest_port_entry,
1601                               node) {
1602         list_delete(&port_entry->node);
1603         free(port_entry);
1604     }
1605     list_for_every_entry_safe(&trusty_app->props.mmio_entry_list, mmio_entry,
1606                               tmp_mmio_entry, struct manifest_mmio_entry,
1607                               node) {
1608         list_delete(&mmio_entry->node);
1609         vmm_obj_del_ref(&mmio_entry->phys_mem_obj.vmm_obj,
1610                         &mmio_entry->phys_mem_obj_self_ref);
1611     }
1612 err_hdr:
1613     free(trusty_app);
1614     return ret;
1615 }
1616 
1617 status_t trusty_app_create_and_start(struct trusty_app_img* app_img,
1618                                      uint32_t flags) {
1619     status_t ret;
1620     struct trusty_app* trusty_app;
1621 
1622     ret = trusty_app_create(app_img, &trusty_app, flags);
1623     if (ret != NO_ERROR) {
1624         return ret;
1625     }
1626 
1627     /* Loadable apps with deferred_start might have clients waiting for them */
1628     if (!(trusty_app->props.mgmt_flags &
1629           APP_MANIFEST_MGMT_FLAGS_DEFERRED_START) ||
1630         has_waiting_connection(trusty_app)) {
1631         mutex_acquire(&apps_lock);
1632         ret = request_app_start_locked(trusty_app);
1633         mutex_release(&apps_lock);
1634 
1635         /*
1636          * Since we drop apps_lock between trusty_app_create and here,
1637          * it is possible for another thread to race us and start the
1638          * app from trusty_app_request_start_by_port before we
1639          * reacquire the lock. In that case, request_app_start_locked
1640          * returns ERR_ALREADY_STARTED here. We treat this case as a
1641          * success and return NO_ERROR since the application is
1642          * running and we don't want the kernel service to
1643          * free its memory.
1644          */
1645         if (ret == ERR_ALREADY_STARTED) {
1646             ret = NO_ERROR;
1647         }
1648     }
1649 
1650     return ret;
1651 }
1652 
1653 status_t trusty_app_setup_mmio(struct trusty_app* trusty_app,
1654                                uint32_t mmio_id,
1655                                user_addr_t* uaddr_p,
1656                                uint32_t map_size) {
1657     status_t ret;
1658     struct manifest_mmio_entry* mmio_entry;
1659 
1660     /* Should only be called on the currently running app */
1661     DEBUG_ASSERT(trusty_app == current_trusty_app());
1662 
1663     ASSERT(uaddr_p);
1664     void* va = (void*)(uintptr_t)(*uaddr_p);
1665 
1666     list_for_every_entry(&trusty_app->props.mmio_entry_list, mmio_entry,
1667                          struct manifest_mmio_entry, node) {
1668         if (mmio_entry->id != mmio_id) {
1669             continue;
1670         }
1671 
1672         map_size = round_up(map_size, PAGE_SIZE);
1673 
1674         ret = vmm_alloc_obj(
1675                 trusty_app->aspace, "mmio", &mmio_entry->phys_mem_obj.vmm_obj,
1676                 0, map_size, &va, 0, 0,
1677                 ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
1678         if (ret == NO_ERROR) {
1679             *uaddr_p = (user_addr_t)(uintptr_t)va;
1680             DEBUG_ASSERT((void*)(uintptr_t)(*uaddr_p) == va);
1681         }
1682         return ret;
1683     }
1684 
1685     return ERR_NOT_FOUND;
1686 }
1687 
1688 static status_t trusty_app_start(struct trusty_app* trusty_app) {
1689     char name[32];
1690     struct trusty_thread* trusty_thread;
1691     struct trusty_app_notifier* n;
1692     ELF_EHDR* elf_hdr;
1693     uint flags = 0;
1694     int ret;
1695 
1696     DEBUG_ASSERT(trusty_app->state == APP_STARTING);
1697 
1698     snprintf(name, sizeof(name), "trusty_app_%d_%08x-%04x-%04x",
1699              trusty_app->app_id, trusty_app->props.uuid.time_low,
1700              trusty_app->props.uuid.time_mid,
1701              trusty_app->props.uuid.time_hi_and_version);
1702 
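    /*
     * Enable ARM Branch Target Identification (BTI) enforcement for this
     * app's address space when the manifest requests the feature and the
     * hardware supports it, unless user-space BTI was compiled out.
     */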
1703 #ifndef USER_BTI_DISABLED
1704     if (trusty_app->props.feature_bti && arch_bti_supported()) {
1705         flags |= VMM_ASPACE_FLAG_BTI;
1706     }
1707 #endif
1708 
1709     ret = vmm_create_aspace_with_quota(&trusty_app->aspace, name,
1710                                        trusty_app->props.min_heap_size, flags);
1711     if (ret != NO_ERROR) {
1712         dprintf(CRITICAL, "Failed(%d) to allocate address space for %s\n", ret,
1713                 name);
1714         goto err_aspace;
1715     }
1716 
1717     ret = alloc_address_map(trusty_app);
1718     if (ret != NO_ERROR) {
1719         dprintf(CRITICAL, "failed(%d) to load address map for %s\n", ret, name);
1720         goto err_map;
1721     }
1722 
1723     /* allocate the app-local storage (ALS) slot array for this app */
1724     trusty_app->als = calloc(1, als_slot_cnt * sizeof(void*));
1725     if (!trusty_app->als) {
1726         dprintf(CRITICAL, "failed to allocate local storage for %s\n", name);
1727         ret = ERR_NO_MEMORY;
1728         /* alloc_address_map gets cleaned up by destroying the address space */
1729         goto err_alloc;
1730     }
1731 
1732     /* call all registered startup notifiers */
1733     list_for_every_entry(&app_notifier_list, n, struct trusty_app_notifier,
1734                          node) {
1735         if (!n->startup)
1736             continue;
1737 
1738         ret = n->startup(trusty_app);
1739         if (ret != NO_ERROR) {
1740             dprintf(CRITICAL, "failed(%d) to invoke startup notifier for %s\n",
1741                     ret, name);
1742             goto err_notifier;
1743         }
1744     }
1745 
1746     elf_hdr = (ELF_EHDR*)trusty_app->app_img.img_start;
1747     vaddr_t entry;
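    /* like the segment vaddrs above, the entry point is biased with a
     * deliberately wrapping add */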
1748     __builtin_add_overflow(elf_hdr->e_entry, trusty_app->load_bias, &entry);
1749     trusty_thread = trusty_thread_create(
1750             name, entry, trusty_app->props.priority,
1751             trusty_app->props.min_stack_size,
1752             trusty_app->props.min_shadow_stack_size, trusty_app);
1753 
1754     if (!trusty_thread) {
1755         dprintf(CRITICAL, "failed to allocate trusty thread for %s\n", name);
1756         ret = ERR_NO_MEMORY;
1757         goto err_thread;
1758     }
1759 
1760     trusty_app->thread = trusty_thread;
1761 
1762     trusty_app->state = APP_RUNNING;
1763     ret = trusty_thread_start(trusty_app->thread);
1764 
1765     ASSERT(ret == NO_ERROR);
1766 
1767     return ret;
1768 
1769 err_thread:
1770 err_notifier:
1771     /* n points to the failed notifier, or just past the last one if every startup hook succeeded; walk back and shut down the notifiers that ran */
1772     if (n != NULL) {
1773         n = list_prev_type(&app_notifier_list, &n->node,
1774                            struct trusty_app_notifier, node);
1775     } else {
1776         n = list_peek_tail_type(&app_notifier_list, struct trusty_app_notifier,
1777                                 node);
1778     }
1779 
1780     while (n != NULL) {
1781         /* skip notifiers without a shutdown hook, but keep walking the list */
1782         if (n->shutdown && n->shutdown(trusty_app) != NO_ERROR)
1783             panic("failed to invoke shutdown notifier for %s\n", name);
1784 
1785         n = list_prev_type(&app_notifier_list, &n->node,
1786                            struct trusty_app_notifier, node);
1787     }
1790 
1791     free(trusty_app->als);
1792     trusty_app->als = NULL;
1793 err_alloc:
1794 err_map:
1795     vmm_free_aspace(trusty_app->aspace);
1796     trusty_app->aspace = NULL;
1797 err_aspace:
1798     return ret;
1799 }
1800 
1801 static void __NO_RETURN trusty_app_exit_etc(int status,
1802                                             uint32_t crash_reason,
1803                                             bool is_crash) {
1804     status_t ret;
1805     struct trusty_app* app;
1806     struct trusty_app_notifier* notifier;
1807     lk_time_ns_t restart_timeout;
1808 
1809     app = current_trusty_app();
1810 
1811     DEBUG_ASSERT(app->state == APP_RUNNING);
1812 
1813     LTRACEF("exiting app %u, %s...\n", app->app_id, app->props.app_name);
1814 
1815     if (status) {
1816         TRACEF("%s: exited with exit code %d\n", app->aspace->name, status);
1817         if (!(app->props.mgmt_flags &
1818               APP_MANIFEST_MGMT_FLAGS_NON_CRITICAL_APP)) {
1819             panic("Unclean exit from critical app\n");
1820         }
1821         dump_backtrace();
1822         dprintf(ALWAYS, "%s\n", lk_version);
1823         restart_timeout = TRUSTY_APP_RESTART_TIMEOUT_FAILURE;
1824     } else {
1825         restart_timeout = TRUSTY_APP_RESTART_TIMEOUT_SUCCESS;
1826     }
1827     app->min_start_time = current_time_ns() + restart_timeout;
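    /*
     * min_start_time throttles restarts: an app that exited cleanly becomes
     * eligible to run again almost immediately, while one that exited with
     * an error must wait out the much longer failure timeout first.
     */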
1828 
1829     list_for_every_entry(&app_notifier_list, notifier,
1830                          struct trusty_app_notifier, node) {
1831         if (!notifier->shutdown)
1832             continue;
1833 
1834         ret = notifier->shutdown(app);
1835         if (ret != NO_ERROR)
1836             panic("shutdown notifier failed(%d) for app %u, %s\n", ret,
1837                   app->app_id, app->props.app_name);
1838     }
1839     /* Do not report normal exits with exit code 0 */
1840     if (is_crash || crash_reason != 0) {
1841         list_for_every_entry(&app_notifier_list, notifier,
1842                              struct trusty_app_notifier, node) {
1843             if (!notifier->crash) {
1844                 continue;
1845             }
1846 
1847             ret = notifier->crash(app, crash_reason, is_crash);
1848             if (ret != NO_ERROR) {
1849                 panic("crash notifier failed(%d) for app %u, %s\n", ret,
1850                       app->app_id, app->props.app_name);
1851             }
1852         }
1853     }
1854 
1855     free(app->als);
1856     app->als = NULL;
1857     mutex_acquire(&apps_lock);
1858     app->state = APP_TERMINATING;
1859     mutex_release(&apps_lock);
1860 
1861     event_signal(&app_mgr_event, false);
1862     trusty_thread_exit(status);
1863 }
1864 
1865 void trusty_app_exit(int status) {
1866     /* Report exits with non-zero status as crashes */
1867     trusty_app_exit_etc(status, (uint32_t)status, false);
1868 }
1869 
1870 void trusty_app_crash(uint32_t reason) {
1871     trusty_app_exit_etc(1 /*EXIT_FAILURE*/, reason, true);
1872 }
1873 
1874 static status_t app_mgr_handle_starting(struct trusty_app* app) {
1875     status_t ret;
1876 
1877     DEBUG_ASSERT(is_mutex_held(&apps_lock));
1878     DEBUG_ASSERT(app->state == APP_STARTING);
1879 
1880     LTRACEF("starting app %u, %s\n", app->app_id, app->props.app_name);
1881 
1882     ret = trusty_app_start(app);
1883 
1884     if (ret != NO_ERROR) {
1885         /*
1886          * Drop the lock to call into ipc to kill waiting connections.
1887          * We put the app in the APP_FAILED_TO_START state so no new
1888          * connections are accepted and also to prevent it from being removed.
1889          */
1890         app->state = APP_FAILED_TO_START;
1891 
1892         mutex_release(&apps_lock);
1893         kill_waiting_connections(app);
1894         mutex_acquire(&apps_lock);
1895 
1896         DEBUG_ASSERT(app->state == APP_FAILED_TO_START);
1897     }
1898     return ret;
1899 }
1900 
1901 static status_t app_mgr_handle_terminating(struct trusty_app* app) {
1902     status_t ret;
1903     int retcode;
1904     bool restart_app;
1905 
1906     DEBUG_ASSERT(is_mutex_held(&apps_lock));
1907     DEBUG_ASSERT(app->state == APP_TERMINATING || app->state == APP_RESTARTING);
1908 
1909     LTRACEF("waiting for app %u, %s to exit\n", app->app_id,
1910             app->props.app_name);
1911 
1912     ret = thread_join(app->thread->thread, &retcode, INFINITE_TIME);
1913     ASSERT(ret == NO_ERROR);
1914     free(app->thread);
1915     app->thread = NULL;
1916     ret = vmm_free_aspace(app->aspace);
1917     app->aspace = NULL;
1918 
1919     /*
1920      * Panic if app exited with dma active. An unclean exit from a critical app
1921      * will already have panic'ed the kernel so this check will only detect when
1922      * critical apps exit cleanly with dma active and when non-critical apps
1923      * exit for any reason with dma active.
1924      */
1925     if (!list_is_empty(&app->props.dma_entry_list)) {
1926         mutex_release(&apps_lock);
1927         panic("%s: exited(%d) with dma active\n", app->props.app_name, retcode);
1928     }
1929 
1930     if (app->props.mgmt_flags & APP_MANIFEST_MGMT_FLAGS_RESTART_ON_EXIT) {
1931         restart_app = true;
1932     } else if (app->state == APP_TERMINATING) {
1933         /*
1934          * Drop the lock to call into ipc to check for connections. This is safe
1935          * since the app is in the APP_TERMINATING state so it cannot be
1936          * removed. We don't need to do this in APP_RESTARTING since that state
1937          * already marks that a connection is pending. If the app is marked
1938          * restart-on-exit, then we also go ahead with the restart.
1939          */
1940         mutex_release(&apps_lock);
1941         restart_app = has_waiting_connection(app);
1942         /*
1943          * We might get a new connection after has_waiting_connection returns
1944          * false. In that case, request_app_start_locked should change the state
1945          * to APP_RESTARTING
1946          */
1947         mutex_acquire(&apps_lock);
1948     } else {
1949         restart_app = false;
1950     }
1951 
1952     DEBUG_ASSERT(app->state == APP_TERMINATING || app->state == APP_RESTARTING);
1953     if (app->state == APP_RESTARTING) {
1954         restart_app = true;
1955     }
1956 
1957     if (restart_app) {
1958         app->state = APP_STARTING;
1959         event_signal(&app_mgr_event, false);
1960     } else {
1961         app->state = APP_NOT_RUNNING;
1962     }
1963 
1964     return ret;
1965 }
1966 
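/*
 * App manager thread: woken via app_mgr_event whenever an app changes state,
 * it walks the global app list under apps_lock and drives the STARTING and
 * TERMINATING/RESTARTING transitions. A critical app that fails to start, or
 * any app that fails to terminate cleanly, panics the kernel.
 */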
1967 static int app_mgr(void* arg) {
1968     status_t ret;
1969     struct trusty_app* app;
1970 
1971     while (true) {
1972         LTRACEF("app manager waiting for events\n");
1973         event_wait(&app_mgr_event);
1974 
1975         mutex_acquire(&apps_lock);
1976 
1977         list_for_every_entry(&trusty_app_list, app, struct trusty_app, node) {
1978             switch (app->state) {
1979             case APP_TERMINATING:
1980             case APP_RESTARTING:
1981                 ret = app_mgr_handle_terminating(app);
1982                 if (ret != NO_ERROR)
1983                     panic("failed(%d) to terminate app %u, %s\n", ret,
1984                           app->app_id, app->props.app_name);
1985                 break;
1986             case APP_NOT_RUNNING:
1987                 break;
1988             case APP_STARTING:
1989                 ret = app_mgr_handle_starting(app);
1990                 if (ret != NO_ERROR) {
1991                     if (!(app->props.mgmt_flags &
1992                           APP_MANIFEST_MGMT_FLAGS_NON_CRITICAL_APP)) {
1993                         panic("failed(%d) to start app %u, %s\n", ret,
1994                               app->app_id, app->props.app_name);
1995                     }
1996                     TRACEF("failed(%d) to start app %u, %s\n", ret, app->app_id,
1997                            app->props.app_name);
1998                 }
1999                 break;
2000             case APP_RUNNING:
2001                 break;
2002             case APP_FAILED_TO_START:
2003                 break;
2004             default:
2005                 panic("unknown state %u for app %u, %s\n", app->state,
2006                       app->app_id, app->props.app_name);
2007             }
2008         }
2009 
2010         mutex_release(&apps_lock);
2011     }
2012 }
2013 
2014 static void app_mgr_init(void) {
2015     status_t err;
2016     thread_t* app_mgr_thread;
2017 
2018     LTRACEF("Creating app manager thread\n");
2019     app_mgr_thread = thread_create("app manager", &app_mgr, NULL,
2020                                    DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
2021 
2022     if (!app_mgr_thread)
2023         panic("Failed to create app manager thread\n");
2024 
2025     err = thread_resume(app_mgr_thread);
2026     if (err != NO_ERROR)
2027         panic("Failed to start app manager thread\n");
2028 }
2029 
2030 bool trusty_app_is_startup_port(const char* port_path) {
2031     struct manifest_port_entry* entry;
2032 
2033     mutex_acquire(&apps_lock);
2034     entry = find_manifest_port_entry_locked(port_path, NULL);
2035     mutex_release(&apps_lock);
2036 
2037     return entry != NULL;
2038 }
2039 
2040 status_t trusty_app_request_start_by_port(const char* port_path,
2041                                           const uuid_t* uuid) {
2042     struct manifest_port_entry* entry;
2043     struct trusty_app* owner = NULL;
2044     status_t ret;
2045 
2046     mutex_acquire(&apps_lock);
2047 
2048     entry = find_manifest_port_entry_locked(port_path, &owner);
2049 
2050     if (!owner || ipc_port_check_access(entry->flags, uuid) != NO_ERROR) {
2051         ret = ERR_NOT_FOUND;
2052     } else {
2053         ret = request_app_start_locked(owner);
2054     }
2055 
2056     mutex_release(&apps_lock);
2057 
2058     return ret;
2059 }
2060 
2061 /**
2062  * prel_to_abs_ptr() - Convert a position-relative value to an absolute.
2063  * @ptr: Pointer to a pointer-sized position-relative value.
2064  * @result: Pointer to the location for the result.
2065  *
2066  * Return: %true in case of success, %false for overflow.
2067  */
2068 static inline bool prel_to_abs_ptr(const intptr_t* ptr, uintptr_t* result) {
2069     return !__builtin_add_overflow((uintptr_t)ptr, *ptr, result);
2070 }
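/*
 * Worked example (illustrative numbers): if the position-relative field
 * lives at address 0x8000 and holds the value 0x1000, the absolute pointer
 * it encodes is 0x8000 + 0x1000 = 0x9000. Storing fields relative to their
 * own location keeps the built-in app table position-independent, so it
 * needs no load-time relocation; the overflow check rejects values that
 * would wrap around the address space.
 */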
2071 
2072 void trusty_app_init(void) {
2073     struct trusty_builtin_app_img* builtin_app_img;
2074 
2075     finalize_registration();
2076 
2077     app_mgr_init();
2078 
2079     for (builtin_app_img = __trusty_app_list_start;
2080          builtin_app_img != __trusty_app_list_end; builtin_app_img++) {
2081         struct trusty_app_img app_img;
2082         if (!prel_to_abs_ptr(&builtin_app_img->manifest_start,
2083                              &app_img.manifest_start) ||
2084             !prel_to_abs_ptr(&builtin_app_img->manifest_end,
2085                              &app_img.manifest_end) ||
2086             !prel_to_abs_ptr(&builtin_app_img->img_start, &app_img.img_start) ||
2087             !prel_to_abs_ptr(&builtin_app_img->img_end, &app_img.img_end)) {
2088             panic("Invalid builtin app entry\n");
2089         }
2090 
2091         if (trusty_app_create(&app_img, NULL, 0) != NO_ERROR)
2092             panic("Failed to create builtin apps\n");
2093     }
2094 }
2095 
2096 /* Alternatively, should we just export trusty_app_list? */
2097 void trusty_app_forall(void (*fn)(struct trusty_app* ta, void* data),
2098                        void* data) {
2099     struct trusty_app* ta;
2100 
2101     if (fn == NULL)
2102         return;
2103 
2104     mutex_acquire(&apps_lock);
2105     list_for_every_entry(&trusty_app_list, ta, struct trusty_app, node)
2106             fn(ta, data);
2107     mutex_release(&apps_lock);
2108 }
2109 
2110 static void start_apps(uint level) {
2111     struct trusty_app* trusty_app;
2112 
2113     mutex_acquire(&apps_lock);
2114     list_for_every_entry(&trusty_app_list, trusty_app, struct trusty_app,
2115                          node) {
2116         if (trusty_app->props.mgmt_flags &
2117             APP_MANIFEST_MGMT_FLAGS_DEFERRED_START)
2118             continue;
2119 
2120         request_app_start_locked(trusty_app);
2121     }
2122     mutex_release(&apps_lock);
2123 }
2124 
2125 LK_INIT_HOOK(libtrusty_apps, start_apps, LK_INIT_LEVEL_APPS + 1);
2126