/*
 * Copyright (c) 2013-2016 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <err.h>
#include <kernel/event.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
#include <kernel/vm.h>
#include <lib/heap.h>
#include <lib/sm.h>
#include <lib/sm/sm_err.h>
#include <lib/sm/smcall.h>
#include <lk/init.h>
#include <platform.h>
#include <stdatomic.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>
#include <version.h>

#define LOCAL_TRACE 0

struct sm_std_call_state {
    spin_lock_t lock;
    event_t event;
    struct smc32_args args;
    long ret;
    bool done;
    int active_cpu; /* cpu that expects stdcall result */
    int initial_cpu; /* Debug info: cpu that started stdcall */
    int last_cpu; /* Debug info: most recent cpu expecting stdcall result */
    int restart_count;
};

extern unsigned long monitor_vector_table;
extern ulong lk_boot_args[4];

static void* boot_args;
static int boot_args_refcnt;
static mutex_t boot_args_lock = MUTEX_INITIAL_VALUE(boot_args_lock);
static atomic_uint_fast32_t sm_api_version;
static atomic_uint_fast32_t sm_api_version_min;
static atomic_uint_fast32_t sm_api_version_max = TRUSTY_API_VERSION_CURRENT;
static spin_lock_t sm_api_version_lock;
static atomic_bool platform_halted;

static event_t nsirqevent[SMP_MAX_CPUS];
static thread_t* nsirqthreads[SMP_MAX_CPUS];
static thread_t* nsidlethreads[SMP_MAX_CPUS];
static thread_t* stdcallthread;
static bool irq_thread_ready[SMP_MAX_CPUS];
struct sm_std_call_state stdcallstate = {
    .event = EVENT_INITIAL_VALUE(stdcallstate.event, 0, 0),
    .active_cpu = -1,
    .initial_cpu = -1,
    .last_cpu = -1,
};

extern smc32_handler_t sm_stdcall_table[];
extern smc32_handler_t sm_nopcall_table[];
extern smc32_handler_t sm_fastcall_table[];

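/*
 * Handle an API version request from the non-secure OS. The requested
 * version is clamped to sm_api_version_max; if it is at least
 * sm_api_version_min it becomes the new current and minimum version, so
 * a later call cannot downgrade it.
 */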
long smc_sm_api_version(struct smc32_args* args) {
    uint32_t api_version = args->params[0];

    spin_lock(&sm_api_version_lock);
    LTRACEF("request api version %d\n", api_version);
    if (api_version > sm_api_version_max) {
        api_version = sm_api_version_max;
    }

    if (api_version < sm_api_version_min) {
        TRACEF("ERROR: Tried to select incompatible api version %d < %d, current version %d\n",
               api_version, sm_api_version_min, sm_api_version);
        api_version = sm_api_version;
    } else {
        /* Update and lock the version to prevent downgrade */
        sm_api_version = api_version;
        sm_api_version_min = api_version;
    }
    spin_unlock(&sm_api_version_lock);

    LTRACEF("return api version %d\n", api_version);
    return api_version;
}

long smc_get_smp_max_cpus(struct smc32_args* args) {
    return SMP_MAX_CPUS;
}

uint32_t sm_get_api_version(void) {
    return sm_api_version;
}

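/*
 * Check whether the API version in use is at least @version_wanted, and
 * lock the negotiated range accordingly: on success raise the minimum
 * version, on failure cap the maximum below @version_wanted, so the
 * answer cannot change later. Used, for instance, as
 * sm_check_and_lock_api_version(TRUSTY_API_VERSION_SMP) to detect an
 * SMP-aware non-secure OS.
 */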
bool sm_check_and_lock_api_version(uint32_t version_wanted) {
    spin_lock_saved_state_t state;

    DEBUG_ASSERT(version_wanted > 0);

    if (sm_api_version_min >= version_wanted) {
        return true;
    }
    if (sm_api_version_max < version_wanted) {
        return false;
    }

    spin_lock_save(&sm_api_version_lock, &state, SPIN_LOCK_FLAG_IRQ_FIQ);
    if (sm_api_version < version_wanted) {
        sm_api_version_max = MIN(sm_api_version_max, version_wanted - 1);
        TRACEF("max api version set: %d\n", sm_api_version_max);
    } else {
        sm_api_version_min = MAX(sm_api_version_min, version_wanted);
        TRACEF("min api version set: %d\n", sm_api_version_min);
    }
    DEBUG_ASSERT(sm_api_version_min <= sm_api_version_max);
    DEBUG_ASSERT(sm_api_version >= sm_api_version_min);
    DEBUG_ASSERT(sm_api_version <= sm_api_version_max);

    spin_unlock_restore(&sm_api_version_lock, state, SPIN_LOCK_FLAG_IRQ_FIQ);

    return sm_api_version_min >= version_wanted;
}

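/*
 * Dispatch loop for queued standard calls. Runs in the sm-stdcall
 * thread: waits for sm_queue_stdcall to signal stdcallstate.event, runs
 * the handler from sm_stdcall_table, then publishes the result under
 * stdcallstate.lock and unsignals the event so the next call can be
 * queued.
 */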
static int __NO_RETURN sm_stdcall_loop(void* arg) {
    long ret;
    spin_lock_saved_state_t state;

    while (true) {
        LTRACEF("cpu %d, wait for stdcall\n", arch_curr_cpu_num());
        event_wait(&stdcallstate.event);

        /* Dispatch 'standard call' handler */
        LTRACEF("cpu %d, got stdcall: 0x%x, 0x%x, 0x%x, 0x%x\n",
                arch_curr_cpu_num(), stdcallstate.args.smc_nr,
                stdcallstate.args.params[0], stdcallstate.args.params[1],
                stdcallstate.args.params[2]);
        ret = sm_stdcall_table[SMC_ENTITY(stdcallstate.args.smc_nr)](
                &stdcallstate.args);
        LTRACEF("cpu %d, stdcall(0x%x, 0x%x, 0x%x, 0x%x) returned 0x%lx (%ld)\n",
                arch_curr_cpu_num(), stdcallstate.args.smc_nr,
                stdcallstate.args.params[0], stdcallstate.args.params[1],
                stdcallstate.args.params[2], ret, ret);
        spin_lock_save(&stdcallstate.lock, &state, SPIN_LOCK_FLAG_IRQ);
        stdcallstate.ret = ret;
        stdcallstate.done = true;
        event_unsignal(&stdcallstate.event);
        spin_unlock_restore(&stdcallstate.lock, state, SPIN_LOCK_FLAG_IRQ);
    }
}

/* must be called with irqs disabled */
static long sm_queue_stdcall(struct smc32_args* args) {
    long ret;
    uint cpu = arch_curr_cpu_num();

    spin_lock(&stdcallstate.lock);

    if (stdcallstate.event.signaled || stdcallstate.done) {
        if (args->smc_nr == SMC_SC_RESTART_LAST &&
            stdcallstate.active_cpu == -1) {
            stdcallstate.restart_count++;
            LTRACEF_LEVEL(3, "cpu %d, restart std call, restart_count %d\n",
                          cpu, stdcallstate.restart_count);
            goto restart_stdcall;
        }
        dprintf(CRITICAL, "%s: cpu %d, std call busy\n", __func__, cpu);
        ret = SM_ERR_BUSY;
        goto err;
    } else {
        if (args->smc_nr == SMC_SC_RESTART_LAST) {
            dprintf(CRITICAL,
                    "%s: cpu %d, unexpected restart, no std call active\n",
                    __func__, arch_curr_cpu_num());
            ret = SM_ERR_UNEXPECTED_RESTART;
            goto err;
        }
    }

    LTRACEF("cpu %d, queue std call 0x%x\n", cpu, args->smc_nr);
    stdcallstate.initial_cpu = cpu;
    stdcallstate.ret = SM_ERR_INTERNAL_FAILURE;
    stdcallstate.args = *args;
    stdcallstate.restart_count = 0;
    event_signal(&stdcallstate.event, false);

restart_stdcall:
    stdcallstate.active_cpu = cpu;
    ret = 0;

err:
    spin_unlock(&stdcallstate.lock);

    return ret;
}

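/*
 * Return to the non-secure OS and stay there until an smc arrives that
 * needs a thread to handle it. Fastcalls are dispatched directly from
 * this loop; 64-bit smcs are rejected with SM_ERR_NOT_SUPPORTED. Once
 * the platform has halted, everything except SMC_FC_FIQ_ENTER is
 * answered with SM_ERR_PANIC without leaving the loop.
 */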
static void sm_sched_nonsecure_fiq_loop(long ret, struct smc32_args* args) {
    while (true) {
        if (atomic_load(&platform_halted)) {
            ret = SM_ERR_PANIC;
        }
        sm_sched_nonsecure(ret, args);
        if (atomic_load(&platform_halted) && args->smc_nr != SMC_FC_FIQ_ENTER) {
            continue;
        }
        if (SMC_IS_SMC64(args->smc_nr)) {
            ret = SM_ERR_NOT_SUPPORTED;
            continue;
        }
        if (!SMC_IS_FASTCALL(args->smc_nr)) {
            break;
        }
        ret = sm_fastcall_table[SMC_ENTITY(args->smc_nr)](args);
    }
}

/* must be called with irqs disabled */
static void sm_return_and_wait_for_next_stdcall(long ret, int cpu) {
    struct smc32_args args = SMC32_ARGS_INITIAL_VALUE(args);

    do {
#if ARCH_HAS_FIQ
        arch_disable_fiqs();
#endif
        sm_sched_nonsecure_fiq_loop(ret, &args);
#if ARCH_HAS_FIQ
        arch_enable_fiqs();
#endif

        /* Allow concurrent SMC_SC_NOP calls on multiple cpus */
        if (args.smc_nr == SMC_SC_NOP) {
            LTRACEF_LEVEL(3, "cpu %d, got nop\n", cpu);
            ret = sm_nopcall_table[SMC_ENTITY(args.params[0])](&args);
        } else {
            ret = sm_queue_stdcall(&args);
        }
    } while (ret);
    sm_intc_enable_interrupts();
}

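/*
 * Hand the cpu back to the non-secure OS after a non-secure interrupt.
 * If this cpu owns the active standard call, give it up and return
 * SM_ERR_INTERRUPTED so the call can be restarted with
 * SMC_SC_RESTART_LAST; otherwise return SM_ERR_NOP_INTERRUPTED.
 */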
static void sm_irq_return_ns(void) {
    long ret;
    int cpu;

    cpu = arch_curr_cpu_num();

    spin_lock(&stdcallstate.lock); /* TODO: remove? */
    LTRACEF_LEVEL(2, "got irq on cpu %d, stdcallcpu %d\n", cpu,
                  stdcallstate.active_cpu);
    if (stdcallstate.active_cpu == cpu) {
        stdcallstate.last_cpu = stdcallstate.active_cpu;
        stdcallstate.active_cpu = -1;
        ret = SM_ERR_INTERRUPTED;
    } else {
        ret = SM_ERR_NOP_INTERRUPTED;
    }
    LTRACEF_LEVEL(2, "got irq on cpu %d, return %ld\n", cpu, ret);
    spin_unlock(&stdcallstate.lock);
    sm_return_and_wait_for_next_stdcall(ret, cpu);
}

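/*
 * Per-cpu thread (created in sm_init at HIGHEST_PRIORITY) that returns
 * the cpu to the non-secure OS when sm_handle_irq signals a non-secure
 * interrupt.
 */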
static int __NO_RETURN sm_irq_loop(void* arg) {
    int cpu;
    /* cpu that requested this thread, the current cpu could be different */
    int eventcpu = (uintptr_t)arg;

    /*
     * Run this thread with interrupts masked, so we don't reenter the
     * interrupt handler. The interrupt handler for non-secure interrupts
     * returns to this thread with the interrupt still pending.
     */
    arch_disable_ints();
    irq_thread_ready[eventcpu] = true;

    cpu = arch_curr_cpu_num();
    LTRACEF("wait for irqs for cpu %d, on cpu %d\n", eventcpu, cpu);
    while (true) {
        event_wait(&nsirqevent[eventcpu]);
        sm_irq_return_ns();
    }
}

/* must be called with irqs disabled */
static long sm_get_stdcall_ret(void) {
    long ret;
    uint cpu = arch_curr_cpu_num();

    spin_lock(&stdcallstate.lock);

    if (stdcallstate.active_cpu != (int)cpu) {
        dprintf(CRITICAL, "%s: stdcallcpu, a%d != curr-cpu %d, l%d, i%d\n",
                __func__, stdcallstate.active_cpu, cpu, stdcallstate.last_cpu,
                stdcallstate.initial_cpu);
        ret = SM_ERR_INTERNAL_FAILURE;
        goto err;
    }
    stdcallstate.last_cpu = stdcallstate.active_cpu;
    stdcallstate.active_cpu = -1;

    if (stdcallstate.done) {
        stdcallstate.done = false;
        ret = stdcallstate.ret;
        LTRACEF("cpu %d, return stdcall result, %ld, initial cpu %d\n", cpu,
                stdcallstate.ret, stdcallstate.initial_cpu);
    } else {
        if (sm_check_and_lock_api_version(TRUSTY_API_VERSION_SMP))
            ret = SM_ERR_CPU_IDLE; /* ns using smp api */
        else if (stdcallstate.restart_count)
            ret = SM_ERR_BUSY;
        else
            ret = SM_ERR_INTERRUPTED;
        LTRACEF("cpu %d, initial cpu %d, restart_count %d, std call not finished, return %ld\n",
                cpu, stdcallstate.initial_cpu, stdcallstate.restart_count, ret);
    }
err:
    spin_unlock(&stdcallstate.lock);

    return ret;
}

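/*
 * Per-cpu idle ns-switcher thread (LOWEST_PRIORITY + 1). When nothing
 * else is runnable, it returns to the non-secure OS, delivering the
 * standard call result if this cpu owns the active call and
 * SM_ERR_NOP_DONE otherwise, then waits for the next smc.
 */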
static int sm_wait_for_smcall(void* arg) {
    int cpu;
    long ret = 0;

    LTRACEF("wait for stdcalls, on cpu %d\n", arch_curr_cpu_num());

    while (true) {
        /*
         * Disable interrupts so stdcallstate.active_cpu does not
         * change to or from this cpu after checking it below.
         */
        arch_disable_ints();

        /* Switch to sm-stdcall if sm_queue_stdcall woke it up */
        thread_yield();

        cpu = arch_curr_cpu_num();
        if (cpu == stdcallstate.active_cpu)
            ret = sm_get_stdcall_ret();
        else
            ret = SM_ERR_NOP_DONE;

        sm_return_and_wait_for_next_stdcall(ret, cpu);

        /* Re-enable interrupts (needed for SMC_SC_NOP) */
        arch_enable_ints();
    }
}

#if WITH_LIB_SM_MONITOR
/* per-cpu secure monitor initialization */
static void sm_mon_percpu_init(uint level) {
    /* let normal world enable SMP, lock TLB, access CP10/11 */
    __asm__ volatile(
            "mrc p15, 0, r1, c1, c1, 2 \n"
            "orr r1, r1, #0xC00 \n"
            "orr r1, r1, #0x60000 \n"
            "mcr p15, 0, r1, c1, c1, 2 @ NSACR \n"
            :
            :
            : "r1");

    /* install the monitor vector table by writing MVBAR */
    __asm__ volatile("mcr p15, 0, %0, c12, c0, 1 \n"
                     :
                     : "r"(&monitor_vector_table));
}
LK_INIT_HOOK_FLAGS(libsm_mon_percpu,
                   sm_mon_percpu_init,
                   LK_INIT_LEVEL_PLATFORM - 3,
                   LK_INIT_FLAG_ALL_CPUS);
#endif

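/*
 * One-time initialization: map the boot parameter block described by
 * lk_boot_args[1]/[2] (if the bootloader supplied one), and create the
 * per-cpu irq and idle ns-switcher threads plus the sm-stdcall
 * dispatcher thread. Only the dispatcher is resumed here; the
 * ns-switcher threads stay suspended until resume_nsthreads runs from
 * the boot-args release path below.
 */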
static void sm_init(uint level) {
    status_t err;
    char name[32];

    mutex_acquire(&boot_args_lock);

    /* Map the boot arguments if supplied by the bootloader */
    if (lk_boot_args[1] && lk_boot_args[2]) {
        ulong offset = lk_boot_args[1] & (PAGE_SIZE - 1);
        paddr_t paddr = round_down(lk_boot_args[1], PAGE_SIZE);
        size_t size = round_up(lk_boot_args[2] + offset, PAGE_SIZE);
        void* vptr;

        err = vmm_alloc_physical(vmm_get_kernel_aspace(), "sm", size, &vptr,
                                 PAGE_SIZE_SHIFT, paddr, 0,
                                 ARCH_MMU_FLAG_NS |
                                         ARCH_MMU_FLAG_PERM_NO_EXECUTE |
                                         ARCH_MMU_FLAG_CACHED);
        if (!err) {
            boot_args = (uint8_t*)vptr + offset;
            boot_args_refcnt++;
        } else {
            boot_args = NULL;
            TRACEF("Error mapping boot parameter block: %d\n", err);
        }
    }

    mutex_release(&boot_args_lock);

    for (int cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {
        event_init(&nsirqevent[cpu], false, EVENT_FLAG_AUTOUNSIGNAL);

        snprintf(name, sizeof(name), "irq-ns-switch-%d", cpu);
        nsirqthreads[cpu] =
                thread_create(name, sm_irq_loop, (void*)(uintptr_t)cpu,
                              HIGHEST_PRIORITY, DEFAULT_STACK_SIZE);
        if (!nsirqthreads[cpu]) {
            panic("failed to create irq NS switcher thread for cpu %d!\n", cpu);
        }
        thread_set_pinned_cpu(nsirqthreads[cpu], cpu);
        thread_set_real_time(nsirqthreads[cpu]);

        snprintf(name, sizeof(name), "idle-ns-switch-%d", cpu);
        nsidlethreads[cpu] =
                thread_create(name, sm_wait_for_smcall, NULL,
                              LOWEST_PRIORITY + 1, DEFAULT_STACK_SIZE);
        if (!nsidlethreads[cpu]) {
            panic("failed to create idle NS switcher thread for cpu %d!\n",
                  cpu);
        }
        thread_set_pinned_cpu(nsidlethreads[cpu], cpu);
        thread_set_real_time(nsidlethreads[cpu]);
    }

    stdcallthread = thread_create("sm-stdcall", sm_stdcall_loop, NULL,
                                  LOWEST_PRIORITY + 2, DEFAULT_STACK_SIZE);
    if (!stdcallthread) {
        panic("failed to create sm-stdcall thread!\n");
    }
    thread_set_real_time(stdcallthread);
    thread_resume(stdcallthread);
}

LK_INIT_HOOK(libsm, sm_init, LK_INIT_LEVEL_PLATFORM - 1);

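/*
 * Non-secure interrupt hook. Signal this cpu's irq-return thread so it
 * can reschedule and re-enter the non-secure OS with the interrupt
 * still pending; fall back to returning directly if that thread is not
 * ready yet.
 */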
enum handler_return sm_handle_irq(void) {
    int cpu = arch_curr_cpu_num();
    if (irq_thread_ready[cpu]) {
        event_signal(&nsirqevent[cpu], false);
    } else {
        TRACEF("warning: got ns irq before irq thread is ready\n");
        sm_irq_return_ns();
    }

    return INT_RESCHEDULE;
}

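/*
 * FIQ hook. Report the fiq to the non-secure side and loop until it
 * comes back with the matching restart smc: SMC_SC_RESTART_FIQ with the
 * RESTART_FIQ api version, SMC_SC_RESTART_LAST otherwise.
 */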
void sm_handle_fiq(void) {
    uint32_t expected_return;
    struct smc32_args args = SMC32_ARGS_INITIAL_VALUE(args);
    if (sm_check_and_lock_api_version(TRUSTY_API_VERSION_RESTART_FIQ)) {
        sm_sched_nonsecure_fiq_loop(SM_ERR_FIQ_INTERRUPTED, &args);
        expected_return = SMC_SC_RESTART_FIQ;
    } else {
        sm_sched_nonsecure_fiq_loop(SM_ERR_INTERRUPTED, &args);
        expected_return = SMC_SC_RESTART_LAST;
    }
    if (args.smc_nr != expected_return) {
        TRACEF("got bad restart smc %x, expected %x\n", args.smc_nr,
               expected_return);
        while (args.smc_nr != expected_return)
            sm_sched_nonsecure_fiq_loop(SM_ERR_INTERLEAVED_SMC, &args);
    }
}

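/*
 * Halt the secure OS. Mark the platform halted, wake the irq-return
 * threads so the other cpus drain back to the non-secure OS, then park
 * this cpu forever answering smcs with SM_ERR_PANIC.
 */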
void platform_halt(platform_halt_action suggested_action,
                   platform_halt_reason reason) {
    bool already_halted;
    struct smc32_args args = SMC32_ARGS_INITIAL_VALUE(args);

    arch_disable_ints();
    already_halted = atomic_exchange(&platform_halted, true);
    if (!already_halted) {
        for (int cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {
            if (nsirqthreads[cpu]) {
                event_signal(&nsirqevent[cpu], false);
            }
        }
        dprintf(ALWAYS, "%s\n", lk_version);
        dprintf(ALWAYS, "HALT: (reason = %d)\n", reason);
    }

#if ARCH_HAS_FIQ
    arch_disable_fiqs();
#endif
    while (true)
        sm_sched_nonsecure_fiq_loop(SM_ERR_PANIC, &args);
}

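/*
 * Take a reference to the boot parameter block mapped in sm_init and
 * return its address and size. Each successful call must be balanced by
 * sm_put_boot_args.
 */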
status_t sm_get_boot_args(void** boot_argsp, size_t* args_sizep) {
    status_t err = NO_ERROR;

    if (!boot_argsp || !args_sizep)
        return ERR_INVALID_ARGS;

    mutex_acquire(&boot_args_lock);

    if (!boot_args) {
        err = ERR_NOT_CONFIGURED;
        goto unlock;
    }

    boot_args_refcnt++;
    *boot_argsp = boot_args;
    *args_sizep = lk_boot_args[2];
unlock:
    mutex_release(&boot_args_lock);
    return err;
}

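/* Start the per-cpu ns-switcher threads created (suspended) in sm_init. */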
static void resume_nsthreads(void) {
    int i;

    for (i = 0; i < SMP_MAX_CPUS; i++) {
        DEBUG_ASSERT(nsirqthreads[i]);
        DEBUG_ASSERT(nsidlethreads[i]);

        thread_resume(nsirqthreads[i]);
        thread_resume(nsidlethreads[i]);
    }
}

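/*
 * Drop a reference to the boot parameter block. When the last reference
 * is released, the block is unmapped and the ns-switcher threads are
 * started; presumably the threads are held back until now so the block
 * is consumed before the non-secure OS can reuse that memory.
 */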
void sm_put_boot_args(void) {
    mutex_acquire(&boot_args_lock);

    if (!boot_args) {
        TRACEF("WARNING: caller does not own "
               "a reference to boot parameters\n");
        goto unlock;
    }

    boot_args_refcnt--;
    if (boot_args_refcnt == 0) {
        vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)boot_args);
        boot_args = NULL;
        resume_nsthreads();
    }
unlock:
    mutex_release(&boot_args_lock);
}

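/*
 * Init hook (LK_INIT_LEVEL_LAST) that drops the reference sm_init took
 * on the boot parameter block, or starts the ns-switcher threads
 * directly if the bootloader did not pass boot args.
 */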
static void sm_release_boot_args(uint level) {
    if (boot_args) {
        sm_put_boot_args();
    } else {
        /*
         * We need to resume the ns-switcher threads here if the
         * bootloader didn't pass boot args.
         */
        resume_nsthreads();
    }

    if (boot_args)
        TRACEF("WARNING: outstanding reference to boot args "
               "at the end of initialization!\n");
}

LK_INIT_HOOK(libsm_bootargs, sm_release_boot_args, LK_INIT_LEVEL_LAST);