1 /*
2 * Copyright (c) 2013-2018, Google, Inc. All rights reserved
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #define LOCAL_TRACE 0
25
26 #include <assert.h>
27 #include <bits.h>
28 #include <err.h>
29 #include <kernel/usercopy.h>
30 #include <list.h> // for containerof
31 #include <platform.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <sys/types.h>
35 #include <trace.h>
36
37 #include <kernel/event.h>
38 #include <kernel/thread.h>
39 #include <kernel/wait.h>
40
41 #include <lib/syscall.h>
42
43 #if WITH_TRUSTY_IPC
44
45 #include <lib/trusty/handle.h>
46 #include <lib/trusty/handle_set.h>
47 #include <lib/trusty/trusty_app.h>
48 #include <lib/trusty/uctx.h>
49 #include <lk/init.h>
50
51 #if !defined(IPC_MAX_HANDLES) || ((IPC_MAX_HANDLES + 0) == 0)
52 #error "IPC_MAX_HANDLES was not defined. Please check the project build definitions."
53 #endif
54
55 #define IPC_HANDLE_ID_BASE 1000
56
/*
 * One slot of the per-app handle table: the installed handle plus the
 * list of handle_refs tracking this handle's membership in handle sets
 * (including the implicit "all handles" set used by sys_wait_any).
 */
struct htbl_entry {
    struct handle* handle;     /* installed handle; NULL when slot is free */
    struct list_node ref_list; /* handle_refs, linked via their uctx_node */
};
61
/*
 * Per-application user context: tracks the handles installed on behalf
 * of one user space app.
 */
struct uctx {
    /* bitmap of in-use slots of htbl */
    unsigned long inuse[BITMAP_NUM_WORDS(IPC_MAX_HANDLES)];
    struct htbl_entry htbl[IPC_MAX_HANDLES];

    void* priv;         /* opaque pointer supplied to uctx_create */
    struct mutex mlock; /* guards inuse, htbl and hset_all */

#if WITH_WAIT_ANY_SUPPORT
    /* lazily built handle set containing every pollable handle */
    struct handle* hset_all;
#endif

    /* first valid handle id; ids map to table slots as (id - handle_id_base) */
    handle_id_t handle_id_base;
};
75
static status_t _uctx_startup(struct trusty_app* app);
static status_t _uctx_shutdown(struct trusty_app* app);

/* app-local-storage slot holding each app's uctx pointer */
static uint _uctx_slot_id;

/* creates/destroys a uctx automatically around app lifetime */
static struct trusty_app_notifier _uctx_notifier = {
        .startup = _uctx_startup,
        .shutdown = _uctx_shutdown,
};
84
_uctx_startup(struct trusty_app * app)85 static status_t _uctx_startup(struct trusty_app* app) {
86 struct uctx* uctx;
87
88 int err = uctx_create(app, &uctx);
89 if (err)
90 return err;
91
92 trusty_als_set(app, _uctx_slot_id, uctx);
93 return NO_ERROR;
94 }
95
_uctx_shutdown(struct trusty_app * app)96 static status_t _uctx_shutdown(struct trusty_app* app) {
97 LTRACEF("Destroying uctx for app: %d, %s\n", app->app_id,
98 app->props.app_name);
99 struct uctx* uctx;
100 uctx = trusty_als_get(app, _uctx_slot_id);
101
102 if (uctx)
103 uctx_destroy(uctx);
104
105 return NO_ERROR;
106 }
107
/*
 * One-time module init: reserve an app-local-storage slot for uctx
 * pointers and register the startup/shutdown notifier. Panics on
 * failure since IPC cannot function without either.
 */
static void uctx_init(uint level) {
    int rc;

    /* reserve an als slot for per-app uctx pointers */
    rc = trusty_als_alloc_slot();
    if (rc < 0)
        panic("failed (%d) to alloc als slot\n", rc);
    _uctx_slot_id = rc;

    /* hook app lifecycle so contexts are created and destroyed */
    rc = trusty_register_app_notifier(&_uctx_notifier);
    if (rc < 0)
        panic("failed (%d) to register uctx notifier\n", rc);
}
122
123 LK_INIT_HOOK(uctx, uctx_init, LK_INIT_LEVEL_APPS - 2);
124
125 /*
126 * Get uctx context of the current app
127 */
current_uctx(void)128 struct uctx* current_uctx(void) {
129 struct trusty_app* tapp = current_trusty_app();
130 return trusty_als_get(tapp, _uctx_slot_id);
131 }
132
133 /*
134 * Check if specified handle_id does represent a valid handle
135 * for specified user context.
136 *
137 * On success return index of the handle in handle table,
138 * negative error otherwise
139 */
static int _check_handle_id(struct uctx* ctx, handle_id_t handle_id) {
    uint32_t idx;

    DEBUG_ASSERT(ctx);

    /*
     * This check is technically not required because the wrapping of the
     * arithmetic combined with the bounds check will detect this case.
     * However, this makes it more obviously correct, guards against someone
     * making idx signed or IPC_MAX_HANDLES very high, and makes us pass
     * UBSan.
     */
    if (unlikely(handle_id < ctx->handle_id_base)) {
        LTRACEF("%d is below handle base\n", handle_id);
        return ERR_BAD_HANDLE;
    }

    idx = handle_id - ctx->handle_id_base;
    if (unlikely(idx >= IPC_MAX_HANDLES)) {
        LTRACEF("%d is invalid handle id\n", handle_id);
        return ERR_BAD_HANDLE;
    }

    if (!bitmap_test(ctx->inuse, idx)) {
        LTRACEF("%d is unused handle id\n", handle_id);
        return ERR_NOT_FOUND;
    }

    /*
     * There should be a handle there. Assert the pointer itself:
     * asserting `&ctx->htbl[idx].handle` (the slot's address) is always
     * true and checks nothing.
     */
    ASSERT(ctx->htbl[idx].handle);

    return idx;
}
173
remove_handle(struct uctx * ctx,int idx)174 static struct handle* remove_handle(struct uctx* ctx, int idx) {
175 struct handle* h;
176 struct handle_ref* ref;
177 struct handle_ref* tmp;
178
179 h = ctx->htbl[idx].handle;
180 if (!h)
181 return NULL;
182
183 /* clear all references */
184 list_for_every_entry_safe(&ctx->htbl[idx].ref_list, ref, tmp,
185 struct handle_ref, uctx_node) {
186 list_delete(&ref->uctx_node);
187 if (list_in_list(&ref->set_node))
188 handle_set_detach_ref(ref);
189 handle_decref(ref->handle);
190 free(ref);
191 }
192
193 ctx->htbl[idx].handle = NULL;
194 bitmap_clear(ctx->inuse, idx);
195
196 return h;
197 }
198
199 /*
200 * Create new handle ref and add it to specified handle set.
201 * Add resulting ref to head of specified list.
202 */
/*
 * Create a new handle ref for h, attach it to handle set hset, and link
 * it at the head of ref_list. The ref owns one reference on h.
 * Returns NO_ERROR on success or a negative error code.
 */
static int _hset_add_handle(struct handle* hset,
                            struct handle* h,
                            uint32_t id,
                            uint32_t emask,
                            void* cookie,
                            struct list_node* ref_list) {
    struct handle_ref* new_ref;
    int rc;

    ASSERT(h);
    ASSERT(hset);
    ASSERT(ref_list);

    new_ref = calloc(1, sizeof(*new_ref));
    if (new_ref == NULL)
        return ERR_NO_MEMORY;

    /* the ref holds its own reference on the target handle */
    handle_incref(h);
    new_ref->handle = h;
    new_ref->id = id;
    new_ref->emask = emask;
    new_ref->cookie = cookie;

    rc = handle_set_attach(hset, new_ref);
    if (rc != NO_ERROR) {
        /* attach failed: undo the refcount and discard the ref */
        handle_decref(new_ref->handle);
        free(new_ref);
        return rc;
    }

    list_add_head(ref_list, &new_ref->uctx_node);
    return NO_ERROR;
}
236
237 #if WITH_WAIT_ANY_SUPPORT
238 /*
239 * Rebuild handle set containing all handles
240 */
static int rebuild_hset_all(struct uctx* ctx) {
    int ret;
    int idx;
    struct handle* h;
    struct handle_ref* ref;
    struct handle* hset_all;

    mutex_acquire(&ctx->mlock);

    /* another thread may have built the set before we took the lock */
    if (ctx->hset_all) {
        ret = NO_ERROR; /* Already exists. This is not an error */
        goto err_already_exists;
    }

    LTRACEF("Rebuilding all handles set\n");

    /* create all handle set */
    hset_all = handle_set_create();
    if (!hset_all) {
        LTRACEF("Out of memory\n");
        ret = ERR_NO_MEMORY;
        goto err_create;
    }

    /*
     * Attach every pollable handle. _hset_add_handle links each new ref
     * at the HEAD of the slot's ref_list; sys_set_cookie depends on the
     * hset_all ref being first in that list.
     */
    for (idx = 0; idx < (int)countof(ctx->htbl); idx++) {
        h = ctx->htbl[idx].handle;
        if (!h || !h->ops->poll)
            continue; /* skip NULL and non-pollable handles */

        ret = _hset_add_handle(hset_all, h, ctx->handle_id_base + idx, ~0U,
                               NULL, &ctx->htbl[idx].ref_list);
        if (ret != NO_ERROR) {
            LTRACEF("Failed (%d) to add handle\n", ret);
            goto err_add_handle;
        }
    }
    ctx->hset_all = hset_all;
    mutex_release(&ctx->mlock);

    return NO_ERROR;

err_add_handle:
    /*
     * Unwind every ref added above (slots below the failing idx). The ref
     * added by this function is at the head of each slot's ref_list, so
     * popping the head must yield a ref whose parent is hset_all.
     */
    for (idx--; idx >= 0; idx--) {
        h = ctx->htbl[idx].handle;
        if (!h || !h->ops->poll)
            continue; /* skip NULL and non-pollable handles */

        ref = list_remove_head_type(&ctx->htbl[idx].ref_list, struct handle_ref,
                                    uctx_node);
        ASSERT(ref && ref->parent == hset_all);
        handle_set_detach_ref(ref);
        handle_decref(ref->handle);
        free(ref);
    }
    handle_decref(hset_all);
err_create:
err_already_exists:
    mutex_release(&ctx->mlock);

    return ret;
}
302 #endif
303
/*
 * Allocate and initialize user context - the structure that is used
 * to keep track of handles on behalf of a user space app. Exactly one
 * user context is created for each trusty app during its initialization.
 */
uctx_create(void * priv,struct uctx ** ctx)309 int uctx_create(void* priv, struct uctx** ctx) {
310 struct uctx* new_ctx;
311
312 DEBUG_ASSERT(ctx);
313
314 new_ctx = calloc(1, sizeof(struct uctx));
315 if (!new_ctx) {
316 LTRACEF("Out of memory\n");
317 return ERR_NO_MEMORY;
318 }
319
320 new_ctx->priv = priv;
321 new_ctx->handle_id_base = IPC_HANDLE_ID_BASE;
322 mutex_init(&new_ctx->mlock);
323
324 for (uint i = 0; i < countof(new_ctx->htbl); i++)
325 list_initialize(&new_ctx->htbl[i].ref_list);
326
327 *ctx = new_ctx;
328
329 return NO_ERROR;
330 }
331
332 /*
333 * Destroy user context previously created by uctx_create.
334 */
uctx_destroy(struct uctx * ctx)335 void uctx_destroy(struct uctx* ctx) {
336 int i;
337 DEBUG_ASSERT(ctx);
338
339 for (i = 0; i < IPC_MAX_HANDLES; i++) {
340 struct handle* h = remove_handle(ctx, i);
341 if (h)
342 handle_close(h);
343 }
344
345 #if WITH_WAIT_ANY_SUPPORT
346 /* kill hset_all */
347 if (ctx->hset_all)
348 handle_decref(ctx->hset_all);
349 #endif
350
351 free(ctx);
352 }
353
354 /*
355 * Returns private data associated with user context. (Currently unused)
356 */
void* uctx_get_priv(struct uctx* ctx) {
    /* priv is the opaque pointer supplied to uctx_create() */
    ASSERT(ctx);
    return ctx->priv;
}
361
/*
 * Install specified handle into user handle table and increment installed
 * handle ref count accordingly.
 */
int uctx_handle_install(struct uctx* ctx,
                        struct handle* handle,
                        handle_id_t* id) {
    int slot;
    int rc = NO_ERROR;

    DEBUG_ASSERT(ctx);
    DEBUG_ASSERT(handle);
    DEBUG_ASSERT(id);

    mutex_acquire(&ctx->mlock);

    /* find a free slot in the handle table */
    slot = bitmap_ffz(ctx->inuse, IPC_MAX_HANDLES);
    if (slot < 0) {
        rc = ERR_NO_RESOURCES;
        goto done;
    }

    /* a free slot must be empty with no refs attached */
    ASSERT(!ctx->htbl[slot].handle);
    ASSERT(list_is_empty(&ctx->htbl[slot].ref_list));

#if WITH_WAIT_ANY_SUPPORT
    /* keep the all-handles set in sync with new pollable handles */
    if (ctx->hset_all && handle->ops->poll) {
        rc = _hset_add_handle(ctx->hset_all, handle,
                              ctx->handle_id_base + slot, ~0U, NULL,
                              &ctx->htbl[slot].ref_list);
        if (rc)
            goto done;
    }
#endif

    /* the table owns one reference on the installed handle */
    handle_incref(handle);
    ctx->htbl[slot].handle = handle;
    bitmap_set(ctx->inuse, slot);
    *id = ctx->handle_id_base + slot;

done:
    mutex_release(&ctx->mlock);
    return rc;
}
408
/*
 * Look up handle_id in the table and fill *out with a temporary ref
 * holding one reference on the handle (caller must decref it).
 */
static int uctx_handle_get_tmp_ref(struct uctx* ctx,
                                   handle_id_t handle_id,
                                   struct handle_ref* out) {
    int idx;

    DEBUG_ASSERT(ctx);
    DEBUG_ASSERT(out);

    mutex_acquire(&ctx->mlock);
    idx = _check_handle_id(ctx, handle_id);
    if (idx < 0) {
        mutex_release(&ctx->mlock);
        return idx;
    }

    /* take a reference on the handle we looked up */
    struct handle* h = ctx->htbl[idx].handle;
    handle_incref(h);
    out->handle = h;
    out->id = handle_id;
    out->emask = ~0U;
    out->cookie = NULL;

#if WITH_WAIT_ANY_SUPPORT
    /* inherit the global cookie held by the all-handles-set ref, if any */
    if (ctx->hset_all && h->ops->poll) {
        struct handle_ref* head = list_peek_head_type(
                &ctx->htbl[idx].ref_list, struct handle_ref, uctx_node);
        out->cookie = head->cookie;
    }
#endif
    mutex_release(&ctx->mlock);
    return NO_ERROR;
}
440
441 /*
442 * Retrieve handle from specified user context specified by
443 * given handle_id. Increment ref count for returned handle.
444 */
int uctx_handle_get(struct uctx* ctx,
                    handle_id_t handle_id,
                    struct handle** handle_ptr) {
    struct handle_ref ref;
    int rc;

    DEBUG_ASSERT(ctx);
    DEBUG_ASSERT(handle_ptr);

    /* the tmp ref carries the incremented refcount to the caller */
    rc = uctx_handle_get_tmp_ref(ctx, handle_id, &ref);
    if (rc != NO_ERROR)
        return rc;

    *handle_ptr = ref.handle;
    return NO_ERROR;
}
460
/*
 * Remove handle specified by handle ID from given user context and
 * return it to caller if requested. In the latter case the caller becomes
 * the owner of that handle.
 */
int uctx_handle_remove(struct uctx* ctx,
                       handle_id_t handle_id,
                       struct handle** handle_ptr) {
    struct handle* h = NULL;
    int idx;

    DEBUG_ASSERT(ctx);

    mutex_acquire(&ctx->mlock);
    idx = _check_handle_id(ctx, handle_id);
    if (idx >= 0)
        h = remove_handle(ctx, idx);
    mutex_release(&ctx->mlock);

    if (idx < 0)
        return idx;

    ASSERT(h);
    if (handle_ptr) {
        /* hand the caller its own reference */
        handle_incref(h);
        *handle_ptr = h;
    }
    handle_decref(h); /* drop the reference the table held */

    return NO_ERROR;
}
491
492 /******************************************************************************/
493
494 /* definition shared with userspace */
struct uevent {
    uint32_t handle;    /* id of the handle the event fired on */
    uint32_t event;     /* event bitmask reported by the wait */
    user_addr_t cookie; /* user cookie associated with the handle */
};
500
/*
 * Wait for an event on *target and, on success, copy the resulting
 * uevent out to user_event. Returns the wait status, or the
 * copy_to_user status if the wait succeeded.
 */
static int _wait_for_uevent(const struct handle_ref* target,
                            user_addr_t user_event,
                            unsigned long timeout_msecs) {
    struct uevent uevent;
    /*
     * Zero-initialize: the trailing LTRACEF reads result.id even when
     * the wait fails, which previously read indeterminate stack memory.
     */
    struct handle_ref result = {0};

    int ret = handle_ref_wait(target, &result, timeout_msecs);
    if (ret >= 0) {
        DEBUG_ASSERT(result.handle); /* there should be a handle */

        /* got an event; report id, event mask and cookie to userspace */
        memset(&uevent, 0, sizeof(uevent));
        uevent.handle = result.id;
        uevent.event = result.emask;
        uevent.cookie = (user_addr_t)(uintptr_t)result.cookie;

        ret = copy_to_user(user_event, &uevent, sizeof(uevent));
        handle_decref(result.handle); /* drop ref taken by wait */
    }

    LTRACEF("[%p][%d]: ret = %d\n", current_trusty_thread(), result.id, ret);
    return ret;
}
524
525 /*
526 * wait on single handle specified by handle id
527 */
sys_wait(uint32_t handle_id,user_addr_t user_event,uint32_t timeout_msecs)528 long __SYSCALL sys_wait(uint32_t handle_id,
529 user_addr_t user_event,
530 uint32_t timeout_msecs) {
531 int ret;
532 struct handle_ref target;
533 struct uctx* ctx = current_uctx();
534
535 LTRACEF("[%p][%d]: %d msec\n", current_trusty_thread(), handle_id,
536 timeout_msecs);
537
538 ret = uctx_handle_get_tmp_ref(ctx, handle_id, &target);
539 if (ret != NO_ERROR)
540 return ret;
541
542 ASSERT(target.handle);
543 ASSERT(target.id == handle_id);
544
545 ret = _wait_for_uevent(&target, user_event, timeout_msecs);
546
547 /* drop handle_ref grabbed by uctx_handle_get */
548 handle_decref(target.handle);
549 return ret;
550 }
551
552 /*
553 * Wait on any handle existing in user context.
554 */
sys_wait_any(user_addr_t user_event,uint32_t timeout_msecs)555 long __SYSCALL sys_wait_any(user_addr_t user_event, uint32_t timeout_msecs) {
556 #if WITH_WAIT_ANY_SUPPORT
557 int ret;
558 struct handle_ref target = {0};
559 struct uctx* ctx = current_uctx();
560
561 LTRACEF("[%p]: %d msec\n", current_trusty_thread(), timeout_msecs);
562
563 if (!ctx->hset_all) {
564 ret = rebuild_hset_all(ctx);
565 if (ret != NO_ERROR)
566 return ret;
567 }
568
569 handle_incref(ctx->hset_all);
570 target.handle = ctx->hset_all;
571
572 ret = _wait_for_uevent(&target, user_event, timeout_msecs);
573 handle_decref(target.handle); /* drop ref grabed above */
574 return ret;
575 #else
576 return (long)ERR_NOT_SUPPORTED;
577 #endif
578 }
579
sys_dup(uint32_t old_handle_id)580 long __SYSCALL sys_dup(uint32_t old_handle_id) {
581 int rc;
582 struct handle* h;
583 struct uctx* ctx = current_uctx();
584 handle_id_t new_handle_id;
585
586 LTRACEF("[%p][%d]\n", current_trusty_thread(), old_handle_id);
587
588 rc = uctx_handle_get(ctx, (handle_id_t)old_handle_id, &h);
589 if (rc != NO_ERROR) {
590 goto err_get;
591 }
592
593 rc = uctx_handle_install(ctx, h, &new_handle_id);
594 if (rc != NO_ERROR) {
595 goto err_install;
596 }
597
598 /* Drop ref grabbed by uctx_handle_get */
599 handle_decref(h);
600 return (long)new_handle_id;
601
602 err_install:
603 handle_decref(h);
604 err_get:
605 return rc;
606 }
607
sys_close(uint32_t handle_id)608 long __SYSCALL sys_close(uint32_t handle_id) {
609 struct handle* handle;
610
611 LTRACEF("[%p][%d]\n", current_trusty_thread(), handle_id);
612
613 int ret = uctx_handle_remove(current_uctx(), handle_id, &handle);
614 if (ret != NO_ERROR)
615 return ret;
616
617 handle_close(handle);
618 return NO_ERROR;
619 }
620
long __SYSCALL sys_set_cookie(uint32_t handle_id, user_addr_t cookie) {
#if WITH_WAIT_ANY_SUPPORT
    int ret;
    struct uctx* ctx = current_uctx();

    LTRACEF("[%p][%d]: cookie = 0x%08x\n", current_trusty_thread(), handle_id,
            (uint)cookie);

    /*
     * Ensure the all-handles set exists: its ref sits at the head of
     * every pollable slot's ref_list, which is the entry updated below.
     */
    if (!ctx->hset_all) {
        ret = rebuild_hset_all(ctx);
        if (ret != NO_ERROR)
            return ret;
    }

    mutex_acquire(&ctx->mlock);
    ret = _check_handle_id(ctx, handle_id);
    if (ret < 0)
        goto err;

    /* cookies are only relevant for pollable handles */
    if (!ctx->htbl[ret].handle->ops->poll) {
        /* not a pollable handle */
        ret = ERR_NOT_VALID;
        goto err;
    }

    /* head of the list is the all-handles-set ref (see rebuild_hset_all) */
    struct handle_ref* ref = list_peek_head_type(&ctx->htbl[ret].ref_list,
                                                 struct handle_ref, uctx_node);
    ref->cookie = (void*)(uintptr_t)cookie;
    ret = NO_ERROR;

err:
    mutex_release(&ctx->mlock);
    return ret;
#else
    return (long)ERR_NOT_SUPPORTED;
#endif
}
659
660 /*****************************************************************************/
661
sys_handle_set_create(void)662 long __SYSCALL sys_handle_set_create(void) {
663 int ret;
664 handle_id_t id;
665 struct handle* hset;
666 struct uctx* ctx = current_uctx();
667
668 hset = handle_set_create();
669 if (!hset)
670 return ERR_NO_MEMORY;
671
672 /* install handle into user context */
673 ret = uctx_handle_install(ctx, hset, &id);
674 if (ret != NO_ERROR)
675 goto err_install;
676
677 handle_decref(hset);
678 return (long)id;
679
680 err_install:
681 handle_decref(hset);
682 return ret;
683 }
684
static int _hset_add_item(struct handle* hset,
                          struct htbl_entry* item,
                          uint32_t id,
                          uint32_t emask,
                          void* cookie) {
    struct handle_ref* ref;

    /* find if we are inserting duplicate */
    list_for_every_entry(&item->ref_list, ref, struct handle_ref, uctx_node) {
        if (ref->parent == hset) {
            return ERR_ALREADY_EXISTS;
        }
    }

    /*
     * Note, we have to add at the end of the ref_list
     *
     * This is an artifact of how global cookies are handled in the
     * presence of the all handles set. When a handle is added to the all
     * handles set the corresponding handle_ref is added to the head
     * of the list, so the implementation of the set_cookie syscall just
     * picks the first entry from the list and stores the cookie there.
     * The set_cookie syscall also triggers a rebuild of the all handles
     * set, so it guarantees that such an entry is always present.
     *
     * Passing ref_list.prev (the current tail node) makes the
     * list_add_head inside _hset_add_handle insert the new ref at the
     * tail of item->ref_list, preserving the invariant above.
     */
    return _hset_add_handle(hset, item->handle, id, emask, cookie,
                            item->ref_list.prev);
}
713
_hset_del_item(struct handle * hset,struct htbl_entry * item)714 static int _hset_del_item(struct handle* hset, struct htbl_entry* item) {
715 uint del_cnt = 0;
716 struct handle_ref* ref;
717 struct handle_ref* tmp;
718
719 list_for_every_entry_safe(&item->ref_list, ref, tmp, struct handle_ref,
720 uctx_node) {
721 if (ref->parent == hset) {
722 del_cnt++;
723 LTRACEF("%p: %p\n", ref->parent, ref->handle);
724 list_delete(&ref->uctx_node);
725 handle_set_detach_ref(ref);
726 handle_decref(ref->handle);
727 free(ref);
728 }
729 }
730 return del_cnt ? NO_ERROR : ERR_NOT_FOUND;
731 }
732
/*
 * Update the event mask and cookie of item's ref(s) in handle set hset.
 * Returns ERR_NOT_FOUND if the item is not a member of that set.
 */
static int _hset_mod_item(struct handle* hset,
                          struct htbl_entry* item,
                          uint32_t emask,
                          void* cookie) {
    struct handle_ref* ref;
    struct handle_ref* next;
    uint updated = 0;

    list_for_every_entry_safe(&item->ref_list, ref, next, struct handle_ref,
                              uctx_node) {
        if (ref->parent != hset)
            continue;

        updated++;
        LTRACEF("%p: %p\n", ref->parent, ref->handle);
        handle_set_update_ref(ref, emask, cookie);
    }
    return updated ? NO_ERROR : ERR_NOT_FOUND;
}
751
/*
 * Dispatch a handle-set control command (add/del/mod) after validating
 * both handle ids. Caller must hold the uctx mutex.
 */
static int _hset_ctrl_locked(handle_id_t hset_id,
                             handle_id_t h_id,
                             uint32_t cmd,
                             uint32_t event,
                             void* cookie) {
    struct uctx* ctx = current_uctx();
    int hset_idx;
    int h_idx;

    LTRACEF("%d: %d: cmd=%d\n", hset_id, h_id, cmd);

    /* both ids must name valid entries in the caller's table */
    hset_idx = _check_handle_id(ctx, hset_id);
    if (hset_idx < 0)
        return hset_idx;

    h_idx = _check_handle_id(ctx, h_id);
    if (h_idx < 0)
        return h_idx;

    struct handle* hset = ctx->htbl[hset_idx].handle;
    struct htbl_entry* item = &ctx->htbl[h_idx];

    switch (cmd) {
    case HSET_ADD:
        return _hset_add_item(hset, item, h_id, event, cookie);

    case HSET_DEL:
        return _hset_del_item(hset, item);

    case HSET_MOD:
        return _hset_mod_item(hset, item, event, cookie);

    default:
        LTRACEF("Invalid hset command (%d)\n", cmd);
        return ERR_INVALID_ARGS;
    }
}
793
sys_handle_set_ctrl(handle_id_t hset_id,uint32_t cmd,user_addr_t user_event)794 long __SYSCALL sys_handle_set_ctrl(handle_id_t hset_id,
795 uint32_t cmd,
796 user_addr_t user_event) {
797 int ret = 0;
798 struct uevent uevent;
799 struct uctx* ctx = current_uctx();
800
801 ret = copy_from_user(&uevent, user_event, sizeof(uevent));
802 if (ret < 0)
803 return ret;
804
805 mutex_acquire(&ctx->mlock);
806 ret = _hset_ctrl_locked(hset_id, uevent.handle, cmd, uevent.event,
807 (void*)(uintptr_t)uevent.cookie);
808 mutex_release(&ctx->mlock);
809 return ret;
810 }
811
uctx_handle_writev(uint32_t fd,user_addr_t iov_uaddr,uint32_t iov_cnt)812 static ssize_t uctx_handle_writev(uint32_t fd,
813 user_addr_t iov_uaddr,
814 uint32_t iov_cnt) {
815 int rc;
816 struct handle* h;
817 struct uctx* ctx = current_uctx();
818
819 rc = uctx_handle_get(ctx, (handle_id_t)fd, &h);
820 if (rc != NO_ERROR)
821 return rc;
822
823 if (h->ops && h->ops->user_writev) {
824 rc = h->ops->user_writev(h, iov_uaddr, iov_cnt);
825 } else {
826 rc = ERR_NOT_SUPPORTED;
827 }
828
829 handle_decref(h);
830 return rc;
831 }
832
uctx_handle_readv(uint32_t fd,user_addr_t iov_uaddr,uint32_t iov_cnt)833 static ssize_t uctx_handle_readv(uint32_t fd,
834 user_addr_t iov_uaddr,
835 uint32_t iov_cnt) {
836 int rc;
837 struct handle* h;
838 struct uctx* ctx = current_uctx();
839
840 rc = uctx_handle_get(ctx, (handle_id_t)fd, &h);
841 if (rc != NO_ERROR)
842 return rc;
843
844 if (h->ops && h->ops->user_readv) {
845 rc = h->ops->user_readv(h, iov_uaddr, iov_cnt);
846 } else {
847 rc = ERR_NOT_SUPPORTED;
848 }
849
850 handle_decref(h);
851 return rc;
852 }
853
/* fd ops routing generic read/write syscalls to IPC handle ops */
static const struct sys_fd_ops fd_op = {
        .writev = uctx_handle_writev,
        .readv = uctx_handle_readv,
};
858
const struct sys_fd_ops* uctx_get_fd_ops(uint32_t fd) {
    /* IPC handles occupy a fixed id window starting at IPC_HANDLE_ID_BASE */
    if (fd < IPC_HANDLE_ID_BASE)
        return NULL;
    if (fd >= IPC_HANDLE_ID_BASE + IPC_MAX_HANDLES)
        return NULL;
    return &fd_op;
}
866
867 #else /* WITH_TRUSTY_IPC */
868
sys_wait(uint32_t handle_id,user_addr_t user_event,unsigned long timeout_msecs)869 long __SYSCALL sys_wait(uint32_t handle_id,
870 user_addr_t user_event,
871 unsigned long timeout_msecs) {
872 return (long)ERR_NOT_SUPPORTED;
873 }
874
sys_wait_any(user_addr_t user_event,unsigned long timeout_msecs)875 long __SYSCALL sys_wait_any(user_addr_t user_event,
876 unsigned long timeout_msecs) {
877 return (long)ERR_NOT_SUPPORTED;
878 }
879
sys_close(uint32_t handle_id)880 long __SYSCALL sys_close(uint32_t handle_id) {
881 return (long)ERR_NOT_SUPPORTED;
882 }
883
sys_set_cookie(uint32_t handle_id,user_addr_t cookie)884 long __SYSCALL sys_set_cookie(uint32_t handle_id, user_addr_t cookie) {
885 return (long)ERR_NOT_SUPPORTED;
886 }
887
sys_handle_set_create(void)888 long __SYSCALL sys_handle_set_create(void) {
889 return ERR_NOT_SUPPORTED;
890 }
891
sys_handle_set_ctrl(handle_id_t hset_id,uint32_t cmd,user_addr_t user_event)892 long __SYSCALL sys_handle_set_ctrl(handle_id_t hset_id,
893 uint32_t cmd,
894 user_addr_t user_event) {
895 return ERR_NOT_SUPPORTED;
896 }
897
const struct sys_fd_ops* uctx_get_fd_ops(uint32_t fd) {
    /* no IPC support: no fds map to handle ops */
    (void)fd;
    return NULL;
}
901
902 #endif /* WITH_TRUSTY_IPC */
903