/*
 * Copyright (c) 2013, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define LOCAL_TRACE 0

#include <assert.h>
#include <err.h>
#include <kernel/usercopy.h>
#include <list.h>
#include <platform.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>

#include <kernel/event.h>
#include <kernel/mutex.h>
#include <lk/init.h>

#include <lib/syscall.h>

#include <lib/trusty/uuid.h>

#if WITH_TRUSTY_IPC

#include <lib/trusty/event.h>
#include <lib/trusty/ipc.h>
#include <lib/trusty/trusty_app.h>
#include <lib/trusty/uctx.h>

#include <reflist.h>

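/*
 * Overview (kernel side):
 *
 *   server: ipc_port_create() -> ipc_port_publish() -> ipc_port_accept()
 *   client: ipc_port_connect_async() -> wait for IPC_HANDLE_POLL_READY
 *
 * Published ports live on ipc_port_list. A client that connects before
 * the port exists (e.g. with IPC_CONNECT_WAIT_FOR_PORT) is parked on
 * waiting_for_port_chan_list until the port is published. Every accepted
 * connection is a pair of ipc_chan objects cross-linked as peers, each
 * refcounted independently of its handle.
 */
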
static struct list_node waiting_for_port_chan_list =
        LIST_INITIAL_VALUE(waiting_for_port_chan_list);

static struct list_node ipc_port_list = LIST_INITIAL_VALUE(ipc_port_list);

static struct mutex ipc_port_lock = MUTEX_INITIAL_VALUE(ipc_port_lock);

static uint32_t port_poll(struct handle* handle, uint32_t emask, bool finalize);
static void port_shutdown(struct handle* handle);
static void port_handle_destroy(struct handle* handle);

static uint32_t chan_poll(struct handle* handle, uint32_t emask, bool finalize);
static void chan_handle_destroy(struct handle* handle);

static struct ipc_port* port_find_locked(const char* path);
static int port_attach_client(struct ipc_port* port, struct ipc_chan* client);
static void chan_shutdown(struct ipc_chan* chan);
static void chan_add_ref(struct ipc_chan* conn, struct obj_ref* ref);
static void chan_del_ref(struct ipc_chan* conn, struct obj_ref* ref);

static void remove_from_waiting_for_port_list_locked(struct ipc_chan* client,
                                                     struct obj_ref* ref);

static struct handle_ops ipc_port_handle_ops = {
        .poll = port_poll,
        .destroy = port_handle_destroy,
};

static struct handle_ops ipc_chan_handle_ops = {
        .poll = chan_poll,
        .destroy = chan_handle_destroy,
};

bool ipc_is_channel(struct handle* handle) {
    return likely(handle->ops == &ipc_chan_handle_ops);
}

bool ipc_is_port(struct handle* handle) {
    return likely(handle->ops == &ipc_port_handle_ops);
}

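/*
 * Check whether any client connection is parked on the waiting-for-port
 * list for the given path and would pass the port's access check.
 */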
bool ipc_connection_waiting_for_port(const char* path, uint32_t flags) {
    bool found = false;
    struct ipc_chan* chan;

    mutex_acquire(&ipc_port_lock);
    list_for_every_entry(&waiting_for_port_chan_list, chan, struct ipc_chan,
                         node) {
        if (!strncmp(path, chan->path, IPC_PORT_PATH_MAX) &&
            ipc_port_check_access(flags, chan->uuid) == NO_ERROR) {
            found = true;
            break;
        }
    }
    mutex_release(&ipc_port_lock);

    return found;
}

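/*
 * Drop any connections parked on the waiting-for-port list that match
 * the given path and would pass the port's access check; each matching
 * channel is shut down.
 */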
void ipc_remove_connection_waiting_for_port(const char* path, uint32_t flags) {
    struct ipc_chan *chan, *temp;
    struct obj_ref tmp_chan_ref = OBJ_REF_INITIAL_VALUE(tmp_chan_ref);

    mutex_acquire(&ipc_port_lock);
    list_for_every_entry_safe(&waiting_for_port_chan_list, chan, temp,
                              struct ipc_chan, node) {
        if (!strncmp(path, chan->path, IPC_PORT_PATH_MAX) &&
            ipc_port_check_access(flags, chan->uuid) == NO_ERROR) {
            remove_from_waiting_for_port_list_locked(chan, &tmp_chan_ref);
            chan_shutdown(chan);
            chan_del_ref(chan, &tmp_chan_ref); /* drop local ref */
        }
    }
    mutex_release(&ipc_port_lock);
}

/*
 * Called by user task to create a new port at the given path.
 * The returned handle will be later installed into uctx.
 */
int ipc_port_create(const uuid_t* sid,
                    const char* path,
                    uint num_recv_bufs,
                    size_t recv_buf_size,
                    uint32_t flags,
                    struct handle** phandle_ptr) {
    struct ipc_port* new_port;
    int ret = 0;

    LTRACEF("creating port (%s)\n", path);

    if (!sid) {
        /* server uuid is required */
        LTRACEF("server uuid is required\n");
        return ERR_INVALID_ARGS;
    }

    if (!num_recv_bufs || num_recv_bufs > IPC_CHAN_MAX_BUFS || !recv_buf_size ||
        recv_buf_size > IPC_CHAN_MAX_BUF_SIZE) {
        LTRACEF("Invalid buffer sizes: %u x %zu\n", num_recv_bufs,
                recv_buf_size);
        return ERR_INVALID_ARGS;
    }

    new_port = calloc(1, sizeof(struct ipc_port));
    if (!new_port) {
        LTRACEF("cannot allocate memory for port\n");
        return ERR_NO_MEMORY;
    }

    ret = strlcpy(new_port->path, path, sizeof(new_port->path));
    if (ret == 0) {
        LTRACEF("path is empty\n");
        ret = ERR_INVALID_ARGS;
        goto err_copy_path;
    }

    if ((uint)ret >= sizeof(new_port->path)) {
        LTRACEF("path is too long (%d)\n", ret);
        ret = ERR_TOO_BIG;
        goto err_copy_path;
    }

    new_port->uuid = sid;
    new_port->num_recv_bufs = num_recv_bufs;
    new_port->recv_buf_size = recv_buf_size;
    new_port->flags = flags;

    new_port->state = IPC_PORT_STATE_INVALID;
    list_initialize(&new_port->pending_list);

    handle_init(&new_port->handle, &ipc_port_handle_ops);

    LTRACEF("new port %p created (%s)\n", new_port, new_port->path);

    *phandle_ptr = &new_port->handle;

    return NO_ERROR;

err_copy_path:
    free(new_port);
    return ret;
}

#if TEST_BUILD
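/*
 * Test-only helper: snapshot the published port list into a newly
 * allocated array. The caller releases it with ipc_free_port_list().
 */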
int ipc_get_port_list(struct ipc_port** out_port_list) {
    struct ipc_port* port;

    mutex_acquire(&ipc_port_lock);
    int len = list_length(&ipc_port_list);
    *out_port_list = calloc(len, sizeof(struct ipc_port));
    if (*out_port_list == NULL) {
        mutex_release(&ipc_port_lock);
        return ERR_NO_MEMORY;
    }
    struct ipc_port* current_port = *out_port_list;
    list_for_every_entry(&ipc_port_list, port, struct ipc_port, node) {
        memcpy(current_port, port, sizeof(struct ipc_port));
        ++current_port;
    }
    mutex_release(&ipc_port_lock);
    return len;
}

void ipc_free_port_list(struct ipc_port* out_port_list) {
    free(out_port_list);
}
#endif

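/*
 * Park a connecting client channel until a matching port is published
 * (ipc_port_lock must be held). The list takes its own channel ref.
 */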
static void add_to_waiting_for_port_list_locked(struct ipc_chan* client) {
    DEBUG_ASSERT(client);
    DEBUG_ASSERT(!list_in_list(&client->node));
    DEBUG_ASSERT(client->path);

    list_add_tail(&waiting_for_port_chan_list, &client->node);
    chan_add_ref(client, &client->node_ref);
}

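/*
 * Take a parked client channel off the waiting-for-port list
 * (ipc_port_lock must be held). The caller gets its own reference via
 * @ref before the list reference is dropped; the saved path is freed.
 */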
static void remove_from_waiting_for_port_list_locked(struct ipc_chan* client,
                                                     struct obj_ref* ref) {
    DEBUG_ASSERT(client);
    DEBUG_ASSERT(list_in_list(&client->node));

    free((void*)client->path);
    client->path = NULL;

    /* take it out of global pending list */
    chan_add_ref(client, ref); /* add local ref */
    list_delete(&client->node);
    chan_del_ref(client, &client->node_ref); /* drop list ref */
}

/*
 * Shutting down port
 *
 * Called when the controlling handle gets closed.
 */
static void port_shutdown(struct handle* phandle) {
    bool is_startup_port = false;
    struct ipc_chan* client;
    ASSERT(phandle);
    ASSERT(ipc_is_port(phandle));

    struct ipc_port* port = containerof(phandle, struct ipc_port, handle);

    LTRACEF("shutting down port %p\n", port);

    /* detach it from global list if it is in the list */
    mutex_acquire(&ipc_port_lock);
    if (list_in_list(&port->node)) {
        list_delete(&port->node);
    }
    mutex_release(&ipc_port_lock);

    is_startup_port = trusty_app_is_startup_port(port->path);

    /* tear down pending connections */
    struct ipc_chan *server, *temp;
    list_for_every_entry_safe(&port->pending_list, server, temp,
                              struct ipc_chan, node) {
        /* Check if the port being shut down has been registered by an
         * application. If so, detach the client connection and put it
         * back on the waiting-for-port list.
         */
        if (is_startup_port) {
            bool client_connecting = false;
            /* Get a local ref to the client */
            struct obj_ref tmp_client_ref =
                    OBJ_REF_INITIAL_VALUE(tmp_client_ref);
            chan_add_ref(server->peer, &tmp_client_ref);

            /* Remove server -> client ref */
            client = server->peer;
            server->peer = NULL;
            chan_del_ref(client, &server->peer_ref);

            mutex_acquire(&client->mlock);
            if (client->state != IPC_CHAN_STATE_DISCONNECTING) {
                /*
                 * Remove client -> server ref if the client hasn't been closed
                 */
                client->peer = NULL;
                chan_del_ref(server, &client->peer_ref);
                ASSERT(client->state == IPC_CHAN_STATE_CONNECTING);
                client_connecting = true;
            }
            mutex_release(&client->mlock);

            if (client_connecting) {
                /*
                 * Reset the client. This is needed before adding the client
                 * back to the waiting-for-port list, but it is still safe to
                 * do if the client changes to disconnecting, since we still
                 * hold a reference to the client. When all the references go
                 * away, the destructor checks whether the msg_queue has
                 * already been destroyed.
                 */
                ipc_msg_queue_destroy(client->msg_queue);
                client->msg_queue = NULL;
                client->path = strdup(port->path);
                ASSERT(client->path);

                /* Add client to waiting_for_port list */
                mutex_acquire(&ipc_port_lock);
                if (client->state == IPC_CHAN_STATE_CONNECTING)
                    add_to_waiting_for_port_list_locked(client);
                mutex_release(&ipc_port_lock);
            }

            /* Drop local ref */
            chan_del_ref(client, &tmp_client_ref);
        }

        /* remove connection from the list */
        mutex_acquire(&server->mlock);
        list_delete(&server->node);
        chan_del_ref(server, &server->node_ref); /* drop list ref */
        mutex_release(&server->mlock);

        /* the pending server channel is not in the user context table,
         * but we need to decrement its ref to get rid of it
         */
        handle_decref(&server->handle);
    }
}

/*
 * Destroy port controlled by handle
 *
 * Called when controlling handle refcount reaches 0.
 */
static void port_handle_destroy(struct handle* phandle) {
    ASSERT(phandle);
    ASSERT(ipc_is_port(phandle));

    /* invoke port shutdown first */
    port_shutdown(phandle);

    struct ipc_port* port = containerof(phandle, struct ipc_port, handle);

    /* the pending list should be empty and
     * the node should not be in any list
     */
    DEBUG_ASSERT(list_is_empty(&port->pending_list));
    DEBUG_ASSERT(!list_in_list(&port->node));

    LTRACEF("destroying port %p ('%s')\n", port, port->path);

    free(port);
}

/*
 * Make specified port publicly available for operation.
 */
int ipc_port_publish(struct handle* phandle) {
    int ret = NO_ERROR;

    DEBUG_ASSERT(phandle);
    DEBUG_ASSERT(ipc_is_port(phandle));

    struct ipc_port* port = containerof(phandle, struct ipc_port, handle);
    DEBUG_ASSERT(!list_in_list(&port->node));

    /* Check for duplicates */
    mutex_acquire(&ipc_port_lock);
    if (port_find_locked(port->path)) {
        LTRACEF("path already exists\n");
        ret = ERR_ALREADY_EXISTS;
    } else {
        port->state = IPC_PORT_STATE_LISTENING;
        list_add_tail(&ipc_port_list, &port->node);

        /* go through pending connection list and pick those we can handle */
        struct ipc_chan *client, *temp;
        struct obj_ref tmp_client_ref = OBJ_REF_INITIAL_VALUE(tmp_client_ref);
        list_for_every_entry_safe(&waiting_for_port_chan_list, client, temp,
                                  struct ipc_chan, node) {
            if (strcmp(client->path, port->path))
                continue;

            remove_from_waiting_for_port_list_locked(client, &tmp_client_ref);

            /* try to attach port */
            int err = port_attach_client(port, client);
            if (err) {
                /* failed to attach port: close channel */
                LTRACEF("failed (%d) to attach_port\n", err);
                chan_shutdown(client);
            }

            chan_del_ref(client, &tmp_client_ref); /* drop local ref */
        }
    }
    mutex_release(&ipc_port_lock);

    return ret;
}

/*
 * Called by user task to create a new port.
 *
 * On success - returns handle id (small integer) for the new port.
 * On error - returns negative error code.
 */
long __SYSCALL sys_port_create(user_addr_t path,
                               uint32_t num_recv_bufs,
                               uint32_t recv_buf_size,
                               uint32_t flags) {
    struct trusty_app* tapp = current_trusty_app();
    struct uctx* ctx = current_uctx();
    struct handle* port_handle = NULL;
    int ret;
    handle_id_t handle_id;
    char tmp_path[IPC_PORT_PATH_MAX];

    /* copy path from user space */
    ret = (int)strlcpy_from_user(tmp_path, path, sizeof(tmp_path));
    if (ret < 0)
        return (long)ret;

    if ((uint)ret >= sizeof(tmp_path)) {
        /* string is too long */
        return ERR_INVALID_ARGS;
    }

    /* create new port */
    ret = ipc_port_create(&tapp->props.uuid, tmp_path, (uint)num_recv_bufs,
                          recv_buf_size, flags, &port_handle);
    if (ret != NO_ERROR)
        goto err_port_create;

    /* install handle into user context */
    ret = uctx_handle_install(ctx, port_handle, &handle_id);
    if (ret != NO_ERROR)
        goto err_install;

    /* publish for normal operation */
    ret = ipc_port_publish(port_handle);
    if (ret != NO_ERROR)
        goto err_publish;

    handle_decref(port_handle);
    return (long)handle_id;

err_publish:
    (void)uctx_handle_remove(ctx, handle_id, NULL);
err_install:
    handle_decref(port_handle);
err_port_create:
    return (long)ret;
}

/*
 * Look up a port with the given name (ipc_port_lock must be held)
 */
static struct ipc_port* port_find_locked(const char* path) {
    struct ipc_port* port;

    DEBUG_ASSERT(is_mutex_held(&ipc_port_lock));
    list_for_every_entry(&ipc_port_list, port, struct ipc_port, node) {
        if (!strcmp(path, port->path))
            return port;
    }
    return NULL;
}

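/*
 * Poll port state: report an error if the port is not listening, or
 * readiness if there is a pending connection waiting to be accepted.
 */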
static uint32_t port_poll(struct handle* phandle,
                          uint32_t emask,
                          bool finalize) {
    DEBUG_ASSERT(phandle);
    DEBUG_ASSERT(ipc_is_port(phandle));

    struct ipc_port* port = containerof(phandle, struct ipc_port, handle);
    uint32_t events = 0;

    if (port->state != IPC_PORT_STATE_LISTENING)
        events |= IPC_HANDLE_POLL_ERROR;
    else if (!list_is_empty(&port->pending_list))
        events |= IPC_HANDLE_POLL_READY;
    LTRACEF("%s in state %d events %x\n", port->path, port->state, events);

    return events & emask;
}

/*
 * Channel ref counting
 */
static inline void __chan_destroy_refobj(struct obj* ref) {
    struct ipc_chan* chan = containerof(ref, struct ipc_chan, refobj);

    /* should not point to peer */
    ASSERT(chan->peer == NULL);

    /* should not be in a list */
    ASSERT(!list_in_list(&chan->node));

    if (chan->path)
        free((void*)chan->path);

    if (chan->msg_queue) {
        ipc_msg_queue_destroy(chan->msg_queue);
        chan->msg_queue = NULL;
    }
    free(chan);
}

static inline void chan_add_ref(struct ipc_chan* chan, struct obj_ref* ref) {
    spin_lock_saved_state_t state;

    spin_lock_save(&chan->ref_slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    obj_add_ref(&chan->refobj, ref);
    spin_unlock_restore(&chan->ref_slock, state, SPIN_LOCK_FLAG_INTERRUPTS);
}

static inline void chan_del_ref(struct ipc_chan* chan, struct obj_ref* ref) {
    spin_lock_saved_state_t state;

    spin_lock_save(&chan->ref_slock, &state, SPIN_LOCK_FLAG_INTERRUPTS);
    bool last = obj_del_ref(&chan->refobj, ref, NULL);
    spin_unlock_restore(&chan->ref_slock, state, SPIN_LOCK_FLAG_INTERRUPTS);

    if (last)
        __chan_destroy_refobj(&chan->refobj);
}

/*
 * Initialize channel handle
 */
static inline struct handle* chan_handle_init(struct ipc_chan* chan) {
    handle_init(&chan->handle, &ipc_chan_handle_ops);
    chan_add_ref(chan, &chan->handle_ref);
    return &chan->handle;
}

/*
 * Allocate and initialize new channel.
 */
static struct ipc_chan* chan_alloc(uint32_t flags, const uuid_t* uuid) {
    struct ipc_chan* chan;
    struct obj_ref tmp_ref = OBJ_REF_INITIAL_VALUE(tmp_ref);

    chan = calloc(1, sizeof(struct ipc_chan));
    if (!chan)
        return NULL;

    /* init per channel mutex */
    mutex_init(&chan->mlock);

    /* init ref object and associated lock */
    spin_lock_init(&chan->ref_slock);
    obj_init(&chan->refobj, &tmp_ref);

    /* init refs */
    obj_ref_init(&chan->node_ref);
    obj_ref_init(&chan->peer_ref);
    obj_ref_init(&chan->handle_ref);

    chan->uuid = uuid;
    if (flags & IPC_CHAN_FLAG_SERVER)
        chan->state = IPC_CHAN_STATE_ACCEPTING;
    else
        chan->state = IPC_CHAN_STATE_CONNECTING;
    chan->flags = flags;

    chan_handle_init(chan);
    chan_del_ref(chan, &tmp_ref);

    return chan;
}

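/*
 * Shut down one side of a channel pair with its mutex held: remove it
 * from any list (dropping the list ref, and for pending server channels
 * also the dangling handle ref and peer ref) and move it into the
 * disconnecting state.
 */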
static void chan_shutdown_locked(struct ipc_chan* chan) {
    /* Remove channel from any list it might be in */
    if (list_in_list(&chan->node)) {
        list_delete(&chan->node);
        chan_del_ref(chan, &chan->node_ref);

        if (chan->flags & IPC_CHAN_FLAG_SERVER) {
            /* If we are shutting down the server side of the channel pair
             * and it was in a list (assumed to be the accept pending_list),
             * we also need to drop the handle ref because it is dangling.
             * We cannot call handle_decref here because of locks, but we
             * can call chan_del_ref directly, which has the same effect.
             * In addition, we should also detach the peer.
             */
            if (chan->peer) {
                chan_del_ref(chan->peer, &chan->peer_ref);
                chan->peer = NULL;
            }
            chan_del_ref(chan, &chan->handle_ref);
        }
    }

    switch (chan->state) {
    case IPC_CHAN_STATE_CONNECTED:
    case IPC_CHAN_STATE_CONNECTING:
        chan->state = IPC_CHAN_STATE_DISCONNECTING;
        handle_notify(&chan->handle);
        break;
    case IPC_CHAN_STATE_ACCEPTING:
        chan->state = IPC_CHAN_STATE_DISCONNECTING;
        break;
    default:
        /* no op */
        break;
    }
}

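/*
 * Shut down both ends of a channel pair (ipc_port_lock must be held).
 * The peer pointer is cleared first; the peer object stays alive until
 * the peer_ref we hold on it is dropped at the end.
 */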
static void chan_shutdown(struct ipc_chan* chan) {
    DEBUG_ASSERT(is_mutex_held(&ipc_port_lock));

    LTRACEF("chan %p: peer %p\n", chan, chan->peer);

    mutex_acquire(&chan->mlock);
    struct ipc_chan* peer = chan->peer;
    chan->peer = NULL;
    chan_shutdown_locked(chan);
    mutex_release(&chan->mlock);

    /*
     * if the peer exists we are still holding a reference to the peer
     * chan object, so it cannot disappear.
     */
    if (peer) {
        /* shutdown peer */
        mutex_acquire(&peer->mlock);
        chan_shutdown_locked(peer);
        mutex_release(&peer->mlock);
        chan_del_ref(peer, &chan->peer_ref);
    }
}

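/*
 * Called when the channel handle refcount reaches 0: shut the channel
 * down and drop the reference the handle held on the channel object.
 */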
static void chan_handle_destroy(struct handle* chandle) {
    DEBUG_ASSERT(chandle);
    DEBUG_ASSERT(ipc_is_channel(chandle));

    struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);

    mutex_acquire(&ipc_port_lock);
    chan_shutdown(chan);
    mutex_release(&ipc_port_lock);

    chan_del_ref(chan, &chan->handle_ref);
}

/*
 * Poll channel state
 */
static uint32_t chan_poll(struct handle* chandle,
                          uint32_t emask,
                          bool finalize) {
    DEBUG_ASSERT(chandle);
    DEBUG_ASSERT(ipc_is_channel(chandle));

    struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);

    uint32_t events = 0;

    mutex_acquire(&chan->mlock);
    /* peer is closing connection */
    if (chan->state == IPC_CHAN_STATE_DISCONNECTING) {
        events |= IPC_HANDLE_POLL_HUP;
    }

    /* server accepted our connection */
    if (chan->aux_state & IPC_CHAN_AUX_STATE_CONNECTED) {
        events |= IPC_HANDLE_POLL_READY;
    }

    /* have a pending message? */
    if (chan->msg_queue && !ipc_msg_queue_is_empty(chan->msg_queue)) {
        events |= IPC_HANDLE_POLL_MSG;
    }

    /* check if we were send blocked */
    if (chan->aux_state & IPC_CHAN_AUX_STATE_SEND_UNBLOCKED) {
        events |= IPC_HANDLE_POLL_SEND_UNBLOCKED;
    }

    events &= emask;
    if (finalize) {
        if (events & IPC_HANDLE_POLL_READY) {
            chan->aux_state &= ~IPC_CHAN_AUX_STATE_CONNECTED;
        }
        if (events & IPC_HANDLE_POLL_SEND_UNBLOCKED) {
            chan->aux_state &= ~IPC_CHAN_AUX_STATE_SEND_UNBLOCKED;
        }
    }

    mutex_release(&chan->mlock);
    return events;
}

/*
 * Check if connection to specified port is allowed
 */
int ipc_port_check_access(uint32_t port_flags, const uuid_t* uuid) {
    if (!uuid)
        return ERR_ACCESS_DENIED;

    if (is_ns_client(uuid)) {
        /* check if this port allows connection from NS clients */
        if (port_flags & IPC_PORT_ALLOW_NS_CONNECT)
            return NO_ERROR;
    } else {
        /* check if this port allows connection from Trusted Apps */
        if (port_flags & IPC_PORT_ALLOW_TA_CONNECT)
            return NO_ERROR;
    }

    return ERR_ACCESS_DENIED;
}

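/*
 * Attach a connecting client channel to a listening port: allocate the
 * server-side channel and both message queues, cross-link the two
 * channels as peers, and queue the server channel on the port's pending
 * list (ipc_port_lock must be held).
 */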
static int port_attach_client(struct ipc_port* port, struct ipc_chan* client) {
    int ret;
    struct ipc_chan* server;

    if (port->state != IPC_PORT_STATE_LISTENING) {
        LTRACEF("port %s is not in listening state (%d)\n", port->path,
                port->state);
        return ERR_NOT_READY;
    }

    /* check if we are allowed to connect */
    ret = ipc_port_check_access(port->flags, client->uuid);
    if (ret != NO_ERROR) {
        LTRACEF("access denied: %d\n", ret);
        return ret;
    }

    server = chan_alloc(IPC_CHAN_FLAG_SERVER, port->uuid);
    if (!server) {
        LTRACEF("failed to alloc server\n");
        return ERR_NO_MEMORY;
    }

    /* allocate msg queues */
    ret = ipc_msg_queue_create(port->num_recv_bufs, port->recv_buf_size,
                               &client->msg_queue);
    if (ret != NO_ERROR) {
        LTRACEF("failed to alloc mq: %d\n", ret);
        goto err_client_mq;
    }

    ret = ipc_msg_queue_create(port->num_recv_bufs, port->recv_buf_size,
                               &server->msg_queue);
    if (ret != NO_ERROR) {
        LTRACEF("failed to alloc mq: %d\n", ret);
        goto err_server_mq;
    }

    /* setup cross peer refs */
    mutex_acquire(&client->mlock);
    if (client->state == IPC_CHAN_STATE_DISCONNECTING) {
        mutex_release(&client->mlock);
        goto err_closed;
    }
    chan_add_ref(server, &client->peer_ref);
    client->peer = server;

    chan_add_ref(client, &server->peer_ref);
    server->peer = client;
    mutex_release(&client->mlock);

    /* and add server channel to pending connection list */
    chan_add_ref(server, &server->node_ref);
    list_add_tail(&port->pending_list, &server->node);

    /* Notify port that there is a pending connection */
    handle_notify(&port->handle);

    return NO_ERROR;

err_closed:
err_server_mq:
err_client_mq:
    /*
     * Ideally this would call handle_decref(&server->handle), but that will
     * deadlock as ipc_port_lock is already held. Therefore close directly.
     */
    chan_shutdown(server);
    chan_del_ref(server, &server->handle_ref);

    return ERR_NO_MEMORY;
}

/*
 * Client requests a connection to a port. It can be called in context
 * of user task as well as vdev RX thread.
 */
int ipc_port_connect_async(const uuid_t* cid,
                           const char* path,
                           size_t max_path,
                           uint flags,
                           struct handle** chandle_ptr) {
    struct ipc_port* port;
    struct ipc_chan* client;
    int ret;

    if (!cid) {
        /* client uuid is required */
        TRACEF("client uuid is required\n");
        return ERR_INVALID_ARGS;
    }

    size_t len = strnlen(path, max_path);
    if (len == 0 || len >= max_path) {
        /* unterminated string */
        TRACEF("invalid path specified\n");
        return ERR_INVALID_ARGS;
    }
    /* After this point path is zero terminated */

    /* allocate channel pair */
    client = chan_alloc(0, cid);
    if (!client) {
        TRACEF("failed to alloc client\n");
        return ERR_NO_MEMORY;
    }

    LTRACEF("Connecting to '%s'\n", path);

    mutex_acquire(&ipc_port_lock);

    port = port_find_locked(path);
    if (port) {
        /* found */
        ret = port_attach_client(port, client);
        if (ret)
            goto err_attach_client;
    } else {
        /*
         * Check if an app has registered to be started on connections
         * to this port
         */
        ret = trusty_app_request_start_by_port(path, cid);
        switch (ret) {
        case NO_ERROR:
        case ERR_ALREADY_STARTED:
            break;
        case ERR_NOT_FOUND:
            /*
             * App has not been loaded yet, but we wait for it if the caller
             * asked to
             */
            if (flags & IPC_CONNECT_WAIT_FOR_PORT) {
                break;
            }
            __FALLTHROUGH;
        default:
            goto err_find_ports;
        }

        /* port not found, add connection to waiting_for_port_chan_list */
        client->path = strdup(path);
        if (!client->path) {
            ret = ERR_NO_MEMORY;
            goto err_alloc_path;
        }

        /* add it to waiting for port list */
        add_to_waiting_for_port_list_locked(client);
    }

    LTRACEF("new connection: client %p: peer %p\n", client, client->peer);

    /* success */
    handle_incref(&client->handle);
    *chandle_ptr = &client->handle;
    ret = NO_ERROR;

err_alloc_path:
err_attach_client:
err_find_ports:
    mutex_release(&ipc_port_lock);
    handle_decref(&client->handle);
    return ret;
}

/* returns handle id for the new channel */

#ifndef DEFAULT_IPC_CONNECT_WARN_TIMEOUT
#define DEFAULT_IPC_CONNECT_WARN_TIMEOUT INFINITE_TIME
#endif

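/*
 * Called by user task to connect to a port (or event source) by path.
 * Tries event_source_open() first and falls back to a regular IPC
 * connection. Unless IPC_CONNECT_ASYNC is set, the call blocks until the
 * connection is accepted or closed, logging a warning after
 * DEFAULT_IPC_CONNECT_WARN_TIMEOUT and then continuing to wait.
 */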
long __SYSCALL sys_connect(user_addr_t path, uint32_t flags) {
    struct trusty_app* tapp = current_trusty_app();
    struct uctx* ctx = current_uctx();
    struct handle* chandle;
    char tmp_path[IPC_PORT_PATH_MAX];
    int ret;
    handle_id_t handle_id;

    if (flags & ~IPC_CONNECT_MASK) {
        /* unsupported flags specified */
        return ERR_INVALID_ARGS;
    }

    ret = (int)strlcpy_from_user(tmp_path, path, sizeof(tmp_path));
    if (ret < 0)
        return (long)ret;

    if ((uint)ret >= sizeof(tmp_path))
        return (long)ERR_INVALID_ARGS;

    /* try to connect to event first */
    ret = event_source_open(&tapp->props.uuid, tmp_path, sizeof(tmp_path), 0,
                            &chandle);
    if (ret == NO_ERROR) {
        goto install;
    }

    /* if the error is anything other than ERR_NOT_FOUND return immediately */
    if (ret != ERR_NOT_FOUND) {
        return ret;
    }

    /* then regular port */
    ret = ipc_port_connect_async(&tapp->props.uuid, tmp_path, sizeof(tmp_path),
                                 flags, &chandle);
    if (ret != NO_ERROR)
        return (long)ret;

    if (!(flags & IPC_CONNECT_ASYNC)) {
        uint32_t event;
        lk_time_t timeout_msecs = DEFAULT_IPC_CONNECT_WARN_TIMEOUT;

        ret = handle_wait(chandle, &event, timeout_msecs);
        if (ret == ERR_TIMED_OUT) {
            TRACEF("Timed out connecting to %s\n", tmp_path);
            ret = handle_wait(chandle, &event, INFINITE_TIME);
        }

        if (ret < 0) {
            /* timeout or other error */
            handle_decref(chandle);
            return ret;
        }

        if ((event & IPC_HANDLE_POLL_HUP) && !(event & IPC_HANDLE_POLL_MSG)) {
            /* hangup and no pending messages */
            handle_decref(chandle);
            return ERR_CHANNEL_CLOSED;
        }

        if (!(event & IPC_HANDLE_POLL_READY)) {
            /* not connected */
            TRACEF("Unexpected channel state: event = 0x%x\n", event);
            handle_decref(chandle);
            return ERR_NOT_READY;
        }
    }

install:
    ret = uctx_handle_install(ctx, chandle, &handle_id);
    if (ret != NO_ERROR) {
        /* Failed to install handle into user context */
        handle_decref(chandle);
        return (long)ret;
    }

    handle_decref(chandle);
    return (long)handle_id;
}

/*
 * Called by user task to accept incoming connection
 */
int ipc_port_accept(struct handle* phandle,
                    struct handle** chandle_ptr,
                    const uuid_t** uuid_ptr) {
    struct ipc_port* port;
    struct ipc_chan* server = NULL;
    struct ipc_chan* client = NULL;

    DEBUG_ASSERT(chandle_ptr);
    DEBUG_ASSERT(uuid_ptr);

    if (!phandle || !ipc_is_port(phandle)) {
        LTRACEF("invalid port handle %p\n", phandle);
        return ERR_INVALID_ARGS;
    }

    port = containerof(phandle, struct ipc_port, handle);

    if (port->state != IPC_PORT_STATE_LISTENING) {
        /* Not in listening state: caller should close port.
         * Is it really possible to get here?
         */
        return ERR_CHANNEL_CLOSED;
    }

    /* get next pending connection */
    mutex_acquire(&ipc_port_lock);
    server = list_remove_head_type(&port->pending_list, struct ipc_chan, node);
    mutex_release(&ipc_port_lock);
    if (!server) {
        /* TODO: should we block waiting for a new connection if one
         * is not pending? if so, need an optional argument maybe.
         */
        return ERR_NO_MSG;
    }

    /* it must be a server side channel */
    DEBUG_ASSERT(server->flags & IPC_CHAN_FLAG_SERVER);

    chan_del_ref(server, &server->node_ref); /* drop list ref */

    client = server->peer;

    /* there must be a client, it must be in CONNECTING state and
     * the server must be in ACCEPTING state
     */
    ASSERT(client);
    mutex_acquire(&client->mlock);
    if (server->state != IPC_CHAN_STATE_ACCEPTING ||
        client->state != IPC_CHAN_STATE_CONNECTING) {
        LTRACEF("Drop connection: client %p (0x%x) to server %p (0x%x):\n",
                client, client->state, server, server->state);
        mutex_release(&client->mlock);
        handle_decref(&server->handle);
        return ERR_CHANNEL_CLOSED;
    }

    /* move both client and server into connected state */
    server->state = IPC_CHAN_STATE_CONNECTED;
    client->state = IPC_CHAN_STATE_CONNECTED;
    client->aux_state |= IPC_CHAN_AUX_STATE_CONNECTED;

    /* init server channel handle and return it to caller */
    *chandle_ptr = &server->handle;
    *uuid_ptr = client->uuid;

    mutex_release(&client->mlock);

    /* notify client */
    handle_notify(&client->handle);

    return NO_ERROR;
}

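/*
 * Called by user task to accept the next pending connection on a port.
 *
 * On success - returns handle id for the new server-side channel and
 * copies the peer uuid to user space.
 * On error - returns negative error code.
 */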
long __SYSCALL sys_accept(uint32_t handle_id, user_addr_t user_uuid) {
    struct uctx* ctx = current_uctx();
    struct handle* phandle = NULL;
    struct handle* chandle = NULL;
    int ret;
    handle_id_t new_id;
    const uuid_t* peer_uuid_ptr;

    ret = uctx_handle_get(ctx, handle_id, &phandle);
    if (ret != NO_ERROR)
        return (long)ret;

    ret = ipc_port_accept(phandle, &chandle, &peer_uuid_ptr);
    if (ret != NO_ERROR)
        goto err_accept;

    ret = uctx_handle_install(ctx, chandle, &new_id);
    if (ret != NO_ERROR)
        goto err_install;

    /* copy peer uuid into userspace */
    ret = copy_to_user(user_uuid, peer_uuid_ptr, sizeof(uuid_t));
    if (ret != NO_ERROR)
        goto err_uuid_copy;

    handle_decref(chandle);
    handle_decref(phandle);
    return (long)new_id;

err_uuid_copy:
    uctx_handle_remove(ctx, new_id, NULL);
err_install:
    handle_decref(chandle);
err_accept:
    handle_decref(phandle);
    return (long)ret;
}

#else /* WITH_TRUSTY_IPC */

long __SYSCALL sys_port_create(user_addr_t path,
                               uint32_t num_recv_bufs,
                               uint32_t recv_buf_size,
                               uint32_t flags) {
    return (long)ERR_NOT_SUPPORTED;
}

long __SYSCALL sys_connect(user_addr_t path, uint32_t flags) {
    return (long)ERR_NOT_SUPPORTED;
}

long __SYSCALL sys_accept(uint32_t handle_id, uuid_t* peer_uuid) {
    return (long)ERR_NOT_SUPPORTED;
}

#endif /* WITH_TRUSTY_IPC */