/*
 * Copyright (c) 2013, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define LOCAL_TRACE 0

/**
 * @file
 * @brief  IPC message management primitives
 * @defgroup ipc IPC
 *
 * Provides low-level data structures for managing the message
 * areas of IPC contexts.
 *
 * Also provides the user syscall implementations for the message
 * send/receive mechanism.
 *
 * @{
 */

#include <assert.h>
#include <err.h>
#include <kernel/usercopy.h>
#include <list.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>

#include <lib/syscall.h>

#if WITH_TRUSTY_IPC

#include <lib/trusty/handle.h>
#include <lib/trusty/ipc.h>
#include <lib/trusty/ipc_msg.h>
#include <lib/trusty/trusty_app.h>
#include <lib/trusty/uctx.h>

enum {
    MSG_ITEM_STATE_FREE = 0,
    MSG_ITEM_STATE_FILLED = 1,
    MSG_ITEM_STATE_READ = 2,
};

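/*
 * Lifecycle note: each message slot cycles FREE -> FILLED (after a
 * successful send) -> READ (after the receiver gets the message) ->
 * FREE again (after the receiver retires it via put_msg). The three
 * lists in ipc_msg_queue below track exactly these states.
 */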
struct msg_item {
    uint8_t id;
    uint8_t state;
    uint num_handles;
    struct handle* handles[MAX_MSG_HANDLES];
    size_t len;
    struct list_node node;
};

struct ipc_msg_queue {
    struct list_node free_list;
    struct list_node filled_list;
    struct list_node read_list;

    uint num_items;
    size_t item_sz;

    uint8_t* buf;

    /* store the message descriptors in the queue,
     * and the buffer separately. The buffer could
     * eventually move to a separate area that can
     * be mapped into the process directly.
     */
    struct msg_item items[0];
};

/**
 * @brief  Create an IPC message queue
 *
 * Stores up to a predefined number of equal-sized items in a circular
 * buffer (FIFO).
 *
 * @param num_items   Number of messages we need to store.
 * @param item_sz     Size of each message item.
 * @param mq          Where to store the pointer to the newly allocated
 *                    message queue.
 *
 * @return  Returns NO_ERROR on success, ERR_NO_MEMORY on error.
 */
int ipc_msg_queue_create(uint num_items,
                         size_t item_sz,
                         struct ipc_msg_queue** mq) {
    struct ipc_msg_queue* tmp_mq;
    int ret;

    tmp_mq = calloc(1, (sizeof(struct ipc_msg_queue) +
                        num_items * sizeof(struct msg_item)));
    if (!tmp_mq) {
        dprintf(CRITICAL, "cannot allocate memory for message queue\n");
        return ERR_NO_MEMORY;
    }

    tmp_mq->buf = malloc(num_items * item_sz);
    if (!tmp_mq->buf) {
        dprintf(CRITICAL, "cannot allocate memory for message queue buf\n");
        ret = ERR_NO_MEMORY;
        goto err_alloc_buf;
    }

    tmp_mq->num_items = num_items;
    tmp_mq->item_sz = item_sz;
    list_initialize(&tmp_mq->free_list);
    list_initialize(&tmp_mq->filled_list);
    list_initialize(&tmp_mq->read_list);

    for (uint i = 0; i < num_items; i++) {
        tmp_mq->items[i].id = i;
        list_add_tail(&tmp_mq->free_list, &tmp_mq->items[i].node);
    }
    *mq = tmp_mq;
    return NO_ERROR;

err_alloc_buf:
    free(tmp_mq);
    return ret;
}

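#if 0
/*
 * Illustrative sketch only (kept out of the build): a minimal
 * create/destroy round trip for a message queue. The geometry here
 * (4 slots of 4096 bytes) is an arbitrary example value, not anything
 * mandated by this file.
 */
static int example_msg_queue_roundtrip(void) {
    struct ipc_msg_queue* mq;
    int rc = ipc_msg_queue_create(4, 4096, &mq);
    if (rc != NO_ERROR)
        return rc;
    /* ... attach mq to a channel and exchange messages ... */
    ipc_msg_queue_destroy(mq);
    return NO_ERROR;
}
#endif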
void ipc_msg_queue_destroy(struct ipc_msg_queue* mq) {
    /* release handles if any */
    for (uint i = 0; i < mq->num_items; i++) {
        struct msg_item* item = &mq->items[i];
        if (item->num_handles) {
            for (uint j = 0; j < item->num_handles; j++) {
                handle_decref(item->handles[j]);
            }
        }
    }
    free(mq->buf);
    free(mq);
}

bool ipc_msg_queue_is_empty(struct ipc_msg_queue* mq) {
    return list_is_empty(&mq->filled_list);
}

bool ipc_msg_queue_is_full(struct ipc_msg_queue* mq) {
    return list_is_empty(&mq->free_list);
}

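/*
 * The backing buffer is a flat array of num_items fixed-size slots:
 * slot i occupies bytes [i * item_sz, (i + 1) * item_sz). An item's id
 * doubles as its slot index, so the mapping below is fixed for the
 * life of the queue.
 */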
static inline uint8_t* msg_queue_get_buf(struct ipc_msg_queue* mq,
                                         struct msg_item* item) {
    return mq->buf + item->id * mq->item_sz;
}

static inline struct msg_item* msg_queue_get_item(struct ipc_msg_queue* mq,
                                                  uint32_t id) {
    return id < mq->num_items ? &mq->items[id] : NULL;
}

static int check_channel(struct handle* chandle) {
    if (unlikely(!chandle))
        return ERR_INVALID_ARGS;

    if (unlikely(!ipc_is_channel(chandle)))
        return ERR_INVALID_ARGS;

    struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);

    if (unlikely(!chan->peer))
        return ERR_NOT_READY;

    return NO_ERROR;
}

static ssize_t kern_msg_write_locked(struct ipc_msg_queue* mq,
                                     struct msg_item* item,
                                     const struct ipc_msg_kern* msg) {
    ssize_t ret = NO_ERROR;
    uint8_t* buf = msg_queue_get_buf(mq, item);

    if (msg->num_handles) {
        if (msg->num_handles > MAX_MSG_HANDLES) {
            LTRACEF("sending too many (%u) handles\n", msg->num_handles);
            return ERR_TOO_BIG;
        }

        if (!msg->handles)
            return ERR_INVALID_ARGS;
    }

    /* copy any message body */
    if (likely(msg->num_iov)) {
        ret = kern_iovec_to_membuf(buf, mq->item_sz,
                                   (const struct iovec_kern*)msg->iov,
                                   msg->num_iov);
        if (ret < 0)
            return ret;
    }

    /* copy attached handles */
    for (uint i = 0; i < msg->num_handles; i++) {
        if (!msg->handles[i]) {
            ret = ERR_BAD_HANDLE;
            goto err_bad_handle;
        }

        if (!handle_is_sendable(msg->handles[i])) {
            ret = ERR_NOT_ALLOWED;
            goto err_bad_handle;
        }

        /* grab an additional reference */
        handle_incref(msg->handles[i]);
        item->handles[i] = msg->handles[i];
        item->num_handles++;
    }

    return ret;

err_bad_handle:
    for (uint i = 0; i < item->num_handles; i++) {
        handle_decref(item->handles[i]);
        item->handles[i] = NULL;
    }
    item->num_handles = 0;

    return ret;
}

static ssize_t user_msg_write_locked(struct ipc_msg_queue* mq,
                                     struct msg_item* item,
                                     const struct ipc_msg_user* msg,
                                     struct uctx* uctx) {
    int rc;
    ssize_t ret;
    uint8_t* buf = msg_queue_get_buf(mq, item);

    if (msg->num_handles > MAX_MSG_HANDLES) {
        LTRACEF("sending too many (%u) handles\n", msg->num_handles);
        return ERR_TOO_BIG;
    }

    /* copy message body */
    ret = user_iovec_to_membuf(buf, mq->item_sz, msg->iov, msg->num_iov);
    if (ret < 0)
        return ret;

    if (!msg->num_handles)
        return ret; /* no handles, just return body */

    /* copy handle ids from user space */
    handle_id_t ids[MAX_MSG_HANDLES];

    rc = copy_from_user(&ids, msg->handles,
                        msg->num_handles * sizeof(handle_id_t));
    if (unlikely(rc != NO_ERROR))
        return rc;

    /* Need to send all or none */
    for (uint i = 0; i < msg->num_handles; i++) {
        rc = uctx_handle_get(uctx, ids[i], &item->handles[i]);
        if (unlikely(rc != NO_ERROR)) {
            goto err_get;
        }
        item->num_handles++;

        if (!handle_is_sendable(item->handles[i])) {
            rc = ERR_NOT_ALLOWED;
            goto err_send;
        }
    }

    return ret;

err_send:
err_get:
    for (uint i = 0; i < item->num_handles; i++) {
        handle_decref(item->handles[i]);
        item->handles[i] = NULL;
    }
    item->num_handles = 0;

    return rc;
}

static int msg_write_locked(struct ipc_chan* chan,
                            const void* msg,
                            struct uctx* uctx) {
    ssize_t ret;
    struct msg_item* item;
    struct ipc_chan* peer = chan->peer;

    if (peer->state != IPC_CHAN_STATE_CONNECTED) {
        if (likely(peer->state == IPC_CHAN_STATE_DISCONNECTING))
            return ERR_CHANNEL_CLOSED;
        else
            return ERR_NOT_READY;
    }

    struct ipc_msg_queue* mq = peer->msg_queue;

    item = list_peek_head_type(&mq->free_list, struct msg_item, node);
    if (item == NULL) {
        peer->aux_state |= IPC_CHAN_AUX_STATE_PEER_SEND_BLOCKED;
        return ERR_NOT_ENOUGH_BUFFER;
    }
    if (!list_is_empty(&chan->msg_queue->read_list)) {
        /**
         * The application shall retire read messages (via the put_msg
         * API) before sending responses. This not only avoids wasting
         * receive-queue slots but, more importantly, prevents a
         * hard-to-debug race condition where incoming messages from
         * Linux are silently dropped because the receive queue is
         * freed only after the response is sent.
         */
        TRACEF("WARNING: sending outgoing messages while incoming messages are in read state is not allowed\n");
        /**
         * TODO: return an error after sufficient soak time of the
         * warning log:
         * return ERR_NOT_ALLOWED;
         */
    }
    DEBUG_ASSERT(item->state == MSG_ITEM_STATE_FREE);

    item->num_handles = 0;
    item->len = 0;

    if (uctx)
        ret = user_msg_write_locked(mq, item, msg, uctx);
    else
        ret = kern_msg_write_locked(mq, item, msg);

    if (ret < 0)
        return ret;

    item->len = (size_t)ret;
    list_delete(&item->node);
    list_add_tail(&mq->filled_list, &item->node);
    item->state = MSG_ITEM_STATE_FILLED;

    return item->len;
}

/*
 * Check that the specified message id is valid, that the message is in
 * read state, and that the provided offset is within message bounds.
 */
static struct msg_item* msg_check_read_item(struct ipc_msg_queue* mq,
                                            uint32_t msg_id,
                                            uint32_t offset) {
    struct msg_item* item;

    item = msg_queue_get_item(mq, msg_id);
    if (!item) {
        LTRACEF("invalid message id %u\n", msg_id);
        return NULL;
    }

    if (item->state != MSG_ITEM_STATE_READ) {
        LTRACEF("message %d is not in READ state (0x%x)\n", item->id,
                item->state);
        return NULL;
    }

    if (offset > item->len) {
        LTRACEF("invalid offset %u\n", offset);
        return NULL;
    }

    return item;
}

/*
 * Reads the specified message by copying message data into the iov list
 * and associated handles to destination handle array provided by kmsg.
 * The message must have been previously moved to the read list (and thus
 * put into READ state).
 */
static int kern_msg_read_locked(struct ipc_msg_queue* mq,
                                int32_t msg_id,
                                uint32_t offset,
                                struct ipc_msg_kern* kmsg) {
    int ret = 0;
    struct msg_item* item;

    item = msg_check_read_item(mq, msg_id, offset);
    if (!item)
        return ERR_INVALID_ARGS;

    const uint8_t* buf = msg_queue_get_buf(mq, item) + offset;
    size_t bytes_left = item->len - offset;

    if (likely(kmsg->num_iov)) {
        ret = membuf_to_kern_iovec((const struct iovec_kern*)kmsg->iov,
                                   kmsg->num_iov, buf, bytes_left);
        if (ret < 0)
            return ret;
    }

    uint hcnt = MIN(kmsg->num_handles, item->num_handles);
    for (uint i = 0; i < hcnt; i++) {
        handle_incref(item->handles[i]);
        kmsg->handles[i] = item->handles[i];
    }

    return ret;
}

/*
 * Reads the specified message by copying message data to user space (iov list
 * is provided by umsg) and associated handles to destination handle array
 * provided by caller. The message must have been previously moved to the read
 * list (and thus put into READ state).
 */
static int user_msg_read_locked(struct ipc_msg_queue* mq,
                                uint32_t msg_id,
                                uint32_t offset,
                                struct ipc_msg_user* umsg,
                                struct handle** ph,
                                uint* phcnt) {
    int ret;
    struct msg_item* item;

    item = msg_check_read_item(mq, msg_id, offset);
    if (!item)
        return ERR_INVALID_ARGS;

    const uint8_t* buf = msg_queue_get_buf(mq, item) + offset;
    size_t bytes_left = item->len - offset;

    ret = membuf_to_user_iovec(umsg->iov, umsg->num_iov, buf, bytes_left);
    if (ret < 0)
        return ret;

    /* return out handles with additional refs */
    uint hcnt = MIN(umsg->num_handles, item->num_handles);
    for (uint i = 0; i < hcnt; i++) {
        handle_incref(item->handles[i]);
        ph[i] = item->handles[i];
    }
    *phcnt = hcnt;

    return ret;
}
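/*
 * Note: neither read path retires the message; it stays on read_list
 * in READ state, so a large message can be consumed in pieces by
 * repeated reads with increasing offsets before the slot is finally
 * released via put_msg.
 */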

/*
 * Looks at the head of the filled message list. It should be followed
 * by a call to msg_get_filled_locked() to actually move the message to
 * the readable list.
 */
static int msg_peek_next_filled_locked(struct ipc_msg_queue* mq,
                                       struct ipc_msg_info* info) {
    struct msg_item* item;

    item = list_peek_head_type(&mq->filled_list, struct msg_item, node);
    if (!item)
        return ERR_NO_MSG;

    info->len = item->len;
    info->id = item->id;
    info->num_handles = item->num_handles;

    return NO_ERROR;
}

/*
 * Moves the item at the head of the filled list to the readable list.
 */
static void msg_get_filled_locked(struct ipc_msg_queue* mq) {
    struct msg_item* item;

    item = list_peek_head_type(&mq->filled_list, struct msg_item, node);
    DEBUG_ASSERT(item);

    list_delete(&item->node);
    list_add_tail(&mq->read_list, &item->node);
    item->state = MSG_ITEM_STATE_READ;
}

static int msg_put_read_locked(struct ipc_chan* chan,
                               uint32_t msg_id,
                               struct handle** ph,
                               uint* phcnt) {
    DEBUG_ASSERT(chan);
    DEBUG_ASSERT(chan->msg_queue);
    DEBUG_ASSERT(ph);
    DEBUG_ASSERT(phcnt);

    struct ipc_msg_queue* mq = chan->msg_queue;
    struct msg_item* item = msg_queue_get_item(mq, msg_id);

    if (!item || item->state != MSG_ITEM_STATE_READ)
        return ERR_INVALID_ARGS;

    list_delete(&item->node);

    /* detach handles from table if any */
    for (uint j = 0; j < item->num_handles; j++) {
        ph[j] = item->handles[j];
        item->handles[j] = NULL;
    }
    *phcnt = item->num_handles;
    item->num_handles = 0;

    /* put it on the head since it was just taken off here */
    list_add_head(&mq->free_list, &item->node);
    item->state = MSG_ITEM_STATE_FREE;

    return NO_ERROR;
}

long __SYSCALL sys_send_msg(uint32_t handle_id, user_addr_t user_msg) {
    struct handle* chandle;
    struct ipc_msg_user tmp_msg;
    int ret;
    struct uctx* uctx = current_uctx();

    /* copy message descriptor from user space */
    ret = copy_from_user(&tmp_msg, user_msg, sizeof(struct ipc_msg_user));
    if (unlikely(ret != NO_ERROR))
        return (long)ret;

    /* grab handle */
    ret = uctx_handle_get(uctx, handle_id, &chandle);
    if (unlikely(ret != NO_ERROR))
        return (long)ret;

    ret = check_channel(chandle);
    if (likely(ret == NO_ERROR)) {
        struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);
        mutex_acquire(&chan->peer->mlock);
        ret = msg_write_locked(chan, &tmp_msg, uctx);
        mutex_release(&chan->peer->mlock);
        if (ret >= 0) {
            /* and notify target */
            handle_notify(&chan->peer->handle);
        }
    }
    handle_decref(chandle);
    return (long)ret;
}

int ipc_send_msg(struct handle* chandle, struct ipc_msg_kern* msg) {
    int ret;

    if (!msg)
        return ERR_INVALID_ARGS;

    ret = check_channel(chandle);
    if (likely(ret == NO_ERROR)) {
        struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);
        mutex_acquire(&chan->peer->mlock);
        ret = msg_write_locked(chan, msg, NULL);
        mutex_release(&chan->peer->mlock);
        if (ret >= 0) {
            handle_notify(&chan->peer->handle);
        }
    }
    return ret;
}
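#if 0
/*
 * Illustrative sketch only (kept out of the build): sending a small
 * payload from kernel code over an already-connected channel handle.
 * The iovec field names follow their uses in this file; the exact
 * struct layouts are assumptions, as is the example payload.
 */
static int example_kern_send(struct handle* chandle) {
    char payload[] = "ping";
    struct iovec_kern iov = {
            .iov_base = payload,
            .iov_len = sizeof(payload),
    };
    struct ipc_msg_kern msg = {
            .num_iov = 1,
            .iov = &iov,
            .num_handles = 0,
            .handles = NULL,
    };
    /* returns the number of bytes queued, or a negative error */
    return ipc_send_msg(chandle, &msg);
}
#endif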

long __SYSCALL sys_get_msg(uint32_t handle_id, user_addr_t user_msg_info) {
    struct handle* chandle;
    struct ipc_msg_info mi_kern;
    int ret;

    /* grab handle */
    ret = uctx_handle_get(current_uctx(), handle_id, &chandle);
    if (ret != NO_ERROR)
        return (long)ret;

    /* check if channel handle is a valid one */
    ret = check_channel(chandle);
    if (likely(ret == NO_ERROR)) {
        struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);
        mutex_acquire(&chan->mlock);
        /* peek next filled message */
        ret = msg_peek_next_filled_locked(chan->msg_queue, &mi_kern);
        if (likely(ret == NO_ERROR)) {
            /* copy it to user space */
            struct ipc_msg_info_user mi_user;

            memset(&mi_user, 0, sizeof(mi_user));
            mi_user.len = (user_size_t)mi_kern.len;
            mi_user.id = mi_kern.id;
            mi_user.num_handles = mi_kern.num_handles;
            ret = copy_to_user(user_msg_info, &mi_user, sizeof(mi_user));
            if (likely(ret == NO_ERROR)) {
                /* and make it readable */
                msg_get_filled_locked(chan->msg_queue);
            }
        }
        mutex_release(&chan->mlock);
    }
    handle_decref(chandle);
    return (long)ret;
}

int ipc_get_msg(struct handle* chandle, struct ipc_msg_info* msg_info) {
    int ret;

    /* check if channel handle is a valid one */
    ret = check_channel(chandle);
    if (likely(ret == NO_ERROR)) {
        struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);
        mutex_acquire(&chan->mlock);
        /* peek next filled message */
        ret = msg_peek_next_filled_locked(chan->msg_queue, msg_info);
        if (likely(ret == NO_ERROR)) {
            /* and make it readable */
            msg_get_filled_locked(chan->msg_queue);
        }
        mutex_release(&chan->mlock);
    }
    return ret;
}

long __SYSCALL sys_put_msg(uint32_t handle_id, uint32_t msg_id) {
    struct handle* chandle;

    /* grab handle */
    int ret = uctx_handle_get(current_uctx(), handle_id, &chandle);
    if (unlikely(ret != NO_ERROR))
        return (long)ret;

    /* and put it to rest */
    ret = ipc_put_msg(chandle, msg_id);
    handle_decref(chandle);

    return (long)ret;
}

int ipc_put_msg(struct handle* chandle, uint32_t msg_id) {
    int ret;

    /* check if channel handle is a valid one */
    ret = check_channel(chandle);
    if (unlikely(ret != NO_ERROR))
        return ret;

    struct handle* h[MAX_MSG_HANDLES];
    uint hcnt = 0;
    bool need_notify = false;
    struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);
    /* retire message */
    mutex_acquire(&chan->mlock);
    ret = msg_put_read_locked(chan, msg_id, h, &hcnt);
    if (ret == NO_ERROR &&
        (chan->aux_state & IPC_CHAN_AUX_STATE_PEER_SEND_BLOCKED)) {
        chan->aux_state &= ~IPC_CHAN_AUX_STATE_PEER_SEND_BLOCKED;
        need_notify = true;
    }
    mutex_release(&chan->mlock);

    /* drop handle references outside of the lock */
    for (uint i = 0; i < hcnt; i++) {
        handle_decref(h[i]);
    }

    if (need_notify) {
        mutex_acquire(&chan->peer->mlock);
        chan->peer->aux_state |= IPC_CHAN_AUX_STATE_SEND_UNBLOCKED;
        mutex_release(&chan->peer->mlock);
        handle_notify(&chan->peer->handle);
    }
    return ret;
}

static void user_remove_multiple(struct uctx* uctx,
                                 handle_id_t* hids,
                                 uint hcnt) {
    for (uint i = 0; i < hcnt; i++)
        uctx_handle_remove(uctx, hids[i], NULL);
}

static int user_install_multiple(struct uctx* uctx,
                                 struct handle** hptrs,
                                 handle_id_t* hids,
                                 uint hcnt) {
    for (uint i = 0; i < hcnt; i++) {
        int rc = uctx_handle_install(uctx, hptrs[i], &hids[i]);
        if (rc) {
            user_remove_multiple(uctx, hids, i);
            return rc;
        }
    }
    return 0;
}

static int user_return_handles(struct uctx* uctx,
                               user_addr_t uhptrs,
                               struct handle** hptrs,
                               uint hcnt) {
    int rc;
    handle_id_t hids[MAX_MSG_HANDLES];

    if (hcnt > MAX_MSG_HANDLES) {
        LTRACEF("returning too many (%u) handles\n", hcnt);
        return ERR_TOO_BIG;
    }

    /* install handles */
    rc = user_install_multiple(uctx, hptrs, hids, hcnt);
    if (rc < 0)
        return rc;

    /* copy out handle ids */
    rc = copy_to_user(uhptrs, hids, hcnt * sizeof(handle_id_t));
    if (rc < 0) {
        /* remove installed handles in case of error */
        user_remove_multiple(uctx, hids, hcnt);
        return rc;
    }
    return 0;
}

long __SYSCALL sys_read_msg(uint32_t handle_id,
                            uint32_t msg_id,
                            uint32_t offset,
                            user_addr_t user_msg) {
    struct handle* chandle;
    struct ipc_msg_user msg;
    int ret;
    struct uctx* uctx = current_uctx();

    /* get msg descriptor from user space */
    ret = copy_from_user(&msg, user_msg, sizeof(struct ipc_msg_user));
    if (unlikely(ret != NO_ERROR))
        return (long)ret;

    /* grab handle */
    ret = uctx_handle_get(uctx, handle_id, &chandle);
    if (unlikely(ret != NO_ERROR))
        return (long)ret;

    /* check if channel handle is a valid one */
    ret = check_channel(chandle);
    if (ret == NO_ERROR) {
        struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);
        struct handle* h[MAX_MSG_HANDLES];
        uint hcnt = 0;

        mutex_acquire(&chan->mlock);
        ret = user_msg_read_locked(chan->msg_queue, msg_id, offset, &msg, h,
                                   &hcnt);
        mutex_release(&chan->mlock);

        if (ret >= 0 && hcnt) {
            /* install into caller handle table and copy them out */
            int rc = user_return_handles(uctx, msg.handles, h, hcnt);
            if (rc < 0) {
                ret = rc;
            }

            /* drop references obtained in user_msg_read_locked */
            for (uint i = 0; i < hcnt; i++)
                handle_decref(h[i]);
        }
    }
    handle_decref(chandle);

    return (long)ret;
}

int ipc_read_msg(struct handle* chandle,
                 uint32_t msg_id,
                 uint32_t offset,
                 struct ipc_msg_kern* msg) {
    int ret;

    if (!msg)
        return ERR_INVALID_ARGS;

    ret = check_channel(chandle);
    if (ret == NO_ERROR) {
        struct ipc_chan* chan = containerof(chandle, struct ipc_chan, handle);
        mutex_acquire(&chan->mlock);
        ret = kern_msg_read_locked(chan->msg_queue, msg_id, offset, msg);
        mutex_release(&chan->mlock);
    }
    return ret;
}
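#if 0
/*
 * Illustrative sketch only (kept out of the build): the kernel-side
 * receive sequence implied by the APIs above -- peek the next filled
 * message, read its payload, then retire it so the slot goes back on
 * the free list. Buffer sizing is an example value and the struct
 * layouts are assumed from their uses in this file.
 */
static int example_kern_receive(struct handle* chandle) {
    struct ipc_msg_info mi;
    int rc = ipc_get_msg(chandle, &mi);
    if (rc != NO_ERROR)
        return rc;

    uint8_t buf[256];
    struct iovec_kern iov = {
            .iov_base = buf,
            .iov_len = sizeof(buf),
    };
    struct ipc_msg_kern msg = {
            .num_iov = 1,
            .iov = &iov,
            .num_handles = 0,
            .handles = NULL,
    };
    rc = ipc_read_msg(chandle, mi.id, 0, &msg);

    /* retire the message even if the read failed */
    (void)ipc_put_msg(chandle, mi.id);
    return rc;
}
#endif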

#else /* WITH_TRUSTY_IPC */

long __SYSCALL sys_send_msg(uint32_t handle_id, user_addr_t user_msg) {
    return (long)ERR_NOT_SUPPORTED;
}

long __SYSCALL sys_get_msg(uint32_t handle_id, user_addr_t user_msg_info) {
    return (long)ERR_NOT_SUPPORTED;
}

long __SYSCALL sys_put_msg(uint32_t handle_id, uint32_t msg_id) {
    return (long)ERR_NOT_SUPPORTED;
}

long __SYSCALL sys_read_msg(uint32_t handle_id,
                            uint32_t msg_id,
                            uint32_t offset,
                            user_addr_t user_msg) {
    return (long)ERR_NOT_SUPPORTED;
}

#endif /* WITH_TRUSTY_IPC */