/*
 * Copyright (c) 2013, Google, Inc. All rights reserved
 * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <debug.h>
#include <err.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
#include <kernel/usercopy.h>
#include <lk/macros.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <uapi/mm.h>

#include <lib/trusty/memref.h>
#include <lib/trusty/sys_fd.h>
#include <lib/trusty/trusty_app.h>
#include <lib/trusty/uctx.h>
#include <lib/trusty/uio.h>
#include <platform.h>
#if LK_LIBC_IMPLEMENTATION_IS_MUSL
#include <trusty/io_handle.h>
#endif

#include "util.h"

#define LOCAL_TRACE 0

static ssize_t sys_std_writev(uint32_t fd,
                              user_addr_t iov_uaddr,
                              uint32_t iov_cnt);

static mutex_t fd_lock = MUTEX_INITIAL_VALUE(fd_lock);

static const struct sys_fd_ops sys_std_fd_op = {
        .writev = sys_std_writev,
};

static const struct sys_fd_ops* sys_fds[MAX_SYS_FD_HADLERS] = {
        [1] = &sys_std_fd_op, /* stdout */
        [2] = &sys_std_fd_op, /* stderr */
};

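/*
 * Register a syscall fd handler for a fixed fd number. Returns
 * ERR_INVALID_ARGS if fd is out of range and ERR_ALREADY_EXISTS if a
 * handler is already installed for that fd.
 */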
status_t install_sys_fd_handler(uint32_t fd, const struct sys_fd_ops* ops) {
    status_t ret;

    if (fd >= countof(sys_fds))
        return ERR_INVALID_ARGS;

    mutex_acquire(&fd_lock);
    if (!sys_fds[fd]) {
        sys_fds[fd] = ops;
        ret = NO_ERROR;
    } else {
        ret = ERR_ALREADY_EXISTS;
    }
    mutex_release(&fd_lock);
    return ret;
}

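/*
 * Look up the ops for an fd, preferring a per-context handler from the
 * uctx before falling back to the static sys_fds table.
 */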
static const struct sys_fd_ops* get_sys_fd_handler(uint32_t fd) {
    const struct sys_fd_ops* ops;

    ops = uctx_get_fd_ops(fd);
    if (ops)
        return ops;

    if (fd >= countof(sys_fds))
        return NULL;

    return sys_fds[fd];
}

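/*
 * Check that [addr, addr + size) lies in user space and is backed by
 * physical pages, walking the range one page at a time.
 */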
static bool valid_address(vaddr_t addr, const u_int size) {
    u_int rsize = round_up(size + (addr & (PAGE_SIZE - 1)), PAGE_SIZE);
    addr = round_down(addr, PAGE_SIZE);

    /* Ensure size did not overflow */
    if (rsize < size) {
        return false;
    }

    while (rsize) {
        if (!is_user_address(addr) || !vaddr_to_paddr((void*)addr)) {
            return false;
        }
        addr += PAGE_SIZE;
        rsize -= PAGE_SIZE;
    }

    return true;
}

/* handle stdout/stderr */
static ssize_t sys_std_writev(uint32_t fd,
                              user_addr_t iov_uaddr,
                              uint32_t iov_cnt) {
    /*
     * Even if we're suppressing the output, we need to process the data to
     * produce the correct return code.
     */
    bool should_output = INFO <= LK_LOGLEVEL;
    io_handle_t* io_handle = fd_io_handle(fd);
    if (io_handle == NULL) {
        return ERR_BAD_HANDLE;
    }
    uint8_t buf[128];

    if (should_output) {
        io_lock(io_handle);
    }

    struct iovec_iter iter = iovec_iter_create(iov_cnt);
    size_t total_bytes = 0;
    int ret;

    while (iovec_iter_has_next(&iter)) {
        ret = user_iovec_to_membuf_iter(buf, sizeof(buf), iov_uaddr, &iter);
        if (ret < 0) {
            goto write_done;
        }
        total_bytes += ret;
        if (should_output) {
            ret = io_write(io_handle, (const void*)buf, ret);
            if (ret < 0) {
                goto write_done;
            }
        }
    }
    ret = total_bytes;

write_done:
    if (should_output) {
        io_write_commit(io_handle);
        io_unlock(io_handle);
    }
    return ret;
}

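/* writev syscall entry point: dispatch to the fd's writev handler, if any. */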
long sys_writev(uint32_t fd, user_addr_t iov_uaddr, uint32_t iov_cnt) {
    const struct sys_fd_ops* ops = get_sys_fd_handler(fd);

    if (ops && ops->writev)
        return ops->writev(fd, iov_uaddr, iov_cnt);

    return ERR_NOT_SUPPORTED;
}

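/*
 * brk syscall: lazily allocate the app's heap region on first use, then
 * move the break within [start_brk, end_brk]. A NULL argument (or any
 * out-of-range value) returns the current break unchanged.
 */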
void* sys_brk(void* u_brk) {
    vaddr_t brk = (vaddr_t)u_brk;
    struct trusty_app* trusty_app = current_trusty_app();
    if (!brk)
        return (void*)trusty_app->cur_brk;
    /* check if this is the first sbrk */
    if (!trusty_app->used_brk) {
        uint vmm_flags = VMM_FLAG_QUOTA;
        status_t ret;
        size_t size = round_up(trusty_app->end_brk - trusty_app->start_brk,
                               PAGE_SIZE);
        vmm_flags |= VMM_FLAG_VALLOC_SPECIFIC;
        ret = vmm_alloc(
                trusty_app->aspace, "brk_heap", size,
                (void*)&trusty_app->start_brk, 0, vmm_flags,
                ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        if (ret) {
            TRACEF("sbrk heap allocation failed!\n");
            return (void*)trusty_app->cur_brk;
        }
        trusty_app->used_brk = true;
    }

    /* update brk, if within range */
    if ((brk >= trusty_app->start_brk) && (brk <= trusty_app->end_brk)) {
        trusty_app->cur_brk = brk;
    }
    return (void*)trusty_app->cur_brk;
}

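/* Terminate the calling app; status is passed through to trusty_app_exit(). */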
long sys_exit_etc(int32_t status, uint32_t flags) {
    thread_t* current = get_current_thread();
    LTRACEF("exit called, thread %p, name %s\n", current, current->name);
    trusty_app_exit(status);
    return 0L;
}

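/* readv syscall entry point: dispatch to the fd's readv handler, if any. */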
long sys_readv(uint32_t fd, user_addr_t iov_uaddr, uint32_t iov_cnt) {
    const struct sys_fd_ops* ops = get_sys_fd_handler(fd);

    if (ops && ops->readv)
        return ops->readv(fd, iov_uaddr, iov_cnt);

    return ERR_NOT_SUPPORTED;
}

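/* ioctl syscall entry point: dispatch to the fd's ioctl handler, if any. */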
long sys_ioctl(uint32_t fd, uint32_t req, user_addr_t user_ptr) {
    const struct sys_fd_ops* ops = get_sys_fd_handler(fd);

    if (ops && ops->ioctl)
        return ops->ioctl(fd, req, user_ptr);

    return ERR_NOT_SUPPORTED;
}

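/*
 * nanosleep syscall. On 64-bit kernels with 32-bit user space the 64-bit
 * sleep time arrives split across two 32-bit arguments.
 */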
#if IS_64BIT && USER_32BIT
long sys_nanosleep(uint32_t clock_id,
                   uint32_t flags,
                   uint32_t sleep_time_l,
                   uint32_t sleep_time_h) {
    uint64_t sleep_time = sleep_time_l + ((uint64_t)sleep_time_h << 32);
    thread_sleep_ns(sleep_time);

    return NO_ERROR;
}
#else
long sys_nanosleep(uint32_t clock_id, uint32_t flags, uint64_t sleep_time) {
    thread_sleep_ns(sleep_time);

    return NO_ERROR;
}
#endif

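/*
 * Copy the current time, in nanoseconds, to the user-provided buffer.
 * clock_id and flags are currently ignored.
 */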
long sys_gettime(uint32_t clock_id, uint32_t flags, user_addr_t time) {
    // return time in nanoseconds
    lk_time_ns_t t = current_time_ns();

    return copy_to_user(time, &t, sizeof(int64_t));
}

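/*
 * mmap syscall: map an MMIO region described by a handle, an anonymous
 * region, or a mappable kernel handle, depending on flags. Returns the
 * mapped user address on success, or an error code.
 */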
long sys_mmap(user_addr_t uaddr,
              uint32_t size,
              uint32_t flags,
              uint32_t handle_id) {
    struct trusty_app* trusty_app = current_trusty_app();
    long ret;

    if (flags & MMAP_FLAG_IO_HANDLE) {
        /*
         * Only allow mapping of the IO region specified by the handle id;
         * uaddr must be 0 for now.
         * TBD: Add support for using uaddr as a hint.
         */
        if (uaddr != 0 || flags & MMAP_FLAG_ANONYMOUS) {
            return ERR_INVALID_ARGS;
        }

        ret = trusty_app_setup_mmio(trusty_app, handle_id, &uaddr, size);
        if (ret != NO_ERROR) {
            return ret;
        }

        return uaddr;
    } else if (flags & MMAP_FLAG_ANONYMOUS) {
        /*
         * As above, uaddr must be 0 for now unless MMAP_FLAG_FIXED_NOREPLACE
         * is set.
         * TBD: Add support for using uaddr as a hint.
         */
        if (uaddr != 0 && !(flags & MMAP_FLAG_FIXED_NOREPLACE)) {
            return ERR_INVALID_ARGS;
        }

        uint32_t mmu_flags = 0;
        ret = xlat_flags(flags, flags, &mmu_flags);
        if (ret != NO_ERROR) {
            LTRACEF("error translating memory protection flags in mmap\n");
            return ret;
        }

        vaddr_t vaddr = uaddr;
        void* ptr = (void*)vaddr;
        uint vmm_flags = VMM_FLAG_QUOTA;
        if (flags & MMAP_FLAG_FIXED_NOREPLACE) {
            if (!uaddr) {
                LTRACEF("a fixed allocation requires a non-NULL pointer\n");
                return ERR_INVALID_ARGS;
            }
            vmm_flags |= VMM_FLAG_VALLOC_SPECIFIC;
        }
        if (flags & MMAP_FLAG_NO_PHYSICAL) {
            if (!(flags & MMAP_FLAG_PROT_WRITE)) {
                LTRACEF("a NO_PHYSICAL allocation must allow write access\n");
                return ERR_INVALID_ARGS;
            }
            vmm_flags |= VMM_FLAG_NO_PHYSICAL;
            if (uaddr) {
                LTRACEF("a NO_PHYSICAL allocation cannot be specific\n");
                return ERR_INVALID_ARGS;
            }
        }
        ret = vmm_alloc(trusty_app->aspace, "mmap", size, &ptr, 0, vmm_flags,
                        mmu_flags);
        if (ret != NO_ERROR) {
            LTRACEF("error mapping anonymous region\n");
            return ret;
        }

        return (long)ptr;
    } else {
        struct handle* handle;
        ret = uctx_handle_get(current_uctx(), handle_id, &handle);
        if (ret != NO_ERROR) {
            LTRACEF("mmapped nonexistent handle\n");
            return ret;
        }

        ret = handle_mmap(handle, 0, size, flags, &uaddr);
        handle_decref(handle);
        if (ret != NO_ERROR) {
            LTRACEF("handle_mmap failed\n");
            return ret;
        }

        return uaddr;
    }
}

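/* munmap syscall: free the region containing uaddr from the app's aspace. */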
long sys_munmap(user_addr_t uaddr, uint32_t size) {
    struct trusty_app* trusty_app = current_trusty_app();

    /*
     * vmm_free_region always unmaps the whole region.
     * TBD: Add support to unmap a partial region when there's a use case.
     */
    return vmm_free_region_etc(trusty_app->aspace, uaddr, size, 0);
}

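/*
 * Prepare a user buffer for DMA: walk the backing vmm object, copy one
 * struct dma_pmem per physically contiguous run out to user space, perform
 * the required cache maintenance, and record the range so it can later be
 * checked by sys_finish_dma(). Returns the number of dma_pmem entries
 * written on success.
 */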
long sys_prepare_dma(user_addr_t uaddr,
                     uint32_t size,
                     uint32_t flags,
                     user_addr_t pmem) {
    struct dma_pmem kpmem;
    size_t mapped_size = 0;
    uint32_t entries = 0;
    long ret;
    vaddr_t vaddr = uaddr;

    LTRACEF("uaddr 0x%" PRIxPTR_USER
            ", size 0x%x, flags 0x%x, pmem 0x%" PRIxPTR_USER "\n",
            uaddr, size, flags, pmem);

    if (size == 0)
        return ERR_INVALID_ARGS;

    struct trusty_app* trusty_app = current_trusty_app();
    struct vmm_obj_slice slice;
    vmm_obj_slice_init(&slice);

    ret = vmm_get_obj(trusty_app->aspace, vaddr, size, &slice);
    if (ret != NO_ERROR)
        return ret;

    if (!slice.obj || !slice.obj->ops) {
        ret = ERR_NOT_VALID;
        goto err;
    }

    do {
        paddr_t paddr;
        size_t paddr_size;
        ret = slice.obj->ops->get_page(slice.obj, slice.offset + mapped_size,
                                       &paddr, &paddr_size);
        if (ret != NO_ERROR)
            goto err;

        memset(&kpmem, 0, sizeof(kpmem));
        kpmem.paddr = paddr;
        kpmem.size = MIN(size - mapped_size, paddr_size);

        /*
         * Here, kpmem.size is either the remaining mapping size
         * (size - mapped_size) or the distance to a page boundary that is
         * not physically contiguous with the next page mapped in the given
         * virtual address range. In either case it marks the end of the
         * current kpmem record.
         */

        ret = copy_to_user(pmem, &kpmem, sizeof(struct dma_pmem));
        if (ret != NO_ERROR)
            goto err;

        pmem += sizeof(struct dma_pmem);

        mapped_size += kpmem.size;
        entries++;

    } while (mapped_size < size && (flags & DMA_FLAG_MULTI_PMEM));

    if (flags & DMA_FLAG_FROM_DEVICE)
        arch_clean_invalidate_cache_range(vaddr, mapped_size);
    else
        arch_clean_cache_range(vaddr, mapped_size);

    if (!(flags & DMA_FLAG_ALLOW_PARTIAL) && mapped_size != size) {
        ret = ERR_BAD_LEN;
        goto err;
    }

    ret = trusty_app_allow_dma_range(trusty_app, slice.obj, slice.offset,
                                     slice.size, vaddr, flags);
    if (ret != NO_ERROR) {
        goto err;
    }

    ret = entries; /* fallthrough */
err:
    vmm_obj_slice_release(&slice);
    return ret;
}

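/*
 * Complete a DMA transaction started with sys_prepare_dma(): invalidate the
 * cache for device-to-memory transfers and release the recorded DMA range.
 */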
long sys_finish_dma(user_addr_t uaddr, uint32_t size, uint32_t flags) {
    LTRACEF("uaddr 0x%" PRIxPTR_USER ", size 0x%x, flags 0x%x\n", uaddr, size,
            flags);

    /* check buffer is in task's address space */
    if (!valid_address((vaddr_t)uaddr, size))
        return ERR_INVALID_ARGS;

    if (flags & DMA_FLAG_FROM_DEVICE)
        arch_clean_invalidate_cache_range(uaddr, size);

    /*
     * Check that app prepared dma on the provided virtual address range.
     * Returns ERR_NOT_FOUND if the range wasn't found. One way this can
     * happen is when an app finishes a dma range that it didn't prepare.
     */
    return trusty_app_destroy_dma_range((vaddr_t)uaddr, size);
}

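/* Set the user-space TLS pointer for the calling thread. */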
long sys_set_user_tls(user_addr_t uaddr) {
    arch_set_user_tls(uaddr);
    return NO_ERROR;
}

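/*
 * Create a memref handle covering [uaddr, uaddr + size) of the calling
 * app's address space and install it into the app's handle table. Returns
 * the new handle id on success, or an error code.
 */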
long sys_memref_create(user_addr_t uaddr,
                       user_size_t size,
                       uint32_t mmap_prot) {
    struct trusty_app* app = current_trusty_app();
    struct handle* handle;
    handle_id_t id;
    status_t rc = memref_create_from_aspace(app->aspace, uaddr, size, mmap_prot,
                                            &handle);
    if (rc) {
        LTRACEF("failed to create memref\n");
        return rc;
    }

    int rc_uctx = uctx_handle_install(current_uctx(), handle, &id);
    /*
     * uctx_handle_install takes a reference to the handle, so we release
     * ours now. If it failed, this will release it. If it succeeded, this
     * prevents us from leaking when the application is destroyed.
     */
    handle_decref(handle);
    if (rc_uctx) {
        LTRACEF("failed to install handle\n");
        return rc_uctx;
    }

    LTRACEF("memref created: %d\n", id);
    return id;
}