1 /*
2 * Copyright (c) 2015, Google Inc. All rights reserved
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <err.h>
26 #include <kernel/usercopy.h>
27 #include <lib/unittest/unittest.h>
28 #include <lk/init.h>
29 #include <stdio.h>
30 #include <string.h>
31 #include <trusty/string.h>
32 #ifdef ARCH_ARM64
33 #include <arch/safecopy.h>
34 #endif
35
36 #define PORT_NAME "com.android.kernel.usercopy-unittest"
37
38 #define TEST_BUF_SIZE (16)
39 #define TEST_BUF1_SIZE (TEST_BUF_SIZE / 2)
40 #define TEST_BUF2_SIZE (TEST_BUF_SIZE - TEST_BUF1_SIZE)
41 #define TEST_BUF_COPY_START (1)
42 #define TEST_BUF_COPY_SIZE (TEST_BUF_SIZE - TEST_BUF_COPY_START - 1)
43 #define TEST_BUF1_COPY_SIZE (TEST_BUF1_SIZE - TEST_BUF_COPY_START)
44 #define TEST_BUF2_COPY_SIZE (TEST_BUF_COPY_SIZE - TEST_BUF1_COPY_SIZE)
45 #define TEST_BUF_COPY_LAST (TEST_BUF_SIZE - 1 - 1)
46 #define TEST_BUF2_COPY_LAST (TEST_BUF_COPY_LAST - TEST_BUF1_SIZE)
47
48 #define SRC_DATA (0x22)
49 #define DEST_DATA (0x11)
50
51 #define FLAGS_NO_PAGE (ARCH_MMU_FLAG_INVALID)
52 #define FLAGS_NO_USER (0u)
53 #define FLAGS_RO_USER (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)
54 #define FLAGS_RW_USER (ARCH_MMU_FLAG_PERM_USER)
55
56 #define STACK_ADDR_IDX (0)
57 #define HEAP_ADDR_IDX (1)
58 #define GLOBAL_ADDR_IDX (2)
59
60 #define START_PAGE_ADDR ((void*)(PAGE_SIZE * 0x10))
61 #define TEST_BUF_ADDR \
62 ((user_addr_t)((uintptr_t)(START_PAGE_ADDR + PAGE_SIZE - TEST_BUF1_SIZE)))
63
get_addr_param(void)64 static inline user_addr_t get_addr_param(void) {
65 const void* const* param_arr = GetParam();
66 const user_addr_t* addr = param_arr[0];
67 return *addr;
68 }
69
get_start_flags_param(void)70 static inline uint32_t get_start_flags_param(void) {
71 const void* const* param_arr = GetParam();
72 const uint32_t* start_flags = param_arr[1];
73 return *start_flags;
74 }
75
get_end_flags_param(void)76 static inline uint32_t get_end_flags_param(void) {
77 const void* const* param_arr = GetParam();
78 const uint32_t* end_flags = param_arr[2];
79 return *end_flags;
80 }
81
/*
 * Count the bytes in @buf that do not match the expected fill byte @c.
 * Returns 0 when the whole @size-byte region matches.
 */
static int checkbuf(const char* buf, char c, size_t size) {
    int mismatches = 0;
    const char* end = buf + size;

    while (buf < end) {
        mismatches += (*buf++ != c);
    }
    return mismatches;
}
91
usercopy_test_init_buf(char * kbuf1,char * kbuf2,uint8_t val,int null_offset)92 static void usercopy_test_init_buf(char* kbuf1,
93 char* kbuf2,
94 uint8_t val,
95 int null_offset) {
96 if (kbuf1) {
97 memset(kbuf1, val, TEST_BUF1_SIZE);
98 if (null_offset >= 0 && null_offset < TEST_BUF1_SIZE) {
99 kbuf1[null_offset] = '\0';
100 }
101 }
102 if (kbuf2) {
103 memset(kbuf2, val, TEST_BUF2_SIZE);
104 if (null_offset >= TEST_BUF1_SIZE && null_offset < TEST_BUF_SIZE) {
105 kbuf2[null_offset - TEST_BUF1_SIZE] = '\0';
106 }
107 }
108 }
109
/* Per-test fixture state: the address space the test's user pages live in. */
typedef struct {
    struct vmm_aspace* aspace;
} usercopytest_t;
113
/*
 * Fixture setup: create a fresh address space and map up to two adjacent
 * pages starting at START_PAGE_ADDR using the per-parameter mmu flags.
 * A flag value of FLAGS_NO_PAGE leaves that page unmapped so copies
 * spanning it must fault. ASSERT_EQ failures jump to test_abort.
 */
TEST_F_SETUP(usercopytest) {
    int ret;
    void* addr = START_PAGE_ADDR;
    uint32_t start_flags = get_start_flags_param();
    uint32_t end_flags = get_end_flags_param();

    _state->aspace = NULL;

    ret = vmm_create_aspace(&_state->aspace, "usercopy_test", 0);
    ASSERT_EQ(NO_ERROR, ret);

    if (start_flags != FLAGS_NO_PAGE) {
        /* No end guard so the second page can be mapped right after it. */
        ret = vmm_alloc(_state->aspace, "start-page", PAGE_SIZE, &addr, 0,
                        VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_END_GUARD,
                        start_flags | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        ASSERT_EQ(NO_ERROR, ret);
        ASSERT_EQ(START_PAGE_ADDR, addr);
    }

    addr += PAGE_SIZE;

    if (end_flags != FLAGS_NO_PAGE) {
        /* No start guard so this page is contiguous with the first. */
        ret = vmm_alloc(_state->aspace, "end-page", PAGE_SIZE, &addr, 0,
                        VMM_FLAG_VALLOC_SPECIFIC | VMM_FLAG_NO_START_GUARD,
                        end_flags | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        ASSERT_EQ(NO_ERROR, ret);
        ASSERT_EQ(START_PAGE_ADDR + PAGE_SIZE, addr);
    }

    vmm_set_active_aspace(_state->aspace);

test_abort:
    return;
}
148
/* Fixture teardown: detach from and free the test address space, if any. */
TEST_F_TEARDOWN(usercopytest) {
    vmm_set_active_aspace(NULL);

    if (_state->aspace) {
        vmm_free_aspace(_state->aspace);
    }
}
156
/*
 * Exercise copy_to_user() on a buffer straddling the two test pages.
 * Verifies the return value, that only the requested range is written,
 * and that a fault on either page never corrupts bytes outside the
 * writable region.
 */
TEST_P(usercopytest, copy_to_user) {
    user_addr_t addr = get_addr_param();
    uint32_t arch_mmu_flags_start = get_start_flags_param();
    uint32_t arch_mmu_flags_end = get_end_flags_param();
    int ret;
    char src_buf[TEST_BUF_SIZE];
    char* dest_kbuf1; /* kernel alias of the start-page half of the buffer */
    char* dest_kbuf2; /* kernel alias of the end-page half of the buffer */
    char expect1;
    char expect2;

    /* Resolve kernel-visible aliases of the user pages to inspect results. */
    dest_kbuf1 = paddr_to_kvaddr(vaddr_to_paddr((void*)(uintptr_t)addr));
    dest_kbuf2 = paddr_to_kvaddr(
            vaddr_to_paddr((void*)(uintptr_t)addr + TEST_BUF1_SIZE));

    /* dest buffs should be NULL iff their flags are FLAGS_NO_PAGE */
    EXPECT_EQ((dest_kbuf1 == NULL), (arch_mmu_flags_start == FLAGS_NO_PAGE));
    EXPECT_EQ((dest_kbuf2 == NULL), (arch_mmu_flags_end == FLAGS_NO_PAGE));

    usercopy_test_init_buf(dest_kbuf1, dest_kbuf2, DEST_DATA, -1);
    memset(src_buf, SRC_DATA, sizeof(src_buf));

    /* Zero-length copy should always succeed */
    ret = copy_to_user(addr + TEST_BUF_COPY_START, NULL, 0);
    EXPECT_EQ(0, ret);

    /* Dest buffer should be untouched after zero-length copy */
    if (dest_kbuf1) {
        EXPECT_EQ(0, checkbuf(dest_kbuf1, DEST_DATA, TEST_BUF1_SIZE));
    }
    if (dest_kbuf2) {
        EXPECT_EQ(0, checkbuf(dest_kbuf2, DEST_DATA, TEST_BUF2_SIZE));
    }

    /* Perform non-zero length copy */
    ret = copy_to_user(addr + TEST_BUF_COPY_START,
                       src_buf + TEST_BUF_COPY_START, TEST_BUF_COPY_SIZE);

    /*
     * If both pages are writeable copy_to_user should succeed otherwise it
     * should return ERR_FAULT.
     */
    if (arch_mmu_flags_start == ARCH_MMU_FLAG_PERM_USER &&
        arch_mmu_flags_end == ARCH_MMU_FLAG_PERM_USER) {
        /*
         * If both pages are writeable from user-space copy_to_user should
         * return success and every byte should be copied to dest_buf.
         */
        EXPECT_EQ(0, ret);
        expect1 = SRC_DATA;
        expect2 = SRC_DATA;
    } else {
        /*
         * If one of the pages is not writeable from user-space copy_to_user
         * should return ERR_FAULT. If only the first page is writeable everying
         * should be copied in the first page or nothing should be copied in the
         * first page. If the first page is not writeable, nothing should be
         * copied to either page. If the second page is not writeable, no data
         * should be copied to it, even if the first page was written to.
         */
        EXPECT_EQ(ERR_FAULT, ret);
        /*
         * The short-circuit below also guards the dest_kbuf1 dereference:
         * it is only NULL when the start flags are FLAGS_NO_PAGE, which
         * fails the first test.
         */
        if (arch_mmu_flags_start == ARCH_MMU_FLAG_PERM_USER &&
            dest_kbuf1[TEST_BUF_COPY_START] == SRC_DATA) {
            expect1 = SRC_DATA;
        } else {
            expect1 = DEST_DATA;
        }
        expect2 = DEST_DATA;
    }

    /* copy_to_user should not modify src_buf at all */
    EXPECT_EQ(0, checkbuf(src_buf, SRC_DATA, TEST_BUF_SIZE));

    if (dest_kbuf1) {
        /* Dest byte before copied region should be untouched */
        EXPECT_EQ(DEST_DATA, dest_kbuf1[0]);

        /* Check that copied region match expected value we selected above */
        EXPECT_EQ(0, checkbuf(dest_kbuf1 + TEST_BUF_COPY_START, expect1,
                              TEST_BUF1_COPY_SIZE));
    }

    if (dest_kbuf2) {
        /* Check that copied region match expected value we selected above */
        EXPECT_EQ(0, checkbuf(dest_kbuf2, expect2, TEST_BUF2_COPY_SIZE));

        /* Dest byte after copied region should be untouched */
        EXPECT_EQ(DEST_DATA, dest_kbuf2[TEST_BUF2_SIZE - 1]);
    }
}
247
/*
 * Exercise copy_from_user() on a buffer straddling the two test pages.
 * Verifies the return value, that faulting regions of the kernel dest
 * buffer are zero-filled (no stale kernel data leaks), and that bytes
 * outside the copy range are untouched.
 */
TEST_P(usercopytest, copy_from_user) {
    user_addr_t addr = get_addr_param();
    uint32_t arch_mmu_flags_start = get_start_flags_param();
    uint32_t arch_mmu_flags_end = get_end_flags_param();
    int ret;
    char dest_buf[TEST_BUF_SIZE];
    char* src_kbuf1; /* kernel alias of the start-page half of the buffer */
    char* src_kbuf2; /* kernel alias of the end-page half of the buffer */
    char expect1;
    char expect2;

    memset(dest_buf, DEST_DATA, sizeof(dest_buf));
    src_kbuf1 = paddr_to_kvaddr(vaddr_to_paddr((void*)(uintptr_t)addr));
    src_kbuf2 = paddr_to_kvaddr(
            vaddr_to_paddr((void*)(uintptr_t)addr + TEST_BUF1_SIZE));

    /* src buffs should be NULL iff their flags are FLAGS_NO_PAGE */
    EXPECT_EQ((src_kbuf1 == NULL), (arch_mmu_flags_start == FLAGS_NO_PAGE));
    EXPECT_EQ((src_kbuf2 == NULL), (arch_mmu_flags_end == FLAGS_NO_PAGE));

    usercopy_test_init_buf(src_kbuf1, src_kbuf2, SRC_DATA, -1);

    /* Zero-length copy should always succeed */
    ret = copy_from_user(NULL, addr + TEST_BUF_COPY_START, 0);
    EXPECT_EQ(0, ret);

    /* Dest buffer should be untouched after zero-length copy */
    EXPECT_EQ(0, checkbuf(dest_buf, DEST_DATA, TEST_BUF_SIZE));

    /* Perform non-zero length copy */
    ret = copy_from_user(dest_buf + TEST_BUF_COPY_START,
                         addr + TEST_BUF_COPY_START, TEST_BUF_COPY_SIZE);
    if (arch_mmu_flags_start & arch_mmu_flags_end & ARCH_MMU_FLAG_PERM_USER) {
        /*
         * If both pages are readable from user-space copy_from_user should
         * return success and every byte should be copied to dest_buf.
         */
        EXPECT_EQ(0, ret);
        expect1 = SRC_DATA;
        expect2 = SRC_DATA;
    } else {
        /*
         * If one of the pages is not readable from user-space copy_from_user
         * should return ERR_FAULT, and the parts of dest_buf that could not be
         * copied into should be set to 0.
         * The destination kernel buffer should always be written so
         * potentially uninitialized kernel data does not leak.
         */
        EXPECT_EQ(ERR_FAULT, ret);
        if (!(arch_mmu_flags_start & ARCH_MMU_FLAG_PERM_USER) ||
            !dest_buf[TEST_BUF_COPY_START]) {
            expect1 = 0;
        } else {
            expect1 = SRC_DATA;
        }
        expect2 = 0;
    }

    EXPECT_EQ(0, checkbuf(dest_buf + TEST_BUF_COPY_START, expect1,
                          TEST_BUF1_COPY_SIZE));
    EXPECT_EQ(0, checkbuf(dest_buf + TEST_BUF1_SIZE, expect2,
                          TEST_BUF2_COPY_SIZE));

    /* Dest bytes before and after copied region should be untouched */
    EXPECT_EQ(DEST_DATA, dest_buf[0]);
    EXPECT_EQ(DEST_DATA, dest_buf[TEST_BUF_SIZE - 1]);

    /* Src buffer should not be modified */
    if (src_kbuf1) {
        EXPECT_EQ(0, checkbuf(src_kbuf1, SRC_DATA, TEST_BUF1_SIZE));
    }
    if (src_kbuf2) {
        EXPECT_EQ(0, checkbuf(src_kbuf2, SRC_DATA, TEST_BUF2_SIZE));
    }
}
323
324 #if ARCH_ARM64
325 #define ENABLED_ON_ARM64_NAME(name) name
326 #else
327 #define ENABLED_ON_ARM64_NAME(name) DISABLED_##name
328 #define copy_from_anywhere(dst, src, len) -1
329 #endif
330
/*
 * Exercise copy_from_anywhere() (arm64 only; compiled as a DISABLED_ stub
 * elsewhere). Unlike copy_from_user, this API may read pages regardless of
 * the user-permission bit, so it only faults on unmapped pages. Faulting
 * regions of the kernel dest buffer must still be zero-filled.
 */
TEST_P(usercopytest, ENABLED_ON_ARM64_NAME(copy_from_anywhere)) {
    user_addr_t addr = get_addr_param();
    uint32_t arch_mmu_flags_start = get_start_flags_param();
    uint32_t arch_mmu_flags_end = get_end_flags_param();
    int ret;
    char dest_buf[TEST_BUF_SIZE];
    char* src_kbuf1; /* kernel alias of the start-page half of the buffer */
    char* src_kbuf2; /* kernel alias of the end-page half of the buffer */
    char expect1;
    char expect2;

    memset(dest_buf, DEST_DATA, sizeof(dest_buf));
    src_kbuf1 = paddr_to_kvaddr(vaddr_to_paddr((void*)(uintptr_t)addr));
    src_kbuf2 = paddr_to_kvaddr(
            vaddr_to_paddr((void*)(uintptr_t)addr + TEST_BUF1_SIZE));

    /* src buffs should be NULL iff their flags are FLAGS_NO_PAGE */
    EXPECT_EQ((src_kbuf1 == NULL), (arch_mmu_flags_start == FLAGS_NO_PAGE));
    EXPECT_EQ((src_kbuf2 == NULL), (arch_mmu_flags_end == FLAGS_NO_PAGE));

    usercopy_test_init_buf(src_kbuf1, src_kbuf2, SRC_DATA, -1);

    /* Zero-length copy should always succeed */
    ret = copy_from_anywhere(NULL, addr + TEST_BUF_COPY_START, 0);
    EXPECT_EQ(0, ret);

    /* Dest buffer should be untouched after zero-length copy */
    EXPECT_EQ(0, checkbuf(dest_buf, DEST_DATA, TEST_BUF_SIZE));

    /* Perform non-zero length copy */
    ret = copy_from_anywhere(dest_buf + TEST_BUF_COPY_START,
                             addr + TEST_BUF_COPY_START, TEST_BUF_COPY_SIZE);
    if (arch_mmu_flags_start != FLAGS_NO_PAGE &&
        arch_mmu_flags_end != FLAGS_NO_PAGE) {
        /*
         * If both pages are readable, copy_from_anywhere should return
         * success and every byte should be copied to dest_buf.
         */
        EXPECT_EQ(0, ret);
        expect1 = SRC_DATA;
        expect2 = SRC_DATA;
    } else {
        /*
         * If one of the pages is not readable copy_from_anywhere should
         * return ERR_FAULT, and the parts of dest_buf that could not be
         * copied into should be set to 0.
         * The destination kernel buffer should always be written so
         * potentially uninitialized kernel data does not leak.
         */
        EXPECT_EQ(ERR_FAULT, ret);
        if (arch_mmu_flags_start == FLAGS_NO_PAGE) {
            expect1 = 0;
        } else {
            expect1 = SRC_DATA;
        }
        expect2 = 0;
    }

    EXPECT_EQ(0, checkbuf(dest_buf + TEST_BUF_COPY_START, expect1,
                          TEST_BUF1_COPY_SIZE));
    EXPECT_EQ(0, checkbuf(dest_buf + TEST_BUF1_SIZE, expect2,
                          TEST_BUF2_COPY_SIZE));

    /* Dest bytes before and after copied region should be untouched */
    EXPECT_EQ(DEST_DATA, dest_buf[0]);
    EXPECT_EQ(DEST_DATA, dest_buf[TEST_BUF_SIZE - 1]);

    /* Src buffer should not be modified */
    if (src_kbuf1) {
        EXPECT_EQ(0, checkbuf(src_kbuf1, SRC_DATA, TEST_BUF1_SIZE));
    }
    if (src_kbuf2) {
        EXPECT_EQ(0, checkbuf(src_kbuf2, SRC_DATA, TEST_BUF2_SIZE));
    }
}
406
/*
 * One strlcpy_from_user() scenario.
 *
 * @addr:                 user address of the 16-byte split test buffer.
 * @arch_mmu_flags_start: mmu flags the start page was mapped with.
 * @arch_mmu_flags_end:   mmu flags the end page was mapped with.
 * @copy_size:            dest buffer size passed to strlcpy_from_user.
 * @null_off:             offset in the user buffer where the '\0' is placed.
 *
 * Checks the return value, that the kernel dest buffer is terminated and
 * only ever contains source-string bytes, and that bytes outside the dest
 * window and the whole user source buffer are untouched.
 */
static void usercopy_test_strlcpy_from_user_inner(user_addr_t addr,
                                                  uint arch_mmu_flags_start,
                                                  uint arch_mmu_flags_end,
                                                  int copy_size,
                                                  int null_off) {
    int ret;
    char dest_buf[TEST_BUF_SIZE];
    char* src_kbuf1;
    char* src_kbuf2;
    size_t dest_len;
    /* Max string length that fits in copy_size including the terminator. */
    int copy_len = copy_size ? copy_size - 1 : 0;

    memset(dest_buf, DEST_DATA, sizeof(dest_buf));
    src_kbuf1 = paddr_to_kvaddr(vaddr_to_paddr((void*)(uintptr_t)addr));
    src_kbuf2 = paddr_to_kvaddr(
            vaddr_to_paddr((void*)(uintptr_t)addr + TEST_BUF1_SIZE));

    /* src buffs should be NULL iff their flags are FLAGS_NO_PAGE */
    EXPECT_EQ((src_kbuf1 == NULL), (arch_mmu_flags_start == FLAGS_NO_PAGE));
    EXPECT_EQ((src_kbuf2 == NULL), (arch_mmu_flags_end == FLAGS_NO_PAGE));

    usercopy_test_init_buf(src_kbuf1, src_kbuf2, SRC_DATA, null_off);

    ret = strlcpy_from_user(dest_buf + TEST_BUF_COPY_START,
                            addr + TEST_BUF_COPY_START, copy_size);

    dest_len = strnlen(dest_buf + TEST_BUF_COPY_START, TEST_BUF_COPY_SIZE);
    if (copy_size) {
        /*
         * Kernel buffer should always be null terminated.
         */
        EXPECT_NE(TEST_BUF_COPY_SIZE, dest_len, " null_off=%d, copy_size=%d\n",
                  null_off, copy_size);
    } else {
        /*
         * If copy_size is 0, then kernel buffer will not be null terminated.
         */
        EXPECT_EQ(TEST_BUF_COPY_SIZE, dest_len, " null_off=%d, copy_size=%d\n",
                  null_off, copy_size);
        dest_len = 0;
    }

    /*
     * If the string in dest_buf is not empty it should only contain data from
     * the source string.
     */
    EXPECT_EQ(0, checkbuf(dest_buf + TEST_BUF_COPY_START, SRC_DATA, dest_len),
              " null_off=%d, copy_size=%d\n", null_off, copy_size);

    if ((arch_mmu_flags_start & ARCH_MMU_FLAG_PERM_USER) &&
        ((arch_mmu_flags_end & ARCH_MMU_FLAG_PERM_USER) ||
         null_off < TEST_BUF1_SIZE)) {
        /*
         * If the pages readable from user-space contain a 0 terminated string,
         * strlcpy_from_user should return the length of that string and every
         * byte up to the 0 terminator that fits in dest_buf should be copied
         * there. dest_buf should always be 0 terminated.
         */
        EXPECT_EQ(null_off - TEST_BUF_COPY_START, ret,
                  " wrong strlen returned, null_off=%d, copy_size=%d\n",
                  null_off, copy_size);
        EXPECT_EQ(MIN(null_off - TEST_BUF_COPY_START, copy_len), dest_len,
                  " null_off=%d, copy_size=%d\n", null_off, copy_size);
    } else {
        /*
         * If one of the pages is not readable from user-space strlcpy_from_user
         * should return ERR_FAULT, and dest_buf should have a null terminator
         * at the start of the faulting page or at the start of the string.
         */
        EXPECT_EQ(ERR_FAULT, ret, " null_off=%d, copy_size=%d\n", null_off,
                  copy_size);
        if (!(arch_mmu_flags_start & ARCH_MMU_FLAG_PERM_USER)) {
            EXPECT_EQ(0, dest_len, " null_off=%d, copy_size=%d\n", null_off,
                      copy_size);
        } else if (dest_len) {
            EXPECT_EQ(MIN(TEST_BUF1_COPY_SIZE, copy_len), dest_len,
                      " null_off=%d, copy_size=%d\n", null_off, copy_size);
        }
    }

    /* Src buffer should not be modified */
    if (src_kbuf1) {
        if (null_off < TEST_BUF1_SIZE) {
            EXPECT_EQ(0, checkbuf(src_kbuf1, SRC_DATA, null_off));
            EXPECT_EQ('\0', src_kbuf1[null_off]);
            EXPECT_EQ(0, checkbuf(src_kbuf1 + null_off + 1, SRC_DATA,
                                  TEST_BUF1_SIZE - null_off - 1));
        } else {
            EXPECT_EQ(0, checkbuf(src_kbuf1, SRC_DATA, TEST_BUF1_SIZE));
        }
    }
    if (src_kbuf2) {
        if (null_off >= TEST_BUF1_SIZE) {
            size_t null_off2 = null_off - TEST_BUF1_SIZE;
            EXPECT_EQ(0, checkbuf(src_kbuf2, SRC_DATA, null_off2));
            EXPECT_EQ('\0', src_kbuf2[null_off2]);
            EXPECT_EQ(0, checkbuf(src_kbuf2 + null_off2 + 1, SRC_DATA,
                                  TEST_BUF2_SIZE - null_off2 - 1));
        } else {
            EXPECT_EQ(0, checkbuf(src_kbuf2, SRC_DATA, TEST_BUF2_SIZE));
        }
    }

    /* Dest bytes before and after copied region should be untouched */
    EXPECT_EQ(DEST_DATA, dest_buf[0]);
    EXPECT_EQ(DEST_DATA, dest_buf[TEST_BUF_COPY_START + copy_size]);
    EXPECT_EQ(DEST_DATA, dest_buf[TEST_BUF_SIZE - 1]);
}
515
TEST_P(usercopytest,strlcpy_from_user)516 TEST_P(usercopytest, strlcpy_from_user) {
517 user_addr_t addr = get_addr_param();
518 uint32_t arch_mmu_flags_start = get_start_flags_param();
519 uint32_t arch_mmu_flags_end = get_end_flags_param();
520 size_t copy_sizes[] = {0, TEST_BUF1_COPY_SIZE, TEST_BUF_COPY_SIZE};
521 size_t copy_sizes_index;
522 int null_off;
523 int copy_size;
524
525 for (copy_sizes_index = 0; copy_sizes_index < countof(copy_sizes);
526 copy_sizes_index++) {
527 copy_size = copy_sizes[copy_sizes_index];
528 for (null_off = TEST_BUF_COPY_START; null_off < TEST_BUF_SIZE;
529 null_off++) {
530 usercopy_test_strlcpy_from_user_inner(addr, arch_mmu_flags_start,
531 arch_mmu_flags_end, copy_size,
532 null_off);
533 }
534 }
535 }
536
flags_to_str(uint32_t flags)537 static const char* flags_to_str(uint32_t flags) {
538 switch (flags) {
539 case FLAGS_NO_PAGE:
540 return "--";
541 case FLAGS_NO_USER:
542 return "ko";
543 case FLAGS_RO_USER:
544 return "ro";
545 case FLAGS_RW_USER:
546 return "rw";
547 default:
548 return "??";
549 }
550 }
551
/*
 * Build the test-name suffix for a user-address parameter tuple, e.g.
 * "rwro" for a writable start page and a read-only end page.
 *
 * Derives the flags from the @param argument directly (matching
 * kernel_param_to_string below) instead of re-fetching them through
 * GetParam(), so the conversion does not depend on the framework's
 * current-parameter state and the parameter is not silently unused.
 */
static void user_param_to_string(const void* param,
                                 char* buf,
                                 size_t buf_size) {
    const void* const* param_arr = param;
    const uint32_t* start_flags = param_arr[1];
    const uint32_t* end_flags = param_arr[2];
    size_t count = 0;

    count = scnprintf(buf + count, buf_size - count, "%s",
                      flags_to_str(*start_flags));
    scnprintf(buf + count, buf_size - count, "%s", flags_to_str(*end_flags));
}
563
/*
 * Run every usercopytest against the fixed user-space buffer address with
 * all 4x4 combinations of start/end page mappings: absent, kernel-only,
 * read-only user, and read-write user.
 */
INSTANTIATE_TEST_SUITE_P(UserCopyTestParams,
                         usercopytest,
                         testing_Combine(testing_Values(TEST_BUF_ADDR),
                                         testing_Values(FLAGS_NO_PAGE,
                                                        FLAGS_NO_USER,
                                                        FLAGS_RO_USER,
                                                        FLAGS_RW_USER),
                                         testing_Values(FLAGS_NO_PAGE,
                                                        FLAGS_NO_USER,
                                                        FLAGS_RO_USER,
                                                        FLAGS_RW_USER)),
                         user_param_to_string);
576
577 #if IS_64BIT && USER_32BIT
578 /*
579 * Tests with Kernel addresses are not applicable to arm64u32 since kernel
580 * addresses do not fit in a user_addr_t.
581 */
582 static_assert(KERNEL_BASE > UINT32_MAX);
583
584 PORT_TEST(usercopy_tests, PORT_NAME)
585
586 #else
587 /* These are filled in before the tests are run */
588 static user_addr_t kernel_addrs[3];
589
590 static void kernel_param_to_string(const void* param,
591 char* buf,
592 size_t buf_size) {
593 const void* const* kernel_param = param;
594 size_t idx = ((user_addr_t*)kernel_param[0] - kernel_addrs);
595 const char* str;
596
597 switch (idx) {
598 case STACK_ADDR_IDX:
599 str = "kernel-stack";
600 break;
601 case HEAP_ADDR_IDX:
602 str = "kernel-heap";
603 break;
604 case GLOBAL_ADDR_IDX:
605 str = "kernel-global";
606 break;
607 default:
608 str = "unknown-address-type";
609 }
610
611 scnprintf(buf, buf_size, "%s", str);
612 }
613
/*
 * Also run the tests against kernel stack/heap/global addresses. Kernel
 * pages are never user-accessible, so only the FLAGS_NO_USER/FLAGS_NO_USER
 * combination applies; the copy routines must fault on these addresses.
 */
INSTANTIATE_TEST_SUITE_P(KernelUserCopyTestParams,
                         usercopytest,
                         testing_Combine(testing_ValuesIn(kernel_addrs),
                                         testing_Values(FLAGS_NO_USER),
                                         testing_Values(FLAGS_NO_USER)),
                         kernel_param_to_string);
620
621 static bool run_usercopy_test(struct unittest* test) {
622 bool tests_passed;
623 static uint8_t global_buf[TEST_BUF_SIZE];
624 uint8_t stack_buf[TEST_BUF_SIZE];
625 uint8_t* heap_buf = malloc(TEST_BUF_SIZE);
626
627 ASSERT(heap_buf);
628
629 kernel_addrs[STACK_ADDR_IDX] = (user_addr_t)stack_buf;
630 kernel_addrs[HEAP_ADDR_IDX] = (user_addr_t)heap_buf;
631 kernel_addrs[GLOBAL_ADDR_IDX] = (user_addr_t)global_buf;
632
633 tests_passed = RUN_ALL_TESTS();
634
635 free(heap_buf);
636
637 return tests_passed;
638 }
639
640 static void usercopy_test_init(uint level) {
641 static struct unittest usercopy_unittest = {
642 .port_name = PORT_NAME,
643 .run_test = run_usercopy_test,
644 };
645
646 unittest_add(&usercopy_unittest);
647 }
648
649 LK_INIT_HOOK(usercopy_test, usercopy_test_init, LK_INIT_LEVEL_APPS);
650 #endif // !(IS_64BIT && USER_32BIT)
651