1 /*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "jit/jit_memory_region.h"
18
19 #include <signal.h>
20 #include <sys/mman.h>
21 #include <sys/types.h>
22 #include <unistd.h>
23
24 #include <android-base/unique_fd.h>
25 #include <gtest/gtest.h>
26
27 #include "base/globals.h"
28 #include "base/memfd.h"
29 #include "base/utils.h"
30 #include "common_runtime_test.h"
31
32 namespace art {
33 namespace jit {
34
// These tests only run on bionic.
#if defined(__BIONIC__)
// Exit code used by the child's SIGSEGV handler. The parent checks
// WEXITSTATUS against this value to confirm that the child died from the
// expected fault rather than exiting normally.
static constexpr int kReturnFromFault = 42;

// These globals are only set in child processes.
// Address the child expects the next SIGSEGV to fault on; verified in handler().
void* gAddrToFaultOn = nullptr;
41
handler(int ATTRIBUTE_UNUSED,siginfo_t * info,void * ATTRIBUTE_UNUSED)42 void handler(int ATTRIBUTE_UNUSED, siginfo_t* info, void* ATTRIBUTE_UNUSED) {
43 CHECK_EQ(info->si_addr, gAddrToFaultOn);
44 exit(kReturnFromFault);
45 }
46
registerSignalHandler()47 static void registerSignalHandler() {
48 struct sigaction sa;
49 sigemptyset(&sa.sa_mask);
50 sa.sa_flags = SA_SIGINFO;
51 sa.sa_sigaction = handler;
52 sigaction(SIGSEGV, &sa, nullptr);
53 }
54
// Fixture exercising zygote JIT memory: a memfd region whose *future* writes
// are sealed by JitMemoryRegion::ProtectZygoteMemory, so that mappings created
// before sealing stay writable while no new writable mapping can be created.
// Several tests fork() and coordinate parent/child through a second shared
// mapping of atomic ints used as a tiny handshake protocol.
class TestZygoteMemory : public testing::Test {
 public:
  // Basic lifecycle: create the region, map it writable, seal future writes,
  // then check which operations (writes through the old mapping, new writable
  // mappings, mprotect toggling, mremap) still succeed.
  void BasicTest() {
    // Zygote JIT memory only works on kernels that don't segfault on flush.
    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
    std::string error_msg;
    size_t size = kPageSize;
    android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
    CHECK_NE(fd.get(), -1);

    // Create a writable mapping.
    int32_t* addr = reinterpret_cast<int32_t*>(
        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
    CHECK(addr != nullptr);
    CHECK_NE(addr, MAP_FAILED);

    // Test that we can write into the mapping.
    addr[0] = 42;
    CHECK_EQ(addr[0], 42);

    // Protect the memory (seal future writes on the fd).
    bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
    CHECK(res);

    // Test that we can still write into the pre-existing mapping.
    addr[0] = 2;
    CHECK_EQ(addr[0], 2);

    // Test that we cannot create another writable mapping.
    int32_t* addr2 = reinterpret_cast<int32_t*>(
        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
    CHECK_EQ(addr2, MAP_FAILED);

    // With the existing mapping, we can toggle read/write.
    CHECK_EQ(mprotect(addr, size, PROT_READ), 0) << strerror(errno);
    CHECK_EQ(mprotect(addr, size, PROT_READ | PROT_WRITE), 0) << strerror(errno);

    // Test mremap with old_size = 0. From the man pages:
    //   If the value of old_size is zero, and old_address refers to a shareable mapping
    //   (see mmap(2) MAP_SHARED), then mremap() will create a new mapping of the same pages.
    addr2 = reinterpret_cast<int32_t*>(mremap(addr, 0, kPageSize, MREMAP_MAYMOVE));
    CHECK_NE(addr2, MAP_FAILED);

    // Test that we can write into the remapped mapping.
    addr2[0] = 3;
    CHECK_EQ(addr2[0], 3);

    // Grow the mapping; MREMAP_MAYMOVE lets the kernel relocate it if needed.
    addr2 = reinterpret_cast<int32_t*>(mremap(addr, kPageSize, 2 * kPageSize, MREMAP_MAYMOVE));
    CHECK_NE(addr2, MAP_FAILED);

    // Test that we can write into the remapped mapping.
    addr2[0] = 4;
    CHECK_EQ(addr2[0], 4);
  }

  // After fork(), the child unmaps its inherited writable mapping and checks
  // that: (1) the read-only mapping still reflects parent updates, and (2) any
  // write through the unmapped address faults (caught by handler()).
  void TestUnmapWritableAfterFork() {
    // Zygote JIT memory only works on kernels that don't segfault on flush.
    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
    std::string error_msg;
    size_t size = kPageSize;
    int32_t* addr = nullptr;
    int32_t* addr2 = nullptr;
    {
      android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
      CHECK_NE(fd.get(), -1);

      // Create a writable mapping.
      addr = reinterpret_cast<int32_t*>(
          mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
      CHECK(addr != nullptr);
      CHECK_NE(addr, MAP_FAILED);

      // Test that we can write into the mapping.
      addr[0] = 42;
      CHECK_EQ(addr[0], 42);

      // Create a read-only mapping.
      addr2 = reinterpret_cast<int32_t*>(
          mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
      CHECK(addr2 != nullptr);

      // Protect the memory.
      bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
      CHECK(res);
    }
    // At this point, the fd has been dropped, but the memory mappings are still
    // there.

    // Create a mapping of atomic ints to communicate between processes.
    android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
    CHECK_NE(fd2.get(), -1);
    // NOTE(review): this mmap result is not checked against MAP_FAILED before use.
    std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));

    // Values used for the tests below.
    const int32_t parent_value = 66;
    const int32_t child_value = 33;
    const int32_t starting_value = 22;

    shared[0] = 0;
    addr[0] = starting_value;
    CHECK_EQ(addr[0], starting_value);
    CHECK_EQ(addr2[0], starting_value);
    pid_t pid = fork();
    if (pid == 0) {
      // Test that we can write into the mapping.
      addr[0] = child_value;
      CHECK_EQ(addr[0], child_value);
      CHECK_EQ(addr2[0], child_value);

      // Unmap the writable mapping.
      munmap(addr, kPageSize);

      // The read-only mapping still sees the underlying pages.
      CHECK_EQ(addr2[0], child_value);

      // Notify parent process.
      shared[0] = 1;

      // Wait for the parent process to write a new value.
      while (shared[0] != 2) {
        sched_yield();
      }
      CHECK_EQ(addr2[0], parent_value);

      // Test that we cannot write into the mapping. The signal handler will
      // exit the process.
      gAddrToFaultOn = addr;
      registerSignalHandler();
      // This write will trigger a fault, as `addr` is unmapped.
      addr[0] = child_value + 1;
      // Unreachable if the fault fires; handler() exits with kReturnFromFault.
      exit(0);
    } else {
      // Wait for the child's first update.
      while (shared[0] != 1) {
        sched_yield();
      }
      CHECK_EQ(addr[0], child_value);
      CHECK_EQ(addr2[0], child_value);
      addr[0] = parent_value;
      // Notify the child of the new value.
      shared[0] = 2;
      int status;
      CHECK_EQ(waitpid(pid, &status, 0), pid);
      CHECK(WIFEXITED(status)) << strerror(errno);
      // The child must have died from the expected fault.
      CHECK_EQ(WEXITSTATUS(status), kReturnFromFault);
      CHECK_EQ(addr[0], parent_value);
      CHECK_EQ(addr2[0], parent_value);
      munmap(addr, kPageSize);
      munmap(addr2, kPageSize);
      munmap(shared, kPageSize);
    }
  }

  // Same fault expectation as above, but the writable mapping is dropped from
  // the child via madvise(MADV_DONTFORK) instead of an explicit munmap: the
  // child never inherits the writable mapping at all.
  void TestMadviseDontFork() {
    // Zygote JIT memory only works on kernels that don't segfault on flush.
    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
    std::string error_msg;
    size_t size = kPageSize;
    int32_t* addr = nullptr;
    int32_t* addr2 = nullptr;
    {
      android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
      CHECK_NE(fd.get(), -1);

      // Create a writable mapping, excluded from child address spaces.
      addr = reinterpret_cast<int32_t*>(
          mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
      CHECK(addr != nullptr);
      CHECK_NE(addr, MAP_FAILED);
      CHECK_EQ(madvise(addr, kPageSize, MADV_DONTFORK), 0);

      // Test that we can write into the mapping.
      addr[0] = 42;
      CHECK_EQ(addr[0], 42);

      // Create a read-only mapping (this one IS inherited across fork).
      addr2 = reinterpret_cast<int32_t*>(
          mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
      CHECK(addr2 != nullptr);

      // Protect the memory.
      bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
      CHECK(res);
    }
    // At this point, the fd has been dropped, but the memory mappings are still
    // there.

    // Create a mapping of atomic ints to communicate between processes.
    android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
    CHECK_NE(fd2.get(), -1);
    // NOTE(review): this mmap result is not checked against MAP_FAILED before use.
    std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));

    // Values used for the tests below.
    const int32_t parent_value = 66;
    const int32_t child_value = 33;
    const int32_t starting_value = 22;

    shared[0] = 0;
    addr[0] = starting_value;
    CHECK_EQ(addr[0], starting_value);
    CHECK_EQ(addr2[0], starting_value);
    pid_t pid = fork();
    if (pid == 0) {
      CHECK_EQ(addr2[0], starting_value);

      // Notify parent process.
      shared[0] = 1;

      // Wait for the parent process to write a new value.
      while (shared[0] != 2) {
        sched_yield();
      }

      CHECK_EQ(addr2[0], parent_value);
      // Test that we cannot write into the mapping. The signal handler will
      // exit the process.
      gAddrToFaultOn = addr;
      registerSignalHandler();
      // Faults: `addr` was madvised MADV_DONTFORK, so it is not mapped here.
      addr[0] = child_value + 1;
      exit(0);
    } else {
      while (shared[0] != 1) {
        sched_yield();
      }
      CHECK_EQ(addr[0], starting_value);
      CHECK_EQ(addr2[0], starting_value);
      addr[0] = parent_value;
      // Notify the child of the new value.
      shared[0] = 2;
      int status;
      CHECK_EQ(waitpid(pid, &status, 0), pid);
      CHECK(WIFEXITED(status)) << strerror(errno);
      CHECK_EQ(WEXITSTATUS(status), kReturnFromFault);
      CHECK_EQ(addr[0], parent_value);
      CHECK_EQ(addr2[0], parent_value);

      munmap(addr, kPageSize);
      munmap(addr2, kPageSize);
      munmap(shared, kPageSize);
    }
  }

  // This code is testing some behavior that ART could potentially use: get a
  // copy-on-write mapping that can incorporate changes from a shared mapping
  // owned by another process. The child repeatedly re-mmaps the same address
  // MAP_PRIVATE (for local writes) or MAP_SHARED read-only (to observe parent
  // updates), while the parent writes through its sealed-but-preexisting
  // writable mapping.
  void TestFromSharedToPrivate() {
    // Zygote JIT memory only works on kernels that don't segfault on flush.
    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
    // This test is only for memfd with future write sealing support:
    // 1) ashmem with PROT_READ doesn't permit mapping MAP_PRIVATE | PROT_WRITE
    // 2) ashmem mapped MAP_PRIVATE discards the contents already written.
    if (!art::IsSealFutureWriteSupported()) {
      return;
    }
    std::string error_msg;
    size_t size = kPageSize;
    int32_t* addr = nullptr;
    android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
    CHECK_NE(fd.get(), -1);

    // Create a writable mapping.
    addr = reinterpret_cast<int32_t*>(
        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
    CHECK(addr != nullptr);
    CHECK_NE(addr, MAP_FAILED);

    // Test that we can write into the mapping.
    addr[0] = 42;
    CHECK_EQ(addr[0], 42);

    // Create another mapping of atomic ints to communicate between processes.
    android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
    CHECK_NE(fd2.get(), -1);
    // NOTE(review): this mmap result is not checked against MAP_FAILED before use.
    std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));

    // Protect the memory.
    CHECK(JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg));

    // Values used for the tests below.
    const int32_t parent_value = 66;
    const int32_t child_value = 33;
    const int32_t starting_value = 22;

    // Check that updates done by a child mapping write-private are not visible
    // to the parent.
    addr[0] = starting_value;
    shared[0] = 0;
    pid_t pid = fork();
    if (pid == 0) {
      // MAP_FIXED replaces the inherited shared mapping with a private one.
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
               addr);
      addr[0] = child_value;
      exit(0);
    } else {
      int status;
      CHECK_EQ(waitpid(pid, &status, 0), pid);
      CHECK(WIFEXITED(status)) << strerror(errno);
      // The child's private (copy-on-write) store must not leak back here.
      CHECK_EQ(addr[0], starting_value);
    }

    addr[0] = starting_value;
    shared[0] = 0;

    // Check getting back and forth on shared mapping.
    pid = fork();
    if (pid == 0) {
      // Map it private with write access. MAP_FIXED will replace the existing
      // mapping.
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
               addr);
      addr[0] = child_value;
      CHECK_EQ(addr[0], child_value);

      // Check that mapping shared with write access fails (future writes are
      // sealed on the fd).
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd.get(), 0),
               MAP_FAILED);
      CHECK_EQ(errno, EPERM);

      // Map shared with read access.
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ, MAP_SHARED | MAP_FIXED, fd.get(), 0), addr);
      // The private write above was discarded with the private mapping.
      CHECK_NE(addr[0], child_value);

      // Wait for the parent to notify.
      while (shared[0] != 1) {
        sched_yield();
      }
      CHECK_EQ(addr[0], parent_value);

      // Notify the parent for getting a new update of the buffer.
      shared[0] = 2;

      // Map it private again.
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
               addr);
      addr[0] = child_value + 1;
      CHECK_EQ(addr[0], child_value + 1);

      // And map it back shared.
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ, MAP_SHARED | MAP_FIXED, fd.get(), 0), addr);
      while (shared[0] != 3) {
        sched_yield();
      }
      CHECK_EQ(addr[0], parent_value + 1);
      exit(0);
    } else {
      addr[0] = parent_value;
      CHECK_EQ(addr[0], parent_value);

      // Notify the child of the new value.
      shared[0] = 1;

      // Wait for the child to ask for a new value.
      while (shared[0] != 2) {
        sched_yield();
      }
      addr[0] = parent_value + 1;
      CHECK_EQ(addr[0], parent_value + 1);

      // Notify the child of a new value.
      shared[0] = 3;
      int status;
      CHECK_EQ(waitpid(pid, &status, 0), pid);
      CHECK(WIFEXITED(status)) << strerror(errno);
      CHECK_EQ(addr[0], parent_value + 1);
    }

    // Check that updates done by the parent are visible after a new mmap
    // write-private.
    shared[0] = 0;
    addr[0] = starting_value;
    pid = fork();
    if (pid == 0) {
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
               addr);
      CHECK_EQ(addr[0], starting_value);
      addr[0] = child_value;
      CHECK_EQ(addr[0], child_value);

      // Notify the parent to update the buffer.
      shared[0] = 1;

      // Wait for the parent update.
      while (shared[0] != 2) {
        sched_yield();
      }
      // Test the buffer still contains our own data, and not the parent's:
      // our write triggered copy-on-write, detaching this page.
      CHECK_EQ(addr[0], child_value);

      // Test the buffer contains the parent data after a new mmap.
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
               addr);
      CHECK_EQ(addr[0], parent_value);
      exit(0);
    } else {
      // Wait for the child to start.
      while (shared[0] != 1) {
        sched_yield();
      }
      CHECK_EQ(addr[0], starting_value);
      addr[0] = parent_value;
      // Notify the child that the buffer has been written.
      shared[0] = 2;
      int status;
      CHECK_EQ(waitpid(pid, &status, 0), pid);
      CHECK(WIFEXITED(status)) << strerror(errno);
      CHECK_EQ(addr[0], parent_value);
    }

    // Check that updates done by the parent are visible for a new mmap
    // write-private that hasn't written to the buffer yet.
    shared[0] = 0;
    addr[0] = starting_value;
    pid = fork();
    if (pid == 0) {
      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
               addr);
      CHECK_EQ(addr[0], starting_value);
      // Notify the parent for a new update of the buffer.
      shared[0] = 1;
      // Until we write, the private page still tracks the shared content, so
      // the parent's update becomes visible here.
      while (addr[0] != parent_value) {
        sched_yield();
      }
      addr[0] = child_value;
      CHECK_EQ(addr[0], child_value);
      exit(0);
    } else {
      while (shared[0] != 1) {
        sched_yield();
      }
      CHECK_EQ(addr[0], starting_value);
      addr[0] = parent_value;
      int status;
      CHECK_EQ(waitpid(pid, &status, 0), pid);
      CHECK(WIFEXITED(status)) << strerror(errno);
      CHECK_EQ(addr[0], parent_value);
    }
    munmap(addr, kPageSize);
    munmap(shared, kPageSize);
  }

  // Test that a readable mapping created before sealing future writes can be
  // changed into a writable mapping.
  void TestVmMayWriteBefore() {
    // Zygote JIT memory only works on kernels that don't segfault on flush.
    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
    std::string error_msg;
    size_t size = kPageSize;
    int32_t* addr = nullptr;
    {
      android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
      CHECK_NE(fd.get(), -1);

      // Create a shared readable mapping BEFORE sealing.
      addr = reinterpret_cast<int32_t*>(
          mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
      CHECK(addr != nullptr);
      CHECK_NE(addr, MAP_FAILED);

      // Protect the memory.
      bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
      CHECK(res);
    }
    // At this point, the fd has been dropped, but the memory mappings are still
    // there.
    // Pre-seal mappings keep VM_MAYWRITE, so upgrading to PROT_WRITE works.
    int res = mprotect(addr, kPageSize, PROT_WRITE);
    CHECK_EQ(res, 0);
  }

  // Test that we cannot create a writable mapping after sealing future writes.
  void TestVmMayWriteAfter() {
    // Zygote JIT memory only works on kernels that don't segfault on flush.
    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
    std::string error_msg;
    size_t size = kPageSize;
    int32_t* addr = nullptr;
    {
      android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
      CHECK_NE(fd.get(), -1);

      // Protect the memory FIRST this time.
      bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
      CHECK(res);

      // Create a shared readable mapping AFTER sealing.
      addr = reinterpret_cast<int32_t*>(
          mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
      CHECK(addr != nullptr);
      CHECK_NE(addr, MAP_FAILED);
    }
    // At this point, the fd has been dropped, but the memory mappings are still
    // there.
    // Post-seal mappings lack VM_MAYWRITE; the upgrade must be refused.
    int res = mprotect(addr, kPageSize, PROT_WRITE);
    CHECK_EQ(res, -1);
    CHECK_EQ(errno, EACCES);
  }
};
552
// Each TEST_F below simply forwards to the fixture method of the same name;
// the test bodies live in TestZygoteMemory above.
TEST_F(TestZygoteMemory, BasicTest) {
  BasicTest();
}

TEST_F(TestZygoteMemory, TestUnmapWritableAfterFork) {
  TestUnmapWritableAfterFork();
}

TEST_F(TestZygoteMemory, TestMadviseDontFork) {
  TestMadviseDontFork();
}

TEST_F(TestZygoteMemory, TestFromSharedToPrivate) {
  TestFromSharedToPrivate();
}

TEST_F(TestZygoteMemory, TestVmMayWriteBefore) {
  TestVmMayWriteBefore();
}

TEST_F(TestZygoteMemory, TestVmMayWriteAfter) {
  TestVmMayWriteAfter();
}
576
577 #endif // defined (__BIONIC__)
578
579 } // namespace jit
580 } // namespace art
581