/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <cstring>
#include <limits>
#include <memory>
#include <random>
#include <vector>

#include "bit_utils.h"
#include "common_art_test.h"
#include "logging.h"
#include "memory_tool.h"
#include "mman.h"
#include "unix_file/fd_file.h"

namespace art {

class MemMapTest : public CommonArtTest {
 public:
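  // Note: msync() fails with ENOMEM when the queried range is not mapped, so
  // a one-byte MS_SYNC probe tells us whether an address is currently mapped
  // without ever dereferencing it.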
  static bool IsAddressMapped(void* addr) {
    bool res = msync(addr, 1, MS_SYNC) == 0;
    if (!res && errno != ENOMEM) {
      PLOG(FATAL) << "Unexpected error occurred on msync";
    }
    return res;
  }

  static std::vector<uint8_t> RandomData(size_t size) {
    std::random_device rd;
    // std::uniform_int_distribution is not defined for char-sized types;
    // distribute over int and narrow the result.
    std::uniform_int_distribution<int> dist(0, std::numeric_limits<uint8_t>::max());
    std::vector<uint8_t> res;
    res.resize(size);
    for (size_t i = 0; i < size; i++) {
      res[i] = static_cast<uint8_t>(dist(rd));
    }
    return res;
  }

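  // Returns an address where a mapping of `size` bytes just succeeded. The
  // probe mapping is unmapped when `map` goes out of scope, so the address is
  // only a hint: something else could map it before the caller does, though
  // the tests below treat that as unlikely.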
  static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
    // Find a valid map address and unmap it before returning.
    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("temp",
                                      size,
                                      PROT_READ,
                                      low_4gb,
                                      &error_msg);
    CHECK(map.IsValid());
    return map.Begin();
  }

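  // Sketch of the layout this test drives, with base0 the address of the
  // initial two-page mapping:
  //
  //   before RemapAtEnd:  [ m0: page 0 | page 1 ]
  //   after RemapAtEnd:   [ m0: page 0 ][ m1: page 1 ]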
  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    const size_t page_size = MemMap::GetPageSize();
    // Map a two-page memory region.
    MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                     2 * page_size,
                                     PROT_READ | PROT_WRITE,
                                     low_4gb,
                                     &error_msg);
    // Check its state and write to it.
    ASSERT_TRUE(m0.IsValid());
    uint8_t* base0 = m0.Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0.Size();
    EXPECT_EQ(m0.Size(), 2 * page_size);
    EXPECT_EQ(m0.BaseBegin(), base0);
    EXPECT_EQ(m0.BaseSize(), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap m1 = m0.RemapAtEnd(base0 + page_size,
                              "MemMapTest_RemapAtEndTest_map1",
                              PROT_READ | PROT_WRITE,
                              &error_msg);
    // Check the states of the two maps.
    EXPECT_EQ(m0.Begin(), base0) << error_msg;
    EXPECT_EQ(m0.Size(), page_size);
    EXPECT_EQ(m0.BaseBegin(), base0);
    EXPECT_EQ(m0.BaseSize(), page_size);
    uint8_t* base1 = m1.Begin();
    size_t size1 = m1.Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(m1.BaseBegin(), base1);
    EXPECT_EQ(m1.BaseSize(), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    m0.Reset();
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Remap the whole of m1 at its own start; the region is transferred to m2
    // and m1 becomes invalid.
    MemMap m2 = m1.RemapAtEnd(m1.Begin(),
                              "MemMapTest_RemapAtEndTest_map2",
                              PROT_READ | PROT_WRITE,
                              &error_msg);
    ASSERT_TRUE(m2.IsValid()) << error_msg;
    ASSERT_FALSE(m1.IsValid());
  }

  void CommonInit() {
    MemMap::Init();
  }

#if defined(__LP64__) && !defined(__x86_64__)
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input, uint64_t page_size);
#endif

TEST_F(MemMapTest, PageSize) {
  const size_t page_size = MemMap::GetPageSize();
  EXPECT_EQ(page_size, GetPageSizeSlow());
}

TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  const size_t page_size = MemMap::GetPageSize();
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * page_size, page_size);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0, page_size), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

// We need mremap to be able to test ReplaceMapping at all.
#if HAVE_MREMAP_SYSCALL
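// The contract these tests exercise: ReplaceWith moves the source mapping
// over the destination (via mremap, hence the guard above). On success the
// destination takes on the source's size and contents, and the source MemMap
// is left invalid with its old address range unmapped.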
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     page_size,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       page_size,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  void* source_addr = source.Begin();
  void* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  std::vector<uint8_t> data = RandomData(page_size);
  memcpy(source.Begin(), data.data(), data.size());

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(dest.Size(), static_cast<size_t>(page_size));

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     5 * page_size,  // Need to make it larger
                                                     // initially so we know
                                                     // there won't be mappings
                                                     // in the way when we move
                                                     // source.
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       3 * page_size,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * page_size);
  memcpy(source.Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest.SetSize(page_size);

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * page_size));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(page_size));

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * page_size));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * page_size));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     3 * page_size,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       page_size,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * page_size));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * page_size));

  std::vector<uint8_t> data = RandomData(page_size);
  memcpy(source.Begin(), data.data(), page_size);

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(page_size));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * page_size));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}

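// The replacement must fail when the source overlaps the destination, and a
// failed ReplaceWith must leave both mappings valid, mapped, and unchanged.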
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap dest =
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          3 * page_size,  // Need to make it larger initially so we know there won't be mappings in
                          // the way when we move source.
          PROT_READ | PROT_WRITE,
          /*low_4gb=*/ false,
          &error_msg);
  ASSERT_TRUE(dest.IsValid());
  // Resize down to 1 page so we can remap the rest.
  dest.SetSize(page_size);
  // Create source from the last 2 pages.
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       dest.Begin() + page_size,
                                       2 * page_size,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       /*reuse=*/ false,
                                       /*reservation=*/ nullptr,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  ASSERT_EQ(dest.Begin() + page_size, source.Begin());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * page_size);
  memcpy(source.Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(page_size);
  memcpy(dest.Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(page_size));

  ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source.Size(), data.size());
  ASSERT_EQ(dest.Size(), dest_data.size());

  ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
}
#endif  // HAVE_MREMAP_SYSCALL

TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /*byte_count=*/ 0,
                                    PROT_READ,
                                    /*low_4gb=*/ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             page_size,
                             PROT_READ | PROT_WRITE,
                             /*low_4gb=*/ false,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousFailNullError) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
                                    reinterpret_cast<uint8_t*>(static_cast<size_t>(page_size)),
                                    0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    nullptr);
  ASSERT_FALSE(map.IsValid());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /*byte_count=*/ 0,
                                    PROT_READ,
                                    /*low_4gb=*/ true,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             page_size,
                             PROT_READ | PROT_WRITE,
                             /*low_4gb=*/ true,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}

TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  const size_t map_size = MemMap::GetPageSize();
  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));
  MemMap map = MemMap::MapFile(/*byte_count=*/ map_size,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/ 0,
                               /*low_4gb=*/ true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), map_size);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
#endif

TEST_F(MemMapTest, MapAnonymousExactAddr) {
  // TODO: The semantics of MemMap::MapAnonymous() with a given address but without
  // `reuse == true` or `reservation != nullptr` are weird. We should either drop support
  // for it, or take it only as a hint and allow the result to be mapped elsewhere.
  // Currently we're seeing failures with ASAN. b/118408378
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(page_size, /*low_4gb=*/ false);
  // Map at an address that should work, which should succeed.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     valid_address,
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0.BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1.BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     reinterpret_cast<uint8_t*>(map1.BaseBegin()),
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_FALSE(map2.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif

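// Walks the file-backed mapping back one page at a time: each RemapAtEnd call
// carves the last page out of `map` and replaces it with a MAP_FIXED file
// view at the matching file offset, so the tail's contents must equal the
// corresponding page of the scratch file.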
TEST_F(MemMapTest, RemapFileViewAtEnd) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  ScratchFile scratch_file;

  // Create a scratch file 3 pages large.
  const size_t map_size = 3 * page_size;
  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
  memset(&data[0], 0x55, page_size);
  memset(&data[page_size], 0x5a, page_size);
  memset(&data[2 * page_size], 0xaa, page_size);
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));

  MemMap map = MemMap::MapFile(/*byte_count=*/ map_size,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/ 0,
                               /*low_4gb=*/ true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), map_size);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
  ASSERT_EQ(data[0], *map.Begin());
  ASSERT_EQ(data[page_size], *(map.Begin() + page_size));
  ASSERT_EQ(data[2 * page_size], *(map.Begin() + 2 * page_size));

  for (size_t offset = 2 * page_size; offset > 0; offset -= page_size) {
    MemMap tail = map.RemapAtEnd(map.Begin() + offset,
                                 "bad_offset_map",
                                 PROT_READ,
                                 MAP_PRIVATE | MAP_FIXED,
                                 scratch_file.GetFd(),
                                 offset,
                                 &error_msg);
    ASSERT_TRUE(tail.IsValid()) << error_msg;
    ASSERT_TRUE(error_msg.empty());
    ASSERT_EQ(offset, map.Size());
    ASSERT_EQ(static_cast<size_t>(page_size), tail.Size());
    ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
    ASSERT_EQ(data[offset], *tail.Begin());
  }
}

TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // This test does not work under AddressSanitizer.
  // Historical note: This test did not work under Valgrind either.
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  constexpr size_t size = 0x100000;
  // Try addresses from 2GB up to 4GB until one can be mapped.
  size_t start_addr = 2 * GB;
  std::string error_msg;
  MemMap map;
  for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
    map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                               reinterpret_cast<uint8_t*>(start_addr),
                               size,
                               PROT_READ | PROT_WRITE,
                               /*low_4gb=*/ true,
                               /*reuse=*/ false,
                               /*reservation=*/ nullptr,
                               &error_msg);
    if (map.IsValid()) {
      break;
    }
  }
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
}

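// A worked example of the overflow being tested, assuming 4 KiB pages on a
// 64-bit build: ptr = 0xFFFFFFFFFFFFF000, so ptr + 2 * page_size wraps around
// to 0x1000. MapAnonymous must detect the wrap and fail cleanly.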
TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= page_size;  // Now it's close to the top.
  MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
                                    reinterpret_cast<uint8_t*>(ptr),
                                    2 * page_size,  // brings it over the top.
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}

#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  MemMap map =
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           page_size,
                           PROT_READ | PROT_WRITE,
                           /*low_4gb=*/ true,
                           /*reuse=*/ false,
                           /*reservation=*/ nullptr,
                           &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                    /*addr=*/ reinterpret_cast<uint8_t*>(0xF0000000),
                                    /*byte_count=*/ 0x20000000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ true,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
#endif

TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
                                    /*byte_count=*/ 0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    &error_msg);
  ASSERT_TRUE(map.IsValid());
  ASSERT_TRUE(error_msg.empty());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
                                     /*addr=*/ reinterpret_cast<uint8_t*>(map.BaseBegin()),
                                     /*byte_count=*/ 0x10000,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ true,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid());
  ASSERT_TRUE(error_msg.empty());
}

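// CheckNoGaps(first, last) is expected to verify that the address range from
// first.Begin() to last.End() is covered by known MemMaps with no unmapped
// holes in between.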
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  MemMap reservation = MemMap::MapAnonymous("MapAnonymous0",
                                            page_size * kNumPages,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ false,
                                            &error_msg);
  ASSERT_TRUE(reservation.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation.BaseBegin());

  // Map at the same address, taking from the `reservation`.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base, map0.Begin());
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base + page_size, map1.Begin());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     page_size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map_base + 2 * page_size, map2.Begin());
  ASSERT_FALSE(reservation.IsValid());  // The entire reservation was used.

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));

  // Unmap the middle one.
  map1.Reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
}

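// Layout sketch for the AlignBy test. The 14-page region is split into 3-,
// 4-, 3- and 4-page pieces:
//
//   base0           base1           base2           base3
//   [ m0: 3 pages ][ m1: 4 pages ][ m2: 3 pages ][ m3: 4 pages ]
//
// AlignBy(2 * page_size) then trims each map's unaligned edge pages; which
// edges get trimmed depends on whether base0 itself is 2-page aligned.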
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  // Map a region.
  MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                   14 * page_size,
                                   PROT_READ | PROT_WRITE,
                                   /*low_4gb=*/ false,
                                   &error_msg);
  ASSERT_TRUE(m0.IsValid());
  uint8_t* base0 = m0.Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0.Size(), 14 * page_size);
  ASSERT_EQ(m0.BaseBegin(), base0);
  ASSERT_EQ(m0.BaseSize(), m0.Size());

  // Break it into several regions by using RemapAtEnd.
  MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
                            "MemMapTest_AlignByTest_map1",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base1 = m1.Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0.Size(), 3 * page_size);

  MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
                            "MemMapTest_AlignByTest_map2",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base2 = m2.Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1.Size(), 4 * page_size);

  MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
                            "MemMapTest_AlignByTest_map3",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base3 = m3.Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2.Size(), 3 * page_size);
  ASSERT_EQ(m3.Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0.Size();
  uint8_t* end1 = base1 + m1.Size();
  uint8_t* end2 = base2 + m2.Size();
  uint8_t* end3 = base3 + m3.Size();

  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size.
  m0.AlignBy(2 * page_size);
  m1.AlignBy(2 * page_size);
  m2.AlignBy(2 * page_size);
  m3.AlignBy(2 * page_size);

  EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));

  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0.Begin(), base0);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
    EXPECT_EQ(m1.Begin(), base1 + page_size);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
    EXPECT_EQ(m2.Begin(), base2 + page_size);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2);
    EXPECT_EQ(m3.Begin(), base3);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3);
  } else {
    EXPECT_EQ(m0.Begin(), base0 + page_size);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0);
    EXPECT_EQ(m1.Begin(), base1);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1);
    EXPECT_EQ(m2.Begin(), base2);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
    EXPECT_EQ(m3.Begin(), base3 + page_size);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
  }
}

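// Sketch of how the 5-page reservation is consumed below. Requested sizes are
// rounded up to whole pages when they are carved out of the reservation:
//
//   [ map1: 1 page ][ map2: 2 pages ][ map3: 2 pages ]
//
// map2 is later split again by TakeReservedMemory() into
//   [ map2a: 1 page ][ map2b: 1 page ]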
TEST_F(MemMapTest, Reservation) {
  CommonInit();
  const size_t page_size = MemMap::GetPageSize();
  std::string error_msg;
  ScratchFile scratch_file;
  const size_t map_size = 5 * page_size;
  std::unique_ptr<uint8_t[]> data(new uint8_t[map_size]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], map_size));

  MemMap reservation = MemMap::MapAnonymous("Test reservation",
                                            map_size,
                                            PROT_NONE,
                                            /*low_4gb=*/ false,
                                            &error_msg);
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_TRUE(error_msg.empty());

  // Map first part of the reservation.
  const size_t chunk1_size = page_size - 1u;
  ASSERT_LT(chunk1_size, map_size) << "We want to split the reservation.";
  uint8_t* addr1 = reservation.Begin();
  MemMap map1 = MemMap::MapFileAtAddress(addr1,
                                         /*byte_count=*/ chunk1_size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         scratch_file.GetFd(),
                                         /*start=*/ 0,
                                         /*low_4gb=*/ false,
                                         scratch_file.GetFilename().c_str(),
                                         /*reuse=*/ false,
                                         &reservation,
                                         &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map1.Size(), chunk1_size);
  ASSERT_EQ(addr1, map1.Begin());
  ASSERT_TRUE(reservation.IsValid());
  // Entire pages are taken from the `reservation`.
  ASSERT_LT(map1.End(), map1.BaseEnd());
  ASSERT_EQ(map1.BaseEnd(), reservation.Begin());

  // Map second part as an anonymous mapping.
  const size_t chunk2_size = 2 * page_size;
  DCHECK_LT(chunk2_size, reservation.Size());  // We want to split the reservation.
  uint8_t* addr2 = reservation.Begin();
  MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
                                     addr2,
                                     /*byte_count=*/ chunk2_size,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     &reservation,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2.Size(), chunk2_size);
  ASSERT_EQ(addr2, map2.Begin());
  ASSERT_EQ(map2.End(), map2.BaseEnd());  // chunk2_size is page aligned.
  ASSERT_EQ(map2.BaseEnd(), reservation.Begin());

  // Map the rest of the reservation except the last byte.
  const size_t chunk3_size = reservation.Size() - 1u;
  uint8_t* addr3 = reservation.Begin();
  MemMap map3 = MemMap::MapFileAtAddress(addr3,
                                         /*byte_count=*/ chunk3_size,
                                         PROT_READ,
                                         MAP_PRIVATE,
                                         scratch_file.GetFd(),
                                         /*start=*/ dchecked_integral_cast<size_t>(addr3 - addr1),
                                         /*low_4gb=*/ false,
                                         scratch_file.GetFilename().c_str(),
                                         /*reuse=*/ false,
                                         &reservation,
                                         &error_msg);
  ASSERT_TRUE(map3.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map3.Size(), chunk3_size);
  ASSERT_EQ(addr3, map3.Begin());
  // Entire pages are taken from the `reservation`, so it's now exhausted.
  ASSERT_FALSE(reservation.IsValid());

  // Now split the MiddleReservation.
  const size_t chunk2a_size = page_size - 1u;
  DCHECK_LT(chunk2a_size, map2.Size());  // We want to split the reservation.
  MemMap map2a = map2.TakeReservedMemory(chunk2a_size);
  ASSERT_TRUE(map2a.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2a.Size(), chunk2a_size);
  ASSERT_EQ(addr2, map2a.Begin());
  ASSERT_TRUE(map2.IsValid());
  ASSERT_LT(map2a.End(), map2a.BaseEnd());
  ASSERT_EQ(map2a.BaseEnd(), map2.Begin());

  // And take the rest of the middle reservation.
  const size_t chunk2b_size = map2.Size() - 1u;
  uint8_t* addr2b = map2.Begin();
  MemMap map2b = map2.TakeReservedMemory(chunk2b_size);
  ASSERT_TRUE(map2b.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map2b.Size(), chunk2b_size);
  ASSERT_EQ(addr2b, map2b.Begin());
  // Taking size - 1 rounds up to the whole remaining page, so map2 is now
  // exhausted.
  ASSERT_FALSE(map2.IsValid());
}

}  // namespace art

namespace {

class DumpMapsOnFailListener : public testing::EmptyTestEventListener {
  void OnTestPartResult(const testing::TestPartResult& result) override {
    switch (result.type()) {
      case testing::TestPartResult::kFatalFailure:
        art::PrintFileToLog("/proc/self/maps", android::base::LogSeverity::ERROR);
        break;

      // TODO: Could consider logging on EXPECT failures.
      case testing::TestPartResult::kNonFatalFailure:
      case testing::TestPartResult::kSkip:
      case testing::TestPartResult::kSuccess:
        break;
    }
  }
};

}  // namespace

// Inject our listener into the test runner.
extern "C"
__attribute__((visibility("default"))) __attribute__((used))
void ArtTestGlobalInit() {
  testing::UnitTest::GetInstance()->listeners().Append(new DumpMapsOnFailListener());
}