1 /*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "mem_map.h"
18
19 #include <memory>
20
21 #include <valgrind.h>
22
23 #include "gtest/gtest.h"
24
25 namespace art {
26
27 class MemMapTest : public testing::Test {
28 public:
BaseBegin(MemMap * mem_map)29 static uint8_t* BaseBegin(MemMap* mem_map) {
30 return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
31 }
BaseSize(MemMap * mem_map)32 static size_t BaseSize(MemMap* mem_map) {
33 return mem_map->base_size_;
34 }
35
RemapAtEndTest(bool low_4gb)36 static void RemapAtEndTest(bool low_4gb) {
37 std::string error_msg;
38 // Cast the page size to size_t.
39 const size_t page_size = static_cast<size_t>(kPageSize);
40 // Map a two-page memory region.
41 MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
42 nullptr,
43 2 * page_size,
44 PROT_READ | PROT_WRITE,
45 low_4gb,
46 false,
47 &error_msg);
48 // Check its state and write to it.
49 uint8_t* base0 = m0->Begin();
50 ASSERT_TRUE(base0 != nullptr) << error_msg;
51 size_t size0 = m0->Size();
52 EXPECT_EQ(m0->Size(), 2 * page_size);
53 EXPECT_EQ(BaseBegin(m0), base0);
54 EXPECT_EQ(BaseSize(m0), size0);
55 memset(base0, 42, 2 * page_size);
56 // Remap the latter half into a second MemMap.
57 MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
58 "MemMapTest_RemapAtEndTest_map1",
59 PROT_READ | PROT_WRITE,
60 &error_msg);
61 // Check the states of the two maps.
62 EXPECT_EQ(m0->Begin(), base0) << error_msg;
63 EXPECT_EQ(m0->Size(), page_size);
64 EXPECT_EQ(BaseBegin(m0), base0);
65 EXPECT_EQ(BaseSize(m0), page_size);
66 uint8_t* base1 = m1->Begin();
67 size_t size1 = m1->Size();
68 EXPECT_EQ(base1, base0 + page_size);
69 EXPECT_EQ(size1, page_size);
70 EXPECT_EQ(BaseBegin(m1), base1);
71 EXPECT_EQ(BaseSize(m1), size1);
72 // Write to the second region.
73 memset(base1, 43, page_size);
74 // Check the contents of the two regions.
75 for (size_t i = 0; i < page_size; ++i) {
76 EXPECT_EQ(base0[i], 42);
77 }
78 for (size_t i = 0; i < page_size; ++i) {
79 EXPECT_EQ(base1[i], 43);
80 }
81 // Unmap the first region.
82 delete m0;
83 // Make sure the second region is still accessible after the first
84 // region is unmapped.
85 for (size_t i = 0; i < page_size; ++i) {
86 EXPECT_EQ(base1[i], 43);
87 }
88 delete m1;
89 }
90
CommonInit()91 void CommonInit() {
92 MemMap::Init();
93 }
94
95 #if defined(__LP64__) && !defined(__x86_64__)
GetLinearScanPos()96 static uintptr_t GetLinearScanPos() {
97 return MemMap::next_mem_pos_;
98 }
99 #endif
100 };
101
102 #if defined(__LP64__) && !defined(__x86_64__)
103
104 #ifdef __BIONIC__
105 extern uintptr_t CreateStartPos(uint64_t input);
106 #endif
107
TEST_F(MemMapTest, Start) {
  CommonInit();
  // The linear-scan cursor must start somewhere in (64KB, ART_BASE_ADDRESS).
  uintptr_t scan_pos = GetLinearScanPos();
  EXPECT_LE(64 * KB, scan_pos);
  EXPECT_LT(scan_pos, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Probe a range of inputs and require each randomized start to differ
  // from the previous one.
  uintptr_t previous = 0;
  for (size_t i = 0; i < 100; ++i) {
    const uintptr_t candidate = CreateStartPos(i * kPageSize);
    EXPECT_NE(previous, candidate);
    previous = candidate;
  }

  // Even the maximal input must stay below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
127 #endif
128
TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  // A zero-byte anonymous request is expected to succeed.
  std::unique_ptr<MemMap> mapping(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                       nullptr,
                                                       0,
                                                       PROT_READ,
                                                       false,
                                                       false,
                                                       &error_msg));
  ASSERT_NE(mapping.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Follow up with a single read/write page at an unspecified address.
  mapping.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
                                     nullptr,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     false,
                                     false,
                                     &error_msg));
  ASSERT_NE(mapping.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}
151
152 #ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  // Requesting a low-4GB page must place the mapping below the 4GB boundary.
  std::unique_ptr<MemMap> mapping(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                       nullptr,
                                                       kPageSize,
                                                       PROT_READ | PROT_WRITE,
                                                       true,
                                                       false,
                                                       &error_msg));
  ASSERT_NE(mapping.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(mapping.get())), 1ULL << 32);
}
167 #endif
168
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // A request at a known-good fixed address should succeed and land exactly
  // where asked.
  std::unique_ptr<MemMap> at_base(MemMap::MapAnonymous("MapAnonymous0",
                                                       reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
                                                       kPageSize,
                                                       PROT_READ | PROT_WRITE,
                                                       false,
                                                       false,
                                                       &error_msg));
  ASSERT_NE(at_base.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(at_base->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
  // A request with no address constraint should also succeed.
  std::unique_ptr<MemMap> anywhere(MemMap::MapAnonymous("MapAnonymous1",
                                                        nullptr,
                                                        kPageSize,
                                                        PROT_READ | PROT_WRITE,
                                                        false,
                                                        false,
                                                        &error_msg));
  ASSERT_NE(anywhere.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(anywhere->BaseBegin() != nullptr);
  // A second request at an address that is already occupied must fail.
  std::unique_ptr<MemMap> conflict(MemMap::MapAnonymous("MapAnonymous2",
                                                        reinterpret_cast<uint8_t*>(anywhere->BaseBegin()),
                                                        kPageSize,
                                                        PROT_READ | PROT_WRITE,
                                                        false,
                                                        false,
                                                        &error_msg));
  ASSERT_EQ(conflict.get(), nullptr) << error_msg;
  ASSERT_FALSE(error_msg.empty());
}
205
// Exercise RemapAtEnd with no address-space restriction.
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false /* low_4gb */);
}
209
210 #ifdef __LP64__
// Exercise RemapAtEnd with the mapping constrained to the low 4GB (64-bit only).
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true /* low_4gb */);
}
214 #endif
215
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  CommonInit();
  // This test may not work under valgrind; bail out early in that case.
  if (RUNNING_ON_VALGRIND != 0) {
    return;
  }
  uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
  std::string error_msg;
  // A large low-4GB mapping at a high (but still valid) fixed address should
  // be honored exactly.
  std::unique_ptr<MemMap> mapping(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                                       reinterpret_cast<uint8_t*>(start_addr),
                                                       0x21000000,
                                                       PROT_READ | PROT_WRITE,
                                                       true,
                                                       false,
                                                       &error_msg));
  ASSERT_NE(mapping.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(mapping.get())), start_addr);
}
234
TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  // Place the request one page below the top of the address space, so that a
  // two-page length wraps past the end.
  const uintptr_t near_top = static_cast<uintptr_t>(0) - kPageSize;
  std::unique_ptr<MemMap> mapping(MemMap::MapAnonymous("MapAnonymousOverflow",
                                                       reinterpret_cast<uint8_t*>(near_top),
                                                       2 * kPageSize,  // Brings it over the top.
                                                       PROT_READ | PROT_WRITE,
                                                       false,
                                                       false,
                                                       &error_msg));
  // The overflowing request must be rejected with an explanatory message.
  ASSERT_TRUE(mapping.get() == nullptr);
  ASSERT_FALSE(error_msg.empty());
}
250
251 #ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  // Asking for a low-4GB mapping at an address at/above 4GB is contradictory
  // and must fail.
  std::unique_ptr<MemMap> mapping(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                                                       reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                                                       kPageSize,
                                                       PROT_READ | PROT_WRITE,
                                                       true,
                                                       false,
                                                       &error_msg));
  ASSERT_TRUE(mapping.get() == nullptr);
  ASSERT_FALSE(error_msg.empty());
}
266
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  // The start is below 4GB but the end of the range crosses the boundary, so
  // a low-4GB request must be rejected.
  std::unique_ptr<MemMap> mapping(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                                       reinterpret_cast<uint8_t*>(0xF0000000),
                                                       0x20000000,
                                                       PROT_READ | PROT_WRITE,
                                                       true,
                                                       false,
                                                       &error_msg));
  ASSERT_TRUE(mapping.get() == nullptr);
  ASSERT_FALSE(error_msg.empty());
}
280 #endif
281
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  // Reserve a region first.
  std::unique_ptr<MemMap> reservation(MemMap::MapAnonymous("MapAnonymousReserve",
                                                           nullptr,
                                                           0x20000,
                                                           PROT_READ | PROT_WRITE,
                                                           false,
                                                           false,
                                                           &error_msg));
  ASSERT_NE(nullptr, reservation.get());
  ASSERT_TRUE(error_msg.empty());
  // With the reuse flag set, mapping on top of the reservation is allowed.
  std::unique_ptr<MemMap> reused(MemMap::MapAnonymous("MapAnonymousReused",
                                                      reinterpret_cast<uint8_t*>(reservation->BaseBegin()),
                                                      0x10000,
                                                      PROT_READ | PROT_WRITE,
                                                      false,
                                                      true,
                                                      &error_msg));
  ASSERT_NE(nullptr, reused.get());
  ASSERT_TRUE(error_msg.empty());
}
304
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Reserve a contiguous three-page region just to learn a usable base address.
  std::unique_ptr<MemMap> reservation(MemMap::MapAnonymous("MapAnonymous0",
                                                           nullptr,
                                                           kPageSize * kNumPages,
                                                           PROT_READ | PROT_WRITE,
                                                           false,
                                                           false,
                                                           &error_msg));
  ASSERT_NE(reservation.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation->BaseBegin());
  // Release the region so its address range becomes available again.
  reservation.reset();

  // Re-map the same range as three adjacent page-sized maps, assuming the
  // space at that address is still free.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_NE(map0.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_NE(map1.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_NE(map2.get(), nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // A single map is trivially gap-free.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Adjacent pairs and the full triple are also gap-free.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Drop the middle page.
  map1.reset();

  // With a hole in the middle, the outer pair must report a gap.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}
370
371 } // namespace art
372