1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <sys/mman.h>
18
19 #include "common_runtime_test.h"
20 #include "gc/collector/immune_spaces.h"
21 #include "gc/space/image_space.h"
22 #include "gc/space/space-inl.h"
23 #include "oat_file.h"
24 #include "thread-current-inl.h"
25
26 namespace art {
27 namespace mirror {
28 class Object;
29 } // namespace mirror
30 namespace gc {
31 namespace collector {
32
33 class FakeOatFile : public OatFile {
34 public:
FakeOatFile(uint8_t * begin,uint8_t * end)35 FakeOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
36 begin_ = begin;
37 end_ = end;
38 }
39 };
40
// Test-only ImageSpace that takes ownership of pre-built memory maps, a live
// bitmap, and a fake oat file instead of loading an image from disk.
class FakeImageSpace : public space::ImageSpace {
 public:
  FakeImageSpace(MemMap&& map,
                 accounting::ContinuousSpaceBitmap&& live_bitmap,
                 std::unique_ptr<FakeOatFile>&& oat_file,
                 MemMap&& oat_map)
      : ImageSpace("FakeImageSpace",
                   /*image_location=*/"",
                   /*profile_file=*/"",
                   std::move(map),
                   std::move(live_bitmap),
                   // NOTE: using `map` after std::move(map) above is safe here:
                   // std::move is only a cast and binding to a MemMap&&
                   // parameter does not modify `map`; the actual move happens
                   // inside the ImageSpace constructor, after every argument
                   // (including map.End()) has been evaluated.
                   map.End()),
        oat_map_(std::move(oat_map)) {
    oat_file_ = std::move(oat_file);
    // Non-owning alias expected by ImageSpace's oat-file accessors.
    oat_file_non_owned_ = oat_file_.get();
  }

 private:
  // Keeps the fake oat mapping alive for the lifetime of this space.
  MemMap oat_map_;
};
61
// Fixture providing helpers to build fake image spaces for ImmuneSpaces tests.
class ImmuneSpacesTest : public CommonRuntimeTest {
  // Upper bound on fake image spaces a single test may create (one bitmap each).
  static constexpr size_t kMaxBitmaps = 10;

 public:
  ImmuneSpacesTest() {}

  // Pre-allocates kMaxBitmaps fake live bitmaps for later CreateImageSpace()
  // calls. Must be called before the first CreateImageSpace().
  void ReserveBitmaps() {
    // Create a bunch of fake bitmaps since these are required to create image spaces. The bitmaps
    // do not need to cover the image spaces though.
    for (size_t i = 0; i < kMaxBitmaps; ++i) {
      accounting::ContinuousSpaceBitmap bitmap(
          accounting::ContinuousSpaceBitmap::Create("bitmap",
                                                    reinterpret_cast<uint8_t*>(kPageSize),
                                                    kPageSize));
      CHECK(bitmap.IsValid());
      live_bitmaps_.push_back(std::move(bitmap));
    }
  }

  // Create an image space, the oat file is optional.
  //
  // Maps an image of `image_size` bytes out of `*image_reservation` and an oat
  // region of `oat_size` bytes out of `*oat_reservation` (both reservations are
  // consumed/shrunk by the mappings), writes a minimal ImageHeader at the start
  // of the image, and wraps everything in a FakeImageSpace.
  // Returns a caller-owned space, or nullptr if either mapping fails.
  // Consumes one bitmap previously set up by ReserveBitmaps().
  FakeImageSpace* CreateImageSpace(size_t image_size,
                                   size_t oat_size,
                                   MemMap* image_reservation,
                                   MemMap* oat_reservation) {
    DCHECK(image_reservation != nullptr);
    DCHECK(oat_reservation != nullptr);
    std::string error_str;
    MemMap image_map = MemMap::MapAnonymous("FakeImageSpace",
                                            image_size,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ true,
                                            /*reservation=*/ image_reservation,
                                            &error_str);
    if (!image_map.IsValid()) {
      LOG(ERROR) << error_str;
      return nullptr;
    }
    CHECK(!live_bitmaps_.empty());
    // Hand out one of the pre-reserved bitmaps; it does not need to actually
    // cover the image.
    accounting::ContinuousSpaceBitmap live_bitmap(std::move(live_bitmaps_.back()));
    live_bitmaps_.pop_back();
    MemMap oat_map = MemMap::MapAnonymous("OatMap",
                                          oat_size,
                                          PROT_READ | PROT_WRITE,
                                          /*low_4gb=*/ true,
                                          /*reservation=*/ oat_reservation,
                                          &error_str);
    if (!oat_map.IsValid()) {
      LOG(ERROR) << error_str;
      return nullptr;
    }
    std::unique_ptr<FakeOatFile> oat_file(new FakeOatFile(oat_map.Begin(), oat_map.End()));
    // Create image header.
    // Placement-new the header into the start of the image mapping; sections
    // are left zero-initialized since the tests never read them.
    ImageSection sections[ImageHeader::kSectionCount];
    new (image_map.Begin()) ImageHeader(
        /*image_reservation_size=*/ image_size,
        /*component_count=*/ 1u,
        /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
        /*image_size=*/ image_size,
        sections,
        /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
        /*oat_checksum=*/ 0u,
        // The oat file data in the header is always right after the image space.
        /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
        /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
        /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
        /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
        /*boot_image_begin=*/ 0u,
        /*boot_image_size=*/ 0u,
        /*boot_image_component_count=*/ 0u,
        /*boot_image_checksum=*/ 0u,
        /*pointer_size=*/ sizeof(void*));
    return new FakeImageSpace(std::move(image_map),
                              std::move(live_bitmap),
                              std::move(oat_file),
                              std::move(oat_map));
  }

 private:
  // Bitmap pool for pre-allocated fake bitmaps. We need to pre-allocate them since we don't want
  // them to randomly get placed somewhere where we want an image space.
  std::vector<accounting::ContinuousSpaceBitmap> live_bitmaps_;
};
144
145 class FakeSpace : public space::ContinuousSpace {
146 public:
FakeSpace(uint8_t * begin,uint8_t * end)147 FakeSpace(uint8_t* begin, uint8_t* end)
148 : ContinuousSpace("FakeSpace",
149 space::kGcRetentionPolicyNeverCollect,
150 begin,
151 end,
152 /*limit=*/end) {}
153
GetType() const154 space::SpaceType GetType() const override {
155 return space::kSpaceTypeMallocSpace;
156 }
157
CanMoveObjects() const158 bool CanMoveObjects() const override {
159 return false;
160 }
161
GetLiveBitmap()162 accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
163 return nullptr;
164 }
165
GetMarkBitmap()166 accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
167 return nullptr;
168 }
169 };
170
// Two adjacent non-image spaces should be merged into one immune region.
TEST_F(ImmuneSpacesTest, AppendBasic) {
  ImmuneSpaces spaces;
  uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
  FakeSpace first(base, base + 45 * KB);
  FakeSpace second(first.Limit(), first.Limit() + 813 * KB);
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&first);
    spaces.AddSpace(&second);
  }
  EXPECT_TRUE(spaces.ContainsSpace(&first));
  EXPECT_TRUE(spaces.ContainsSpace(&second));
  // The largest immune region must span both spaces since they are contiguous.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), first.Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), second.Limit());
}
186
// Tests [image][oat][space] producing a single large immune region.
TEST_F(ImmuneSpacesTest, AppendAfterImage) {
  ReserveBitmaps();
  ImmuneSpaces spaces;
  constexpr size_t kImageSize = 123 * kPageSize;
  constexpr size_t kImageOatSize = 321 * kPageSize;
  constexpr size_t kOtherSpaceSize = 100 * kPageSize;

  // Reserve one contiguous region so that image, oat, and the extra space are
  // guaranteed to be adjacent in the layout [image][oat][other space].
  std::string error_str;
  MemMap reservation = MemMap::MapAnonymous("reserve",
                                            kImageSize + kImageOatSize + kOtherSpaceSize,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ true,
                                            &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  // Split the front of the reservation off for the image; the remainder feeds
  // the oat mapping inside CreateImageSpace().
  MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<FakeImageSpace> image_space(CreateImageSpace(kImageSize,
                                                               kImageOatSize,
                                                               &image_reservation,
                                                               &reservation));
  ASSERT_TRUE(image_space != nullptr);
  // The image mapping should have fully consumed its reservation.
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  const ImageHeader& image_header = image_space->GetImageHeader();
  // A plain space placed directly after the oat file end.
  FakeSpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);

  EXPECT_EQ(image_header.GetImageSize(), kImageSize);
  EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
            kImageOatSize);
  EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
  // Check that we do not include the oat if there is no space after.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(image_space.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            image_space->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            image_space->Limit());
  // Add another space and ensure it gets appended.
  EXPECT_NE(image_space->Limit(), space.Begin());
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&space);
  }
  EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
  EXPECT_TRUE(spaces.ContainsSpace(&space));
  // CreateLargestImmuneRegion should have coalesced the two spaces since the oat code after the
  // image prevents gaps.
  // Check that we have a continuous region.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            image_space->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
}
245
// Test [image1][image2][image1 oat][image2 oat][image3] producing a single large immune region.
TEST_F(ImmuneSpacesTest, MultiImage) {
  ReserveBitmaps();
  // Image 2 needs to be smaller or else it may be chosen for immune region.
  constexpr size_t kImage1Size = kPageSize * 17;
  constexpr size_t kImage2Size = kPageSize * 13;
  constexpr size_t kImage3Size = kPageSize * 3;
  constexpr size_t kImage1OatSize = kPageSize * 5;
  constexpr size_t kImage2OatSize = kPageSize * 8;
  constexpr size_t kImage3OatSize = kPageSize;
  constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
  constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
  std::string error_str;
  // One contiguous reservation enforces the adjacency in the layout above.
  MemMap reservation = MemMap::MapAnonymous("reserve",
                                            kMemorySize,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ true,
                                            &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  // Front portion holds both image 1 and image 2; oat files come out of the
  // remaining `reservation` in creation order.
  MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<FakeImageSpace> space1(CreateImageSpace(kImage1Size,
                                                          kImage1OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space1 != nullptr);
  // Image 2's share of the reservation is still left over.
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<FakeImageSpace> space2(CreateImageSpace(kImage2Size,
                                                          kImage2OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space2 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  // Finally put a 3rd image space.
  image_reservation = reservation.TakeReservedMemory(kImage3Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space3(CreateImageSpace(kImage3Size,
                                                          kImage3OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space3 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  // The whole reservation should now be consumed.
  ASSERT_FALSE(reservation.IsValid());

  // Check that we do not include the oat if there is no space after.
  ImmuneSpaces spaces;
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
    spaces.AddSpace(space1.get());
    LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
    spaces.AddSpace(space2.get());
  }
  // There are no more heap bytes, the immune region should only be the first 2 image spaces and
  // should exclude the image oat files.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space2->Limit());

  // Add another space after the oat files, now it should contain the entire memory region.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
    spaces.AddSpace(space3.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a smaller non-adjacent space and ensure it does not become part of the immune region.
  // Image size is kImageBytes - kPageSize
  // Oat size is kPageSize.
  // Guard pages to ensure it is not adjacent to an existing immune region.
  // Layout: [guard page][image][oat][guard page]
  constexpr size_t kGuardSize = kPageSize;
  constexpr size_t kImage4Size = kImageBytes - kPageSize;
  constexpr size_t kImage4OatSize = kPageSize;

  reservation = MemMap::MapAnonymous("reserve",
                                     kImage4Size + kImage4OatSize + kGuardSize * 2,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ true,
                                     &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  MemMap guard = reservation.TakeReservedMemory(kGuardSize);
  ASSERT_TRUE(guard.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  guard.Reset();  // Release the guard memory.
  image_reservation = reservation.TakeReservedMemory(kImage4Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space4(CreateImageSpace(kImage4Size,
                                                          kImage4OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space4 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_EQ(reservation.Size(), kGuardSize);
  reservation.Reset();  // Release the guard memory.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
    spaces.AddSpace(space4.get());
  }
  // space4 is smaller than the existing region, so the largest region is unchanged.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a larger non-adjacent space and ensure it becomes the new largest immune region.
  // Image size is kImageBytes + kPageSize
  // Oat size is kPageSize.
  // Guard pages to ensure it is not adjacent to an existing immune region.
  // Layout: [guard page][image][oat][guard page]
  constexpr size_t kImage5Size = kImageBytes + kPageSize;
  constexpr size_t kImage5OatSize = kPageSize;
  reservation = MemMap::MapAnonymous("reserve",
                                     kImage5Size + kImage5OatSize + kGuardSize * 2,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ true,
                                     &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  guard = reservation.TakeReservedMemory(kGuardSize);
  ASSERT_TRUE(guard.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  guard.Reset();  // Release the guard memory.
  image_reservation = reservation.TakeReservedMemory(kImage5Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space5(CreateImageSpace(kImage5Size,
                                                          kImage5OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space5 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_EQ(reservation.Size(), kGuardSize);
  reservation.Reset();  // Release the guard memory.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
    spaces.AddSpace(space5.get());
  }
  // space5 is larger than the previous largest region, so it takes over.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
}
402
403 } // namespace collector
404 } // namespace gc
405 } // namespace art
406