/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>

#include "common_runtime_test.h"
#include "gc/collector/immune_spaces.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "oat_file.h"
#include "thread-current-inl.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror
namespace gc {
namespace collector {

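// Minimal stand-in for an OatFile that only records a [begin, end) address range. The tests never
// load or execute oat code; they only need the range to check adjacency with image spaces.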
class DummyOatFile : public OatFile {
 public:
  DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
    begin_ = begin;
    end_ = end;
  }
};

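// Image space backed by anonymous memory and a DummyOatFile. The oat mapping is kept alive for
// the lifetime of the space via oat_map_.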
class DummyImageSpace : public space::ImageSpace {
 public:
  DummyImageSpace(MemMap&& map,
                  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
                  std::unique_ptr<DummyOatFile>&& oat_file,
                  MemMap&& oat_map)
      : ImageSpace("DummyImageSpace",
                   /*image_location=*/"",
                   std::move(map),
                   std::move(live_bitmap),
                   map.End()),
        oat_map_(std::move(oat_map)) {
    oat_file_ = std::move(oat_file);
    oat_file_non_owned_ = oat_file_.get();
  }

 private:
  MemMap oat_map_;
};

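// Test fixture that pre-reserves dummy bitmaps and builds dummy image spaces out of
// caller-provided memory reservations.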
class ImmuneSpacesTest : public CommonRuntimeTest {
  static constexpr size_t kMaxBitmaps = 10;

 public:
  ImmuneSpacesTest() {}

  void ReserveBitmaps() {
    // Create a bunch of dummy bitmaps since these are required to create image spaces. The bitmaps
    // do not need to cover the image spaces though.
    for (size_t i = 0; i < kMaxBitmaps; ++i) {
      std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
          accounting::ContinuousSpaceBitmap::Create("bitmap",
                                                    reinterpret_cast<uint8_t*>(kPageSize),
                                                    kPageSize));
      CHECK(bitmap != nullptr);
      live_bitmaps_.push_back(std::move(bitmap));
    }
  }

  // Create an image space and an accompanying oat file, carved out of the given reservations.
  DummyImageSpace* CreateImageSpace(size_t image_size,
                                    size_t oat_size,
                                    MemMap* image_reservation,
                                    MemMap* oat_reservation) {
    DCHECK(image_reservation != nullptr);
    DCHECK(oat_reservation != nullptr);
    std::string error_str;
    MemMap image_map = MemMap::MapAnonymous("DummyImageSpace",
                                            image_size,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ true,
                                            /*reservation=*/ image_reservation,
                                            &error_str);
    if (!image_map.IsValid()) {
      LOG(ERROR) << error_str;
      return nullptr;
    }
    CHECK(!live_bitmaps_.empty());
    std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
    live_bitmaps_.pop_back();
    MemMap oat_map = MemMap::MapAnonymous("OatMap",
                                          oat_size,
                                          PROT_READ | PROT_WRITE,
                                          /*low_4gb=*/ true,
                                          /*reservation=*/ oat_reservation,
                                          &error_str);
    if (!oat_map.IsValid()) {
      LOG(ERROR) << error_str;
      return nullptr;
    }
    std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
    // Create image header.
    ImageSection sections[ImageHeader::kSectionCount];
    new (image_map.Begin()) ImageHeader(
        /*image_reservation_size=*/ image_size,
        /*component_count=*/ 1u,
        /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
        /*image_size=*/ image_size,
        sections,
        /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
        /*oat_checksum=*/ 0u,
        // The oat file data in the header is always right after the image space.
        /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
        /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
        /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
        /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
        /*boot_image_begin=*/ 0u,
        /*boot_image_size=*/ 0u,
        /*pointer_size=*/ sizeof(void*));
    return new DummyImageSpace(std::move(image_map),
                               std::move(live_bitmap),
                               std::move(oat_file),
                               std::move(oat_map));
  }

 private:
  // Pool of pre-allocated dummy bitmaps. They are pre-allocated so that their backing memory does
  // not end up in regions where the tests want to place an image space.
  std::vector<std::unique_ptr<accounting::ContinuousSpaceBitmap>> live_bitmaps_;
};

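// Continuous space with no bitmaps, used as a stand-in for an arbitrary non-image heap space.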
class DummySpace : public space::ContinuousSpace {
 public:
  DummySpace(uint8_t* begin, uint8_t* end)
      : ContinuousSpace("DummySpace",
                        space::kGcRetentionPolicyNeverCollect,
                        begin,
                        end,
                        /*limit=*/end) {}

  space::SpaceType GetType() const override {
    return space::kSpaceTypeMallocSpace;
  }

  bool CanMoveObjects() const override {
    return false;
  }

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
    return nullptr;
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
    return nullptr;
  }
};

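// Tests that two adjacent spaces are merged into a single immune region spanning both of them.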
TEST_F(ImmuneSpacesTest, AppendBasic) {
  ImmuneSpaces spaces;
  uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
  DummySpace a(base, base + 45 * KB);
  DummySpace b(a.Limit(), a.Limit() + 813 * KB);
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&a);
    spaces.AddSpace(&b);
  }
  EXPECT_TRUE(spaces.ContainsSpace(&a));
  EXPECT_TRUE(spaces.ContainsSpace(&b));
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), a.Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), b.Limit());
}

// Tests [image][oat][space] producing a single large immune region.
TEST_F(ImmuneSpacesTest, AppendAfterImage) {
  ReserveBitmaps();
  ImmuneSpaces spaces;
  constexpr size_t kImageSize = 123 * kPageSize;
  constexpr size_t kImageOatSize = 321 * kPageSize;
  constexpr size_t kOtherSpaceSize = 100 * kPageSize;

  std::string error_str;
  MemMap reservation = MemMap::MapAnonymous("reserve",
                                            kImageSize + kImageOatSize + kOtherSpaceSize,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ true,
                                            &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<DummyImageSpace> image_space(CreateImageSpace(kImageSize,
                                                                kImageOatSize,
                                                                &image_reservation,
                                                                &reservation));
  ASSERT_TRUE(image_space != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  const ImageHeader& image_header = image_space->GetImageHeader();
  DummySpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);

  EXPECT_EQ(image_header.GetImageSize(), kImageSize);
  EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
            kImageOatSize);
  EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
  // Check that we do not include the oat if there is no space after.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(image_space.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            image_space->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            image_space->Limit());
  // Add another space and ensure it gets appended.
  EXPECT_NE(image_space->Limit(), space.Begin());
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&space);
  }
  EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
  EXPECT_TRUE(spaces.ContainsSpace(&space));
  // CreateLargestImmuneRegion should have coalesced the two spaces since the oat code after the
  // image prevents gaps.
  // Check that we have a continuous region.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            image_space->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
}

// Tests [image1][image2][image1 oat][image2 oat][image3] producing a single large immune region.
TEST_F(ImmuneSpacesTest, MultiImage) {
  ReserveBitmaps();
  // Image 2 needs to be smaller or else it may be chosen for the immune region.
  constexpr size_t kImage1Size = kPageSize * 17;
  constexpr size_t kImage2Size = kPageSize * 13;
  constexpr size_t kImage3Size = kPageSize * 3;
  constexpr size_t kImage1OatSize = kPageSize * 5;
  constexpr size_t kImage2OatSize = kPageSize * 8;
  constexpr size_t kImage3OatSize = kPageSize;
  constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
  constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
  std::string error_str;
  MemMap reservation = MemMap::MapAnonymous("reserve",
                                            kMemorySize,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ true,
                                            &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<DummyImageSpace> space1(CreateImageSpace(kImage1Size,
                                                           kImage1OatSize,
                                                           &image_reservation,
                                                           &reservation));
  ASSERT_TRUE(space1 != nullptr);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<DummyImageSpace> space2(CreateImageSpace(kImage2Size,
                                                           kImage2OatSize,
                                                           &image_reservation,
                                                           &reservation));
  ASSERT_TRUE(space2 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  // Finally, add a third image space.
  image_reservation = reservation.TakeReservedMemory(kImage3Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<DummyImageSpace> space3(CreateImageSpace(kImage3Size,
                                                           kImage3OatSize,
                                                           &image_reservation,
                                                           &reservation));
  ASSERT_TRUE(space3 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_FALSE(reservation.IsValid());

  // Check that we do not include the oat files if there is no space after them.
  ImmuneSpaces spaces;
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
    spaces.AddSpace(space1.get());
    LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
    spaces.AddSpace(space2.get());
  }
  // There are no more heap bytes after the images, so the immune region should cover only the
  // first two image spaces and should exclude their oat files.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space2->Limit());

  // Add another space after the oat files; now the immune region should contain the entire
  // memory region.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
    spaces.AddSpace(space3.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a smaller non-adjacent space and ensure it does not become part of the immune region.
  // Image size is kImageBytes - kPageSize.
  // Oat size is kPageSize.
  // Guard pages to ensure it is not adjacent to an existing immune region.
  // Layout:  [guard page][image][oat][guard page]
  constexpr size_t kGuardSize = kPageSize;
  constexpr size_t kImage4Size = kImageBytes - kPageSize;
  constexpr size_t kImage4OatSize = kPageSize;

  reservation = MemMap::MapAnonymous("reserve",
                                     kImage4Size + kImage4OatSize + kGuardSize * 2,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ true,
                                     &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  MemMap guard = reservation.TakeReservedMemory(kGuardSize);
  ASSERT_TRUE(guard.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  guard.Reset();  // Release the guard memory.
  image_reservation = reservation.TakeReservedMemory(kImage4Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<DummyImageSpace> space4(CreateImageSpace(kImage4Size,
                                                           kImage4OatSize,
                                                           &image_reservation,
                                                           &reservation));
  ASSERT_TRUE(space4 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_EQ(reservation.Size(), kGuardSize);
  reservation.Reset();  // Release the guard memory.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
    spaces.AddSpace(space4.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a larger non-adjacent space and ensure it becomes the new largest immune region.
  // Image size is kImageBytes + kPageSize.
  // Oat size is kPageSize.
  // Guard pages to ensure it is not adjacent to an existing immune region.
  // Layout:  [guard page][image][oat][guard page]
  constexpr size_t kImage5Size = kImageBytes + kPageSize;
  constexpr size_t kImage5OatSize = kPageSize;
  reservation = MemMap::MapAnonymous("reserve",
                                     kImage5Size + kImage5OatSize + kGuardSize * 2,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ true,
                                     &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  guard = reservation.TakeReservedMemory(kGuardSize);
  ASSERT_TRUE(guard.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  guard.Reset();  // Release the guard memory.
  image_reservation = reservation.TakeReservedMemory(kImage5Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<DummyImageSpace> space5(CreateImageSpace(kImage5Size,
                                                           kImage5OatSize,
                                                           &image_reservation,
                                                           &reservation));
  ASSERT_TRUE(space5 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_EQ(reservation.Size(), kGuardSize);
  reservation.Reset();  // Release the guard memory.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
    spaces.AddSpace(space5.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
}

}  // namespace collector
}  // namespace gc
}  // namespace art