/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
#include <sys/mman.h>

#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "base/common_art_test.h"
#include "base/pointer_size.h"
#include "base/utils.h"
#include "gc/collector/immune_spaces.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "oat/oat_file.h"
#include "thread-current-inl.h"
27 
28 namespace art HIDDEN {
29 namespace mirror {
30 class Object;
31 }  // namespace mirror
32 namespace gc {
33 namespace collector {
34 
// Minimal OatFile double that only carries a caller-chosen [begin, end)
// address range; no real oat data is created.
class FakeOatFile : public OatFile {
 public:
  // `begin`/`end` delimit the memory the fake oat file claims to occupy.
  // Marked non-executable since the tests never run code from it.
  FakeOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
    // Set the (inherited) range members directly; there is no backing file.
    begin_ = begin;
    end_ = end;
  }
};
42 
// ImageSpace backed by plain anonymous memory, paired with a FakeOatFile so
// tests can control exactly where the image and its "oat file" live.
class FakeImageSpace : public space::ImageSpace {
 public:
  FakeImageSpace(MemMap&& map,
                 accounting::ContinuousSpaceBitmap&& live_bitmap,
                 std::unique_ptr<FakeOatFile>&& oat_file,
                 MemMap&& oat_map)
      : ImageSpace("FakeImageSpace",
                   /*image_location=*/"",
                   /*profile_files=*/{},
                   std::move(map),
                   std::move(live_bitmap),
                   // NOTE(review): `map.End()` is evaluated in the same
                   // initializer that passes `std::move(map)`. This is safe
                   // only while the ImageSpace constructor takes the MemMap as
                   // an rvalue reference (the actual move then happens inside
                   // the base constructor, after all arguments are evaluated).
                   // If that parameter ever becomes by-value, this is a
                   // use-after-move — confirm against ImageSpace's signature.
                   map.End()),
        oat_map_(std::move(oat_map)) {
    // Hand the fake oat file to the base class; keep the usual non-owning
    // alias in sync with the owning pointer.
    oat_file_ = std::move(oat_file);
    oat_file_non_owned_ = oat_file_.get();
  }

 private:
  // Keeps the anonymous mapping backing the fake oat file alive for the
  // lifetime of this space.
  MemMap oat_map_;
};
63 
64 class ImmuneSpacesTest : public CommonArtTest {
65   static constexpr size_t kMaxBitmaps = 10;
66 
67  public:
ImmuneSpacesTest()68   ImmuneSpacesTest() {}
69 
ReserveBitmaps()70   void ReserveBitmaps() {
71     const size_t page_size = MemMap::GetPageSize();
72 
73     // Create a bunch of fake bitmaps since these are required to create image spaces. The bitmaps
74     // do not need to cover the image spaces though.
75     for (size_t i = 0; i < kMaxBitmaps; ++i) {
76       accounting::ContinuousSpaceBitmap bitmap(
77           accounting::ContinuousSpaceBitmap::Create(
78               "bitmap", reinterpret_cast<uint8_t*>(static_cast<size_t>(page_size)), page_size));
79       CHECK(bitmap.IsValid());
80       live_bitmaps_.push_back(std::move(bitmap));
81     }
82   }
83 
ReserveImage(size_t image_size,std::string * error_str)84   MemMap ReserveImage(size_t image_size, /*out*/ std::string* error_str) {
85     // If the image is aligned to the current runtime page size, it will already
86     // be naturally aligned. On the other hand, MayAnonymousAligned() requires
87     // that the requested alignment is higher.
88     DCHECK_LE(MemMap::GetPageSize(), kElfSegmentAlignment);
89     if (MemMap::GetPageSize() == kElfSegmentAlignment) {
90       return MemMap::MapAnonymous("reserve",
91                                   image_size,
92                                   PROT_READ | PROT_WRITE,
93                                   /*low_4gb=*/true,
94                                   error_str);
95     }
96     return MemMap::MapAnonymousAligned("reserve",
97                                        image_size,
98                                        PROT_READ | PROT_WRITE,
99                                        /*low_4gb=*/true,
100                                        kElfSegmentAlignment,
101                                        error_str);
102   }
103 
104   // Create an image space, the oat file is optional.
CreateImageSpace(size_t image_size,size_t oat_size,MemMap * image_reservation,MemMap * oat_reservation)105   FakeImageSpace* CreateImageSpace(size_t image_size,
106                                    size_t oat_size,
107                                    MemMap* image_reservation,
108                                    MemMap* oat_reservation) {
109     DCHECK(image_reservation != nullptr);
110     DCHECK(oat_reservation != nullptr);
111     std::string error_str;
112     MemMap image_map = MemMap::MapAnonymous("FakeImageSpace",
113                                             image_size,
114                                             PROT_READ | PROT_WRITE,
115                                             /*low_4gb=*/ true,
116                                             /*reservation=*/ image_reservation,
117                                             &error_str);
118     if (!image_map.IsValid()) {
119       LOG(ERROR) << error_str;
120       return nullptr;
121     }
122     CHECK(!live_bitmaps_.empty());
123     accounting::ContinuousSpaceBitmap live_bitmap(std::move(live_bitmaps_.back()));
124     live_bitmaps_.pop_back();
125     MemMap oat_map = MemMap::MapAnonymous("OatMap",
126                                           oat_size,
127                                           PROT_READ | PROT_WRITE,
128                                           /*low_4gb=*/ true,
129                                           /*reservation=*/ oat_reservation,
130                                           &error_str);
131     if (!oat_map.IsValid()) {
132       LOG(ERROR) << error_str;
133       return nullptr;
134     }
135     std::unique_ptr<FakeOatFile> oat_file(new FakeOatFile(oat_map.Begin(), oat_map.End()));
136     // Create image header.
137     ImageSection sections[ImageHeader::kSectionCount];
138     new (image_map.Begin()) ImageHeader(
139         /*image_reservation_size=*/ image_size,
140         /*component_count=*/ 1u,
141         /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
142         /*image_size=*/ image_size,
143         sections,
144         /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
145         /*oat_checksum=*/ 0u,
146         // The oat file data in the header is always right after the image space.
147         /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
148         /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
149         /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
150         /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
151         /*boot_image_begin=*/ 0u,
152         /*boot_image_size=*/ 0u,
153         /*boot_image_component_count=*/ 0u,
154         /*boot_image_checksum=*/ 0u,
155         /*pointer_size=*/ kRuntimePointerSize);
156     return new FakeImageSpace(std::move(image_map),
157                               std::move(live_bitmap),
158                               std::move(oat_file),
159                               std::move(oat_map));
160   }
161 
162  private:
163   // Bitmap pool for pre-allocated fake bitmaps. We need to pre-allocate them since we don't want
164   // them to randomly get placed somewhere where we want an image space.
165   std::vector<accounting::ContinuousSpaceBitmap> live_bitmaps_;
166 };
167 
168 class FakeSpace : public space::ContinuousSpace {
169  public:
FakeSpace(uint8_t * begin,uint8_t * end)170   FakeSpace(uint8_t* begin, uint8_t* end)
171       : ContinuousSpace("FakeSpace",
172                         space::kGcRetentionPolicyNeverCollect,
173                         begin,
174                         end,
175                         /*limit=*/end) {}
176 
GetType() const177   space::SpaceType GetType() const override {
178     return space::kSpaceTypeMallocSpace;
179   }
180 
CanMoveObjects() const181   bool CanMoveObjects() const override {
182     return false;
183   }
184 
GetLiveBitmap()185   accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
186     return nullptr;
187   }
188 
GetMarkBitmap()189   accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
190     return nullptr;
191   }
192 };
193 
// Two adjacent plain spaces must coalesce into one immune region.
TEST_F(ImmuneSpacesTest, AppendBasic) {
  ImmuneSpaces spaces;
  uint8_t* const base = reinterpret_cast<uint8_t*>(0x1000);
  FakeSpace first(base, base + 45 * KB);
  FakeSpace second(first.Limit(), first.Limit() + 813 * KB);
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&first);
    spaces.AddSpace(&second);
  }
  EXPECT_TRUE(spaces.ContainsSpace(&first));
  EXPECT_TRUE(spaces.ContainsSpace(&second));
  // Adjacent spaces: the largest immune region spans both as one range.
  const auto& region = spaces.GetLargestImmuneRegion();
  EXPECT_EQ(reinterpret_cast<uint8_t*>(region.Begin()), first.Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(region.End()), second.Limit());
}
209 
210 // Tests [image][oat][space] producing a single large immune region.
TEST_F(ImmuneSpacesTest,AppendAfterImage)211 TEST_F(ImmuneSpacesTest, AppendAfterImage) {
212   ReserveBitmaps();
213   ImmuneSpaces spaces;
214   constexpr size_t kImageSize = 123 * kElfSegmentAlignment;
215   constexpr size_t kImageOatSize = 321 * kElfSegmentAlignment;
216   constexpr size_t kOtherSpaceSize = 100 * kElfSegmentAlignment;
217 
218   std::string error_str;
219   MemMap reservation = ReserveImage(kImageSize + kImageOatSize + kOtherSpaceSize, &error_str);
220   ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
221   MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
222   ASSERT_TRUE(image_reservation.IsValid());
223   ASSERT_TRUE(reservation.IsValid());
224 
225   std::unique_ptr<FakeImageSpace> image_space(CreateImageSpace(kImageSize,
226                                                                kImageOatSize,
227                                                                &image_reservation,
228                                                                &reservation));
229   ASSERT_TRUE(image_space != nullptr);
230   ASSERT_FALSE(image_reservation.IsValid());
231   ASSERT_TRUE(reservation.IsValid());
232 
233   const ImageHeader& image_header = image_space->GetImageHeader();
234   FakeSpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);
235 
236   EXPECT_EQ(image_header.GetImageSize(), kImageSize);
237   EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
238             kImageOatSize);
239   EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
240   // Check that we do not include the oat if there is no space after.
241   {
242     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
243     spaces.AddSpace(image_space.get());
244   }
245   EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
246             image_space->Begin());
247   EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
248             image_space->Limit());
249   // Add another space and ensure it gets appended.
250   EXPECT_NE(image_space->Limit(), space.Begin());
251   {
252     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
253     spaces.AddSpace(&space);
254   }
255   EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
256   EXPECT_TRUE(spaces.ContainsSpace(&space));
257   // CreateLargestImmuneRegion should have coalesced the two spaces since the oat code after the
258   // image prevents gaps.
259   // Check that we have a continuous region.
260   EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
261             image_space->Begin());
262   EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
263 }
264 
// Test [image1][image2][image1 oat][image2 oat][image3] producing a single large immune region.
TEST_F(ImmuneSpacesTest, MultiImage) {
  ReserveBitmaps();
  // Image 2 needs to be smaller or else it may be chosen for immune region.
  constexpr size_t kImage1Size = kElfSegmentAlignment * 17;
  constexpr size_t kImage2Size = kElfSegmentAlignment * 13;
  constexpr size_t kImage3Size = kElfSegmentAlignment * 3;
  constexpr size_t kImage1OatSize = kElfSegmentAlignment * 5;
  constexpr size_t kImage2OatSize = kElfSegmentAlignment * 8;
  constexpr size_t kImage3OatSize = kElfSegmentAlignment;
  constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
  constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
  std::string error_str;
  MemMap reservation = ReserveImage(kMemorySize, &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  // Carve the space for images 1 and 2 from the front; their oat files are
  // taken from the remainder of `reservation`, i.e. after both images.
  MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<FakeImageSpace> space1(CreateImageSpace(kImage1Size,
                                                          kImage1OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space1 != nullptr);
  // Image 2 still fits in the image reservation after image 1.
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<FakeImageSpace> space2(CreateImageSpace(kImage2Size,
                                                          kImage2OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space2 != nullptr);
  // The shared image reservation is now fully consumed.
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  // Finally put a 3rd image space.
  image_reservation = reservation.TakeReservedMemory(kImage3Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space3(CreateImageSpace(kImage3Size,
                                                          kImage3OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space3 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  // All of kMemorySize has been handed out.
  ASSERT_FALSE(reservation.IsValid());

  // Check that we do not include the oat if there is no space after.
  ImmuneSpaces spaces;
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
    spaces.AddSpace(space1.get());
    LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
    spaces.AddSpace(space2.get());
  }
  // There are no more heap bytes, the immune region should only be the first 2 image spaces and
  // should exclude the image oat files.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space2->Limit());

  // Add another space after the oat files, now it should contain the entire memory region.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
    spaces.AddSpace(space3.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a smaller non-adjacent space and ensure it does not become part of the immune region.
  // Image size is kImageBytes - kElfSegmentAlignment
  // Oat size is kElfSegmentAlignment.
  // Guard pages to ensure it is not adjacent to an existing immune region.
  // Layout:  [guard page][image][oat][guard page]
  constexpr size_t kGuardSize = kElfSegmentAlignment;
  constexpr size_t kImage4Size = kImageBytes - kElfSegmentAlignment;
  constexpr size_t kImage4OatSize = kElfSegmentAlignment;

  reservation = ReserveImage(kImage4Size + kImage4OatSize + kGuardSize * 2, &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  MemMap guard = reservation.TakeReservedMemory(kGuardSize);
  ASSERT_TRUE(guard.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  guard.Reset();  // Release the guard memory.
  image_reservation = reservation.TakeReservedMemory(kImage4Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space4(CreateImageSpace(kImage4Size,
                                                          kImage4OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space4 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  // Only the trailing guard page remains in the reservation.
  ASSERT_EQ(reservation.Size(), kGuardSize);
  reservation.Reset();  // Release the guard memory.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
    spaces.AddSpace(space4.get());
  }
  // The largest immune region is unchanged: space4 is smaller than the
  // space1..space3 range and not adjacent to it.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a larger non-adjacent space and ensure it becomes the new largest immune region.
  // Image size is kImageBytes + kElfSegmentAlignment
  // Oat size is kElfSegmentAlignment.
  // Guard pages to ensure it is not adjacent to an existing immune region.
  // Layout:  [guard page][image][oat][guard page]
  constexpr size_t kImage5Size = kImageBytes + kElfSegmentAlignment;
  constexpr size_t kImage5OatSize = kElfSegmentAlignment;
  reservation = ReserveImage(kImage5Size + kImage5OatSize + kGuardSize * 2, &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  guard = reservation.TakeReservedMemory(kGuardSize);
  ASSERT_TRUE(guard.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  guard.Reset();  // Release the guard memory.
  image_reservation = reservation.TakeReservedMemory(kImage5Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space5(CreateImageSpace(kImage5Size,
                                                          kImage5OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space5 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_EQ(reservation.Size(), kGuardSize);
  reservation.Reset();  // Release the guard memory.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
    spaces.AddSpace(space5.get());
  }
  // space5 alone is now larger than the space1..space3 range, so it wins.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
}
409 
410 }  // namespace collector
411 }  // namespace gc
412 }  // namespace art
413