/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_TEST_H_
#define ART_RUNTIME_GC_SPACE_SPACE_TEST_H_

#include <stdint.h>
#include <memory>

#include "common_runtime_test.h"
#include "globals.h"
#include "mirror/array-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "zygote_space.h"

namespace art {
namespace gc {
namespace space {

class SpaceTest : public CommonRuntimeTest {
 public:
  jobject byte_array_class_;

  SpaceTest() : byte_array_class_(nullptr) {
  }

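  // Register a space with the heap so allocations and GC can find it. Thread-local
  // buffers are revoked first, presumably to flush per-thread allocation state
  // before the set of spaces changes.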
  void AddSpace(ContinuousSpace* space, bool revoke = true) {
    Heap* heap = Runtime::Current()->GetHeap();
    if (revoke) {
      heap->RevokeAllThreadLocalBuffers();
    }
    heap->AddSpace(space);
    heap->SetSpaceAsDefault(space);
  }

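  // Lazily look up the byte array class ("[B") and cache it in a JNI reference
  // so repeated allocations in a test do not repeat the lookup.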
  mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
    if (byte_array_class_ == nullptr) {
      mirror::Class* byte_array_class =
          Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
      EXPECT_TRUE(byte_array_class != nullptr);
      byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class);
      EXPECT_TRUE(byte_array_class_ != nullptr);
    }
    return reinterpret_cast<mirror::Class*>(self->DecodeJObject(byte_array_class_));
  }

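  // Allocation helpers: Alloc() must fit within the space's current footprint
  // limit, while AllocWithGrowth() may grow the footprint up to the growth
  // limit. Both dress the raw allocation up as a byte array via InstallClass()
  // so the result looks like a valid object to the rest of the runtime.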
  mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                        size_t* bytes_allocated, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

  mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                  size_t* bytes_allocated, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
    mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
    if (obj != nullptr) {
      InstallClass(obj, byte_array_class.Get(), bytes);
    }
    return obj;
  }

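  // Turn a raw allocation into a minimal, GC-visible byte array: set its class,
  // its read barrier pointer (when in use), and a length computed from the
  // requested size minus the array header.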
  void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note the minimum size, which is the size of a zero-length byte array.
    EXPECT_GE(size, SizeOfZeroLengthByteArray());
    EXPECT_TRUE(byte_array_class != nullptr);
    o->SetClass(byte_array_class);
    if (kUseBakerOrBrooksReadBarrier) {
      // Like the proper heap object allocation, install and verify
      // the correct read barrier pointer.
      if (kUseBrooksReadBarrier) {
        o->SetReadBarrierPointer(o);
      }
      o->AssertReadBarrierPointer();
    }
    mirror::Array* arr = o->AsArray<kVerifyNone>();
    size_t header_size = SizeOfZeroLengthByteArray();
    int32_t length = size - header_size;
    arr->SetLength(length);
    EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
  }

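  // The smallest valid allocation: a byte array header with no elements.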
  static size_t SizeOfZeroLengthByteArray() {
    return mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimByte)).Uint32Value();
  }

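  // Each concrete space test supplies a factory with this signature, letting the
  // shared test bodies below run against different MallocSpace implementations
  // (e.g. dlmalloc- or rosalloc-backed spaces).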
  typedef MallocSpace* (*CreateSpaceFn)(const std::string& name, size_t initial_size, size_t growth_limit,
                                        size_t capacity, byte* requested_begin);
  void InitTestBody(CreateSpaceFn create_space);
  void ZygoteSpaceTestBody(CreateSpaceFn create_space);
  void AllocAndFreeTestBody(CreateSpaceFn create_space);
  void AllocAndFreeListTestBody(CreateSpaceFn create_space);

  void SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                           int round, size_t growth_limit);
  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space);
};

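// A deterministic pseudo-random generator (the classic linear congruential
// rand() constants), so tests that use random allocation sizes are reproducible.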
static inline size_t test_rand(size_t* seed) {
  *seed = *seed * 1103515245 + 12345;
  return *seed;
}

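// Exercises create_space argument validation: creation should succeed exactly
// when initial size <= growth limit <= capacity, as the cases below check.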
void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
  {
    // Init < max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init == max == growth
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init > max == growth
    std::unique_ptr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Growth == init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Growth < init < max
    std::unique_ptr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
  {
    // Init < growth < max
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
    EXPECT_TRUE(space.get() != nullptr);
  }
  {
    // Init < max < growth
    std::unique_ptr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
    EXPECT_TRUE(space.get() == nullptr);
  }
}

// TODO: This test is not very good; we should improve it.
// The test should do more allocations before the creation of the ZygoteSpace, and then do
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
  size_t dummy;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make the space findable to the heap; the space will also be deleted when the
  // runtime is cleaned up.
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size;
  StackHandleScope<3> hs(soa.Self());
  Handle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size;
  Handle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);

  // Make sure that the zygote space isn't directly at the start of the space.
  EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr) != nullptr);

  gc::Heap* heap = Runtime::Current()->GetHeap();
  space::Space* old_space = space;
  heap->RemoveSpace(old_space);
  heap->RevokeAllThreadLocalBuffers();
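  // CreateZygoteSpace splits the current space: memory allocated so far becomes
  // the zygote space, and the out parameter receives a fresh allocation space
  // covering the remainder.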
  space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space",
                                                              heap->IsLowMemoryMode(),
                                                              &space);
  delete old_space;
  // Add the zygote space.
  AddSpace(zygote_space, false);

  // Make the new allocation space findable to the heap; it will also be deleted
  // when the runtime is cleaned up.
  AddSpace(space, false);

  // Succeeds, fits without adjusting the footprint limit.
  ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(2U * MB, ptr3_bytes_allocated);
  EXPECT_LE(2U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));

  // Final clean up.
  free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
  size_t dummy = 0;
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Make the space findable to the heap; the space will also be deleted when the
  // runtime is cleaned up.
  AddSpace(space);

  // Succeeds, fits without adjusting the footprint limit.
  size_t ptr1_bytes_allocated, ptr1_usable_size;
  StackHandleScope<3> hs(soa.Self());
  Handle<mirror::Object> ptr1(
      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
  EXPECT_TRUE(ptr1.Get() != nullptr);
  EXPECT_LE(1U * MB, ptr1_bytes_allocated);
  EXPECT_LE(1U * MB, ptr1_usable_size);
  EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr2 == nullptr);

  // Succeeds, adjusts the footprint.
  size_t ptr3_bytes_allocated, ptr3_usable_size;
  Handle<mirror::Object> ptr3(
      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
  EXPECT_TRUE(ptr3.Get() != nullptr);
  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
  EXPECT_LE(8U * MB, ptr3_usable_size);
  EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);

  // Fails, requires a higher footprint limit.
  mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr4 == nullptr);

  // Also fails, requires a higher allowed footprint.
  mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr);
  EXPECT_TRUE(ptr5 == nullptr);

  // Release some memory.
  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
  EXPECT_EQ(free3, ptr3_bytes_allocated);
  space->Free(self, ptr3.Assign(nullptr));
  EXPECT_LE(8U * MB, free3);

  // Succeeds, now that memory has been freed.
  size_t ptr6_bytes_allocated, ptr6_usable_size;
  Handle<mirror::Object> ptr6(
      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
  EXPECT_TRUE(ptr6.Get() != nullptr);
  EXPECT_LE(9U * MB, ptr6_bytes_allocated);
  EXPECT_LE(9U * MB, ptr6_usable_size);
  EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);

  // Final clean up.
  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
  space->Free(self, ptr1.Assign(nullptr));
  EXPECT_LE(1U * MB, free1);
}

void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
  MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Make the space findable to the heap; the space will also be deleted when the
  // runtime is cleaned up.
  AddSpace(space);
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Succeeds, fits without adjusting the max allowed footprint.
  mirror::Object* lots_of_objects[1024];
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size;
    size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
    lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
                               &usable_size);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);

  // Succeeds, fits by adjusting the max allowed footprint.
  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
    size_t allocation_size, usable_size;
    lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
    EXPECT_TRUE(lots_of_objects[i] != nullptr);
    size_t computed_usable_size;
    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
    EXPECT_EQ(usable_size, computed_usable_size);
  }

  // Release memory.
  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
}

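// Stress footprint/size bookkeeping for one round: fill the space with objects
// (a positive object_size requests fixed-size allocations; a negative one
// requests random sizes up to -object_size), then free in passes while
// trimming. Rounds <= 1 allocate without growing the footprint; later rounds
// use AllocWithGrowth.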
void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
                                                    int round, size_t growth_limit) {
  if ((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit)) ||
      (object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit))) {
    // No allocation can succeed.
    return;
  }

  // The space's footprint equals the amount of resources requested from the system.
  size_t footprint = space->GetFootprint();

  // The space must at least have its bookkeeping allocated.
  EXPECT_GT(footprint, 0u);

  // But it shouldn't exceed the growth limit.
  EXPECT_LE(footprint, growth_limit);

  // The space's size shouldn't exceed the growth limit either.
  EXPECT_LE(space->Size(), growth_limit);

  // This invariant should always hold; otherwise the space has grown larger than
  // it believes its size to be (which would break other invariants).
  EXPECT_GE(space->Size(), footprint);

  // Fill the space with lots of small objects up to the growth limit.
  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
  std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
  size_t last_object = 0;  // last object for which allocation succeeded
  size_t amount_allocated = 0;  // amount of space allocated
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  size_t rand_seed = 123456789;
  for (size_t i = 0; i < max_objects; i++) {
    size_t alloc_fails = 0;  // number of failed allocations
    size_t max_fails = 30;  // number of times we fail allocation before giving up
    for (; alloc_fails < max_fails; alloc_fails++) {
      size_t alloc_size;
      if (object_size > 0) {
        alloc_size = object_size;
      } else {
        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
        // Note the minimum size, which is the size of a zero-length byte array.
        size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
        if (alloc_size < size_of_zero_length_byte_array) {
          alloc_size = size_of_zero_length_byte_array;
        }
      }
      StackHandleScope<1> hs(soa.Self());
      auto object(hs.NewHandle<mirror::Object>(nullptr));
      size_t bytes_allocated = 0;
      if (round <= 1) {
        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
      } else {
        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
      }
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
      if (object.Get() != nullptr) {  // allocation succeeded
        lots_of_objects[i] = object.Get();
        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
        EXPECT_EQ(bytes_allocated, allocation_size);
        if (object_size > 0) {
          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
        } else {
          EXPECT_GE(allocation_size, 8u);
        }
        amount_allocated += allocation_size;
        break;
      }
    }
    if (alloc_fails == max_fails) {
      last_object = i;
      break;
    }
  }
  CHECK_NE(last_object, 0u);  // we should have filled the space
  EXPECT_GT(amount_allocated, 0u);

  // We shouldn't have gone past the growth limit.
  EXPECT_LE(amount_allocated, growth_limit);
  EXPECT_LE(footprint, growth_limit);
  EXPECT_LE(space->Size(), growth_limit);

  // The footprint and size should agree with the amount allocated.
  EXPECT_GE(footprint, amount_allocated);
  EXPECT_GE(space->Size(), amount_allocated);

  // Release storage in a semi-ad-hoc manner: trim, then free every
  // free_increment-th surviving object, halving the stride each pass.
  size_t free_increment = 96;
  while (true) {
    {
      ScopedThreadStateChange tsc(self, kNative);
      // Give the space a haircut.
      space->Trim();
    }

    // Bounds sanity.
    footprint = space->GetFootprint();
    EXPECT_LE(amount_allocated, growth_limit);
    EXPECT_GE(footprint, amount_allocated);
    EXPECT_LE(footprint, growth_limit);
    EXPECT_GE(space->Size(), amount_allocated);
    EXPECT_LE(space->Size(), growth_limit);

    if (free_increment == 0) {
      break;
    }

    // Free some objects.
    for (size_t i = 0; i < last_object; i += free_increment) {
      mirror::Object* object = lots_of_objects.get()[i];
      if (object == nullptr) {
        continue;
      }
      size_t allocation_size = space->AllocationSize(object, nullptr);
      if (object_size > 0) {
        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
      } else {
        EXPECT_GE(allocation_size, 8u);
      }
      space->Free(self, object);
      lots_of_objects.get()[i] = nullptr;
      amount_allocated -= allocation_size;
      footprint = space->GetFootprint();
      EXPECT_GE(space->Size(), footprint);  // invariant
    }

    free_increment >>= 1;
  }

  // The space should be empty at this point, before the large allocation below.
  // For RosAlloc, revoke thread-local runs, which are kept even when empty for
  // performance reasons, so that they won't cause the following large object
  // allocation to fail due to potential fragmentation. Note that they are
  // normally revoked at each GC (but there is no GC here).
  space->RevokeAllThreadLocalBuffers();

  // All memory was released; try a large allocation to check that freed memory
  // is being coalesced.
  StackHandleScope<1> hs(soa.Self());
  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
  size_t bytes_allocated = 0;
  if (round <= 1) {
    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
  } else {
    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
                                        nullptr));
  }
  EXPECT_TRUE(large_object.Get() != nullptr);

  // Sanity check the footprint.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);

  // Clean up.
  space->Free(self, large_object.Assign(nullptr));

  // Sanity check the footprint again.
  footprint = space->GetFootprint();
  EXPECT_LE(footprint, growth_limit);
  EXPECT_GE(space->Size(), footprint);
  EXPECT_LE(space->Size(), growth_limit);
}

void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, CreateSpaceFn create_space) {
  if (object_size < SizeOfZeroLengthByteArray()) {
    // Too small for the object layout/model.
    return;
  }
  size_t initial_size = 4 * MB;
  size_t growth_limit = 8 * MB;
  size_t capacity = 16 * MB;
  MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
  ASSERT_TRUE(space != nullptr);

  // Basic sanity.
  EXPECT_EQ(space->Capacity(), growth_limit);
  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);

  // Make the space findable to the heap; it will also be deleted when the
  // runtime is cleaned up.
  AddSpace(space);

  // In round 1 we don't allocate with growth and therefore can't grow past the
  // initial size. This effectively makes initial_size the growth limit, so pass
  // it as such.
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
  // Remove the growth limit.
  space->ClearGrowthLimit();
  EXPECT_EQ(space->Capacity(), capacity);
  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
}

#define TEST_SizeFootPrintGrowthLimitAndTrimStatic(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##StaticTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(size, spaceFn); \
  }

#define TEST_SizeFootPrintGrowthLimitAndTrimRandom(name, spaceName, spaceFn, size) \
  TEST_F(spaceName##RandomTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
    SizeFootPrintGrowthLimitAndTrimDriver(-size, spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_BASE(spaceName, spaceFn) \
  class spaceName##BaseTest : public SpaceTest { \
  }; \
  \
  TEST_F(spaceName##BaseTest, Init) { \
    InitTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, ZygoteSpace) { \
    ZygoteSpaceTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFree) { \
    AllocAndFreeTestBody(spaceFn); \
  } \
  TEST_F(spaceName##BaseTest, AllocAndFreeList) { \
    AllocAndFreeListTestBody(spaceFn); \
  }

#define TEST_SPACE_CREATE_FN_STATIC(spaceName, spaceFn) \
  class spaceName##StaticTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(12B, spaceName, spaceFn, 12) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimStatic(8MB, spaceName, spaceFn, 8 * MB)

#define TEST_SPACE_CREATE_FN_RANDOM(spaceName, spaceFn) \
  class spaceName##RandomTest : public SpaceTest { \
  }; \
  \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(16B, spaceName, spaceFn, 16) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(24B, spaceName, spaceFn, 24) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(32B, spaceName, spaceFn, 32) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(64B, spaceName, spaceFn, 64) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(128B, spaceName, spaceFn, 128) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1KB, spaceName, spaceFn, 1 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4KB, spaceName, spaceFn, 4 * KB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(1MB, spaceName, spaceFn, 1 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(4MB, spaceName, spaceFn, 4 * MB) \
  TEST_SizeFootPrintGrowthLimitAndTrimRandom(8MB, spaceName, spaceFn, 8 * MB)
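
// A concrete space test supplies a factory function and instantiates these
// macros; for example (names illustrative):
//   TEST_SPACE_CREATE_FN_BASE(DlMallocSpace, CreateDlMallocSpace)
//   TEST_SPACE_CREATE_FN_STATIC(DlMallocSpace, CreateDlMallocSpace)
//   TEST_SPACE_CREATE_FN_RANDOM(DlMallocSpace, CreateDlMallocSpace)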

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_TEST_H_