/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <deque>

#include "bump_pointer_space-inl.h"
#include "bump_pointer_space.h"
#include "base/dumpable.h"
#include "base/logging.h"
#include "gc/accounting/read_barrier_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If a region has live objects whose size is less than this percent
// value of the region size, evacuate the region.
static constexpr uint kEvacuateLivePercentThreshold = 75U;
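// For example (illustrative arithmetic, assuming the usual 256 KiB region
// size): an allocated region with fewer than 192 KiB of live bytes
// (75% of 256 KiB) is evacuated; one with 200 KiB of live bytes is not.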

// Whether we protect the unused and cleared regions.
static constexpr bool kProtectClearedRegions = kIsDebugBuild;

// Whether we poison memory areas occupied by dead objects in unevacuated regions.
static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = true;

// Special 32-bit value used to poison memory areas occupied by dead
// objects in unevacuated regions. Dereferencing this value is expected
// to trigger a memory protection fault, as it is unlikely that it
// points to a valid, non-protected memory area.
static constexpr uint32_t kPoisonDeadObject = 0xBADDB01D;  // "BADDROID"

// Whether we check a region's live bytes count against the region bitmap.
static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;

MemMap RegionSpace::CreateMemMap(const std::string& name,
                                 size_t capacity,
                                 uint8_t* requested_begin) {
  CHECK_ALIGNED(capacity, kRegionSize);
  std::string error_msg;
  // Ask for an extra kRegionSize of capacity so that we can align the map by kRegionSize
  // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
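  // For example (illustrative addresses, assuming kRegionSize = 256 KiB, i.e.
  // 0x40000): a base address of 0x12345000 rounds up to 0x12380000; the extra
  // kRegionSize requested below guarantees that an aligned, capacity-sized
  // window still fits inside the mapping.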
  MemMap mem_map;
  while (true) {
    mem_map = MemMap::MapAnonymous(name.c_str(),
                                   requested_begin,
                                   capacity + kRegionSize,
                                   PROT_READ | PROT_WRITE,
                                   /*low_4gb=*/ true,
                                   /*reuse=*/ false,
                                   /*reservation=*/ nullptr,
                                   &error_msg);
    if (mem_map.IsValid() || requested_begin == nullptr) {
      break;
    }
    // Retry with no specified request begin.
    requested_begin = nullptr;
  }
  if (!mem_map.IsValid()) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    MemMap::DumpMaps(LOG_STREAM(ERROR));
    return MemMap::Invalid();
  }
  CHECK_EQ(mem_map.Size(), capacity + kRegionSize);
  CHECK_EQ(mem_map.Begin(), mem_map.BaseBegin());
  CHECK_EQ(mem_map.Size(), mem_map.BaseSize());
  if (IsAlignedParam(mem_map.Begin(), kRegionSize)) {
    // Got an aligned map. Since we requested a map that's kRegionSize larger,
    // shrink it by kRegionSize at the end.
    mem_map.SetSize(capacity);
  } else {
    // Got an unaligned map. Align both ends.
    mem_map.AlignBy(kRegionSize);
  }
  CHECK_ALIGNED(mem_map.Begin(), kRegionSize);
  CHECK_ALIGNED(mem_map.End(), kRegionSize);
  CHECK_EQ(mem_map.Size(), capacity);
  return mem_map;
}

RegionSpace* RegionSpace::Create(
    const std::string& name, MemMap&& mem_map, bool use_generational_cc) {
  return new RegionSpace(name, std::move(mem_map), use_generational_cc);
}

RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc)
    : ContinuousMemMapAllocSpace(name,
                                 std::move(mem_map),
                                 mem_map.Begin(),
                                 mem_map.End(),
                                 mem_map.End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock),
      use_generational_cc_(use_generational_cc),
      time_(1U),
      num_regions_(mem_map_.Size() / kRegionSize),
      num_non_free_regions_(0U),
      num_evac_regions_(0U),
      max_peak_num_non_free_regions_(0U),
      non_free_region_index_limit_(0U),
      current_region_(&full_region_),
      evac_region_(nullptr),
      cyclic_alloc_region_index_(0U) {
  CHECK_ALIGNED(mem_map_.Size(), kRegionSize);
  CHECK_ALIGNED(mem_map_.Begin(), kRegionSize);
  DCHECK_GT(num_regions_, 0U);
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map_.Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i].Init(i, region_addr, region_addr + kRegionSize);
  }
  mark_bitmap_ =
      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity());
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
  // Protect the whole region space from the start.
  Protect();
}

size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

void RegionSpace::Region::SetAsUnevacFromSpace(bool clear_live_bytes) {
  // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
  DCHECK(GetUseGenerationalCC() || clear_live_bytes);
  DCHECK(!IsFree() && IsInToSpace());
  type_ = RegionType::kRegionTypeUnevacFromSpace;
  if (IsNewlyAllocated()) {
    // A newly allocated region set as unevac from-space must be
    // a large or large tail region.
    DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
    // Always clear the live bytes of a newly allocated (large or
    // large tail) region.
    clear_live_bytes = true;
    // Clear the "newly allocated" status here, as we do not want the
    // GC to see it when encountering (and processing) references in the
    // from-space.
    //
    // Invariant: There should be no newly-allocated region in the
    // from-space (when the from-space exists, which is between the calls
    // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
    is_newly_allocated_ = false;
  }
  if (clear_live_bytes) {
    // Reset the live bytes, as we have made a non-evacuation
    // decision (possibly based on the percentage of live bytes).
    live_bytes_ = 0;
  }
}

bool RegionSpace::Region::GetUseGenerationalCC() {
  // We are retrieving the info from Heap, instead of the cached version in
  // RegionSpace, because accessing the Heap from a Region object is easier
  // than accessing the RegionSpace.
  return art::Runtime::Current()->GetHeap()->GetUseGenerationalCC();
}

inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
  // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections.
  DCHECK(GetUseGenerationalCC() || (evac_mode != kEvacModeNewlyAllocated));
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // The region should be evacuated if:
  // - the evacuation is forced (`evac_mode == kEvacModeForceAll`); or
  // - the region was allocated after the start of the previous GC (newly allocated region); or
  // - the live ratio is below threshold (`kEvacuateLivePercentThreshold`).
  if (UNLIKELY(evac_mode == kEvacModeForceAll)) {
    return true;
  }
  bool result = false;
  if (is_newly_allocated_) {
    // Invariant: newly allocated regions have an undefined live bytes count.
    DCHECK_EQ(live_bytes_, static_cast<size_t>(-1));
    if (IsAllocated()) {
      // We always evacuate newly-allocated non-large regions as we
      // believe they contain many dead objects (a very simple form of
      // the generational hypothesis, even before the Sticky-Bit CC
      // approach).
      //
      // TODO: Verify that assertion by collecting statistics on the
      // number/proportion of live objects in newly allocated regions
      // in RegionSpace::ClearFromSpace.
      //
      // Note that a side effect of evacuating a newly-allocated
      // non-large region is that the "newly allocated" status will
      // later be removed, as its live objects will be copied to an
      // evacuation region, which won't be marked as "newly
      // allocated" (see RegionSpace::AllocateRegion).
      result = true;
    } else {
      DCHECK(IsLarge());
      // We never want to evacuate a large region (and the associated
      // tail regions), except if:
      // - we are forced to do so (see the `kEvacModeForceAll` case
      //   above); or
      // - we know that the (sole) object contained in this region is
      //   dead (see the corresponding logic below, in the
      //   `kEvacModeLivePercentNewlyAllocated` case).
      // For a newly allocated region (i.e. allocated since the
      // previous GC started), we don't have any liveness information
      // (the live bytes count is -1 -- also note this region has been
      // a to-space one between the time of its allocation and now),
      // so we prefer not to evacuate it.
      result = false;
    }
  } else if (evac_mode == kEvacModeLivePercentNewlyAllocated) {
    bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
    if (is_live_percent_valid) {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_LE(live_bytes_, bytes_allocated);
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean
        // there are no live objects, due to rounding (there may be a
        // few).
        result = (live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated);
      } else {
        DCHECK(IsLarge());
        result = (live_bytes_ == 0U);
      }
    } else {
      result = false;
    }
  }
  return result;
}

void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
  // This method is only used when Generational CC collection is enabled.
  DCHECK(use_generational_cc_);

  // This code uses logic similar to that of RegionSpace::FreeLarge
  // to traverse the regions supporting `obj`.
  // TODO: Refactor.
  DCHECK(IsLargeObject(obj));
  DCHECK_ALIGNED(obj, kRegionSize);
  size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
  DCHECK_GT(obj_size, space::RegionSpace::kRegionSize);
  // Size of the memory area allocated for `obj`.
  size_t obj_alloc_size = RoundUp(obj_size, space::RegionSpace::kRegionSize);
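  // For example (illustrative sizes, assuming 256 KiB regions): a 600 KiB
  // object has obj_alloc_size = 768 KiB and spans three regions -- one
  // "large" region followed by two "large tail" regions.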
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(obj);
  uint8_t* end_addr = begin_addr + obj_alloc_size;
  DCHECK_ALIGNED(end_addr, kRegionSize);

  // Zero the live bytes of the large region and large tail regions containing the object.
  MutexLock mu(Thread::Current(), region_lock_);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(region->IsLarge());
    } else {
      DCHECK(region->IsLargeTail());
    }
    region->ZeroLiveBytes();
  }
  if (kIsDebugBuild && end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_region->IsLargeTail());
  }
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
                               EvacMode evac_mode,
                               bool clear_live_bytes) {
  // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
  DCHECK(use_generational_cc_ || clear_live_bytes);
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  // We cannot use the partially utilized TLABs across a GC. Therefore, revoke
  // them during the thread-flip.
  partial_tlabs_.clear();

  // Counter for the number of expected large tail regions following a large region.
  size_t num_expected_large_tails = 0U;
  // Flag to store whether the previously seen large region has been evacuated.
  // This is used to apply the same evacuation policy to related large tail regions.
  bool prev_large_evacuated = false;
  VerifyNonFreeRegionLimit();
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_
      : std::min(num_regions_, non_free_region_index_limit_);
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
        bool is_newly_allocated = r->IsNewlyAllocated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace(clear_live_bytes);
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          // In 2-phase full heap GC, this function is called after marking is
          // done. So, it is possible that some newly allocated large object is
          // marked but its live_bytes is still -1. We need to clear the
          // mark-bit otherwise the live_bytes will not be updated in
          // ConcurrentCopying::ProcessMarkStackRef() and hence will break the
          // logic.
          if (use_generational_cc_ && !should_evacuate && is_newly_allocated) {
            GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
          }
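          // The number of tail regions is the object's region-aligned
          // allocation size in regions, minus the large region itself
          // (illustratively, with 256 KiB regions a 600 KiB object rounds up
          // to three regions and is followed by two large tails).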
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace(clear_live_bytes);
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
    // Invariant: There should be no newly-allocated region in the from-space.
    DCHECK(!r->is_newly_allocated_);
  }
  DCHECK_EQ(num_expected_large_tails, 0U);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
  ZeroAndReleasePages(begin, end - begin);
  if (kProtectClearedRegions) {
    CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
  }
}

void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
                                 /* out */ uint64_t* cleared_objects,
                                 const bool clear_bitmap) {
  DCHECK(cleared_bytes != nullptr);
  DCHECK(cleared_objects != nullptr);
  *cleared_bytes = 0;
  *cleared_objects = 0;
  size_t new_non_free_region_index_limit = 0;
  // We should avoid calling madvise syscalls while holding region_lock_.
  // Therefore, we split the work of this function into two loops. The first
  // loop gathers memory ranges that must be madvised. Then we release the lock
  // and perform madvise on the gathered memory ranges. Finally, we reacquire
  // the lock and loop over the regions to clear the from-space regions and make
  // them available for allocation.
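  // Sketch of the flow (illustrative summary):
  //   1. lock; build madvise_list as coalesced [begin, end) ranges; unlock
  //   2. ZeroAndProtectRegion() each range (zeroing and releasing its pages)
  //   3. lock; reset the from-space regions so they can be reallocated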
  std::deque<std::pair<uint8_t*, uint8_t*>> madvise_list;
  // Gather memory ranges that need to be madvised.
  {
    MutexLock mu(Thread::Current(), region_lock_);
    // Lambda expression `expand_madvise_range` adds a region to the "clear block".
    //
    // As we iterate over from-space regions, we maintain a "clear block", composed of
    // adjacent to-be-cleared regions and whose bounds are `clear_block_begin` and
    // `clear_block_end`. When processing a new region which is not adjacent to
    // the clear block (discontinuity in cleared regions), the clear block
    // is added to madvise_list and the clear block is reset (to the most recent
    // to-be-cleared region).
    //
    // This is done in order to combine zeroing and releasing pages to reduce how
    // often madvise is called. This helps reduce contention on the mmap semaphore
    // (see b/62194020).
    uint8_t* clear_block_begin = nullptr;
    uint8_t* clear_block_end = nullptr;
    auto expand_madvise_range = [&madvise_list, &clear_block_begin, &clear_block_end] (Region* r) {
      if (clear_block_end != r->Begin()) {
        if (clear_block_begin != nullptr) {
          DCHECK(clear_block_end != nullptr);
          madvise_list.push_back(std::pair(clear_block_begin, clear_block_end));
        }
        clear_block_begin = r->Begin();
      }
      clear_block_end = r->End();
    };
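    // For example (illustrative indices): if regions 3, 4 and 7 are passed to
    // `expand_madvise_range` in that order, regions 3 and 4 coalesce into one
    // madvise_list entry covering [3, 5), region 7 starts a new clear block,
    // and the final block [7, 8) is flushed after the loop below.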
    for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
      Region* r = &regions_[i];
      // The following check goes through objects in the region, therefore it
      // must be performed before madvising the region. Therefore, it can't be
      // executed in the following loop.
      if (kCheckLiveBytesAgainstRegionBitmap) {
        CheckLiveBytesAgainstRegionBitmap(r);
      }
      if (r->IsInFromSpace()) {
        expand_madvise_range(r);
      } else if (r->IsInUnevacFromSpace()) {
        // We must skip tails of live large objects.
        if (r->LiveBytes() == 0 && !r->IsLargeTail()) {
          // Special case for 0 live bytes: this means all of the objects in the region are
          // dead and we can clear it. This is important for large objects since we must
          // not visit dead ones in RegionSpace::Walk because they may contain dangling
          // references to invalid objects. It is also better to clear these regions now
          // instead of at the end of the next GC to save RAM. If we don't clear the regions
          // here, they will be cleared in the next GC by the normal live percent evacuation logic.
          expand_madvise_range(r);
          // Also release RAM for large tails.
          while (i + 1 < num_regions_ && regions_[i + 1].IsLargeTail()) {
            expand_madvise_range(&regions_[i + 1]);
            i++;
          }
        }
      }
    }
    // There is a small probability that we may reach here with
    // clear_block_{begin, end} = nullptr. If all the regions allocated since
    // last GC have been for large objects and all of them survive till this GC
    // cycle, then there will be no regions in from-space.
    if (LIKELY(clear_block_begin != nullptr)) {
      DCHECK(clear_block_end != nullptr);
      madvise_list.push_back(std::pair(clear_block_begin, clear_block_end));
    }
  }

  // Madvise the memory ranges.
  for (const auto &iter : madvise_list) {
    ZeroAndProtectRegion(iter.first, iter.second);
    if (clear_bitmap) {
      GetLiveBitmap()->ClearRange(
          reinterpret_cast<mirror::Object*>(iter.first),
          reinterpret_cast<mirror::Object*>(iter.second));
    }
  }
  madvise_list.clear();

  // Iterate over regions again and actually make the from space regions
  // available for allocation.
  MutexLock mu(Thread::Current(), region_lock_);
  VerifyNonFreeRegionLimit();

  // Update max of peak non free region count before reclaiming evacuated regions.
  max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
                                            num_non_free_regions_);

  for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      DCHECK(!r->IsTlab());
      *cleared_bytes += r->BytesAllocated();
      *cleared_objects += r->ObjectsAllocated();
      --num_non_free_regions_;
      r->Clear(/*zero_and_release_pages=*/false);
    } else if (r->IsInUnevacFromSpace()) {
      if (r->LiveBytes() == 0) {
        DCHECK(!r->IsLargeTail());
        *cleared_bytes += r->BytesAllocated();
        *cleared_objects += r->ObjectsAllocated();
        r->Clear(/*zero_and_release_pages=*/false);
        size_t free_regions = 1;
        // Also release RAM for large tails.
        while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
          regions_[i + free_regions].Clear(/*zero_and_release_pages=*/false);
          ++free_regions;
        }
        num_non_free_regions_ -= free_regions;
        // When clear_bitmap is true, clearing the bitmap for these regions has
        // already been taken care of above, in the madvise loop.
        if (!clear_bitmap) {
          GetLiveBitmap()->ClearRange(
              reinterpret_cast<mirror::Object*>(r->Begin()),
              reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
        }
        continue;
      }
      r->SetUnevacFromSpaceAsToSpace();
      if (r->AllAllocatedBytesAreLive()) {
        // Try to optimize the number of ClearRange calls by checking whether the next regions
        // can also be cleared.
        size_t regions_to_clear_bitmap = 1;
        while (i + regions_to_clear_bitmap < num_regions_) {
          Region* const cur = &regions_[i + regions_to_clear_bitmap];
          if (!cur->AllAllocatedBytesAreLive()) {
            DCHECK(!cur->IsLargeTail());
            break;
          }
          CHECK(cur->IsInUnevacFromSpace());
          cur->SetUnevacFromSpaceAsToSpace();
          ++regions_to_clear_bitmap;
        }

        // Optimization (for full CC only): If the live bytes are *all* live
        // in a region then the live-bit information for these objects is
        // superfluous:
        // - We can determine that these objects are all live by using
        //   Region::AllAllocatedBytesAreLive (which just checks whether
        //   `LiveBytes() == static_cast<size_t>(Top() - Begin())`).
        // - We can visit the objects in this region using
        //   RegionSpace::GetNextObject, i.e. without resorting to the
        //   live bits (see RegionSpace::WalkInternal).
        // Therefore, we can clear the bits for these objects in the
        // (live) region space bitmap (and release the corresponding pages).
        //
        // This optimization is incompatible with Generational CC, because:
        // - minor (young-generation) collections need to know which objects
        //   were marked during the previous GC cycle, meaning all mark bitmaps
        //   (this includes the region space bitmap) need to be preserved
        //   between a (minor or major) collection N and a following minor
        //   collection N+1;
        // - at this stage (in the current GC cycle), we cannot determine
        //   whether the next collection will be a minor or a major one;
        // This means that we need to be conservative and always preserve the
        // region space bitmap when using Generational CC.
        // Note that major collections do not require the previous mark bitmaps
        // to be preserved, and as a matter of fact they do clear the region space
        // bitmap. But they cannot do so before we know the next GC cycle will
        // be a major one, so this operation happens at the beginning of such a
        // major collection, before marking starts.
        if (!use_generational_cc_) {
          GetLiveBitmap()->ClearRange(
              reinterpret_cast<mirror::Object*>(r->Begin()),
              reinterpret_cast<mirror::Object*>(r->Begin()
                                                + regions_to_clear_bitmap * kRegionSize));
        }
        // Skip over extra regions for which we cleared the bitmaps: we shall not clear them,
        // as they are unevac regions that are live.
        // Subtract one for the for-loop.
        i += regions_to_clear_bitmap - 1;
      } else {
        // TODO: Explain why we do not poison dead objects in region
        // `r` when it has an undefined live bytes count (i.e. when
        // `r->LiveBytes() == static_cast<size_t>(-1)`) with
        // Generational CC.
        if (!use_generational_cc_ || (r->LiveBytes() != static_cast<size_t>(-1))) {
          // Only some allocated bytes are live in this unevac region.
          // This should only happen for an allocated non-large region.
          DCHECK(r->IsAllocated()) << r->State();
          if (kPoisonDeadObjectsInUnevacuatedRegions) {
            PoisonDeadObjectsInUnevacuatedRegion(r);
          }
        }
      }
    }
    // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
    Region* last_checked_region = &regions_[i];
    if (!last_checked_region->IsFree()) {
      new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
                                                 last_checked_region->Idx() + 1);
    }
  }
  // Update non_free_region_index_limit_.
  SetNonFreeRegionLimit(new_non_free_region_index_limit);
  evac_region_ = nullptr;
  num_non_free_regions_ += num_evac_regions_;
  num_evac_regions_ = 0;
}

void RegionSpace::CheckLiveBytesAgainstRegionBitmap(Region* r) {
  if (r->LiveBytes() == static_cast<size_t>(-1)) {
    // Live bytes count is undefined for `r`; nothing to check here.
    return;
  }

  // Functor walking the region space bitmap for the range corresponding
  // to region `r` and calculating the sum of live bytes.
  size_t live_bytes_recount = 0u;
  auto recount_live_bytes =
      [&r, &live_bytes_recount](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_ALIGNED(obj, kAlignment);
    if (r->IsLarge()) {
      // If `r` is a large region, then it contains at most one
      // object, which must start at the beginning of the
      // region. The live byte count in that case is equal to the
      // bytes allocated for the object (the large region plus any
      // large tail regions).
      DCHECK_EQ(reinterpret_cast<uint8_t*>(obj), r->Begin());
      DCHECK_EQ(live_bytes_recount, 0u);
      live_bytes_recount = r->Top() - r->Begin();
    } else {
      DCHECK(r->IsAllocated())
          << "r->State()=" << r->State() << " r->LiveBytes()=" << r->LiveBytes();
      size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
      size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
      live_bytes_recount += alloc_size;
    }
  };
  // Visit live objects in `r` and recount the live bytes.
  GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
                                    reinterpret_cast<uintptr_t>(r->Top()),
                                    recount_live_bytes);
  // Check that this recount matches the region's current live bytes count.
  DCHECK_EQ(live_bytes_recount, r->LiveBytes());
}

// Poison the memory area in range [`begin`, `end`) with value `kPoisonDeadObject`.
static void PoisonUnevacuatedRange(uint8_t* begin, uint8_t* end) {
  static constexpr size_t kPoisonDeadObjectSize = sizeof(kPoisonDeadObject);
  static_assert(IsPowerOfTwo(kPoisonDeadObjectSize) &&
                IsPowerOfTwo(RegionSpace::kAlignment) &&
                (kPoisonDeadObjectSize < RegionSpace::kAlignment),
                "RegionSpace::kAlignment should be a multiple of kPoisonDeadObjectSize"
                " and both should be powers of 2");
  DCHECK_ALIGNED(begin, kPoisonDeadObjectSize);
  DCHECK_ALIGNED(end, kPoisonDeadObjectSize);
  uint32_t* begin_addr = reinterpret_cast<uint32_t*>(begin);
  uint32_t* end_addr = reinterpret_cast<uint32_t*>(end);
  std::fill(begin_addr, end_addr, kPoisonDeadObject);
}

void RegionSpace::PoisonDeadObjectsInUnevacuatedRegion(Region* r) {
  // The live byte count of `r` should be different from -1, as this
  // region should neither be a newly allocated region nor an
  // evacuated region.
  DCHECK_NE(r->LiveBytes(), static_cast<size_t>(-1))
      << "Unexpected live bytes count of -1 in " << Dumpable<Region>(*r);

  // Past-the-end address of the previously visited (live) object (or
  // the beginning of the region, if `maybe_poison` has not run yet).
  uint8_t* prev_obj_end = reinterpret_cast<uint8_t*>(r->Begin());

  // Functor poisoning the space between `obj` and the previously
  // visited (live) object (or the beginning of the region), if any.
  auto maybe_poison = [&prev_obj_end](mirror::Object* obj) REQUIRES(Locks::mutator_lock_) {
    DCHECK_ALIGNED(obj, kAlignment);
    uint8_t* cur_obj_begin = reinterpret_cast<uint8_t*>(obj);
    if (cur_obj_begin != prev_obj_end) {
      // There is a gap (dead object(s)) between the previously
      // visited (live) object (or the beginning of the region) and
      // `obj`; poison that space.
      PoisonUnevacuatedRange(prev_obj_end, cur_obj_begin);
    }
    prev_obj_end = reinterpret_cast<uint8_t*>(GetNextObject(obj));
  };
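  // Illustrative region layout after poisoning (B = Begin(), T = Top(); L1 and
  // L2 are the live objects):
  //   B [L1][poison][L2][poison] T
  // Every dead gap between live objects, as well as the tail after the last
  // live object, is filled with kPoisonDeadObject words.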

  // Visit live objects in `r` and poison gaps (dead objects) between them.
  GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
                                    reinterpret_cast<uintptr_t>(r->Top()),
                                    maybe_poison);
  // Poison memory between the last live object and the end of the region, if any.
  if (prev_obj_end < r->Top()) {
    PoisonUnevacuatedRange(prev_obj_end, r->Top());
  }
}

void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear(/*zero_and_release_pages=*/true);
  }
  SetNonFreeRegionLimit(0);
  DCHECK_EQ(num_non_free_regions_, 0u);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Protect() {
  if (kProtectClearedRegions) {
    CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_NONE);
  }
}

void RegionSpace::Unprotect() {
  if (kProtectClearedRegions) {
    CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_READ | PROT_WRITE);
  }
}

void RegionSpace::ClampGrowthLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), region_lock_);
  CHECK_LE(new_capacity, NonGrowthLimitCapacity());
  size_t new_num_regions = new_capacity / kRegionSize;
  if (non_free_region_index_limit_ > new_num_regions) {
    LOG(WARNING) << "Couldn't clamp region space as there are regions in use beyond growth limit.";
    return;
  }
  num_regions_ = new_num_regions;
  if (kCyclicRegionAllocation && cyclic_alloc_region_index_ >= num_regions_) {
    cyclic_alloc_region_index_ = 0u;
  }
  SetLimit(Begin() + new_capacity);
  if (Size() > new_capacity) {
    SetEnd(Limit());
  }
  GetMarkBitmap()->SetHeapSize(new_capacity);
  GetMemMap()->SetSize(new_capacity);
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

void RegionSpace::DumpRegionForObject(std::ostream& os, mirror::Object* obj) {
  CHECK(HasAddress(obj));
  MutexLock mu(Thread::Current(), region_lock_);
  RefToRegionUnlocked(obj)->Dump(os);
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  r->objects_allocated_.fetch_add(1, std::memory_order_relaxed);
}

bool RegionSpace::AllocNewTlab(Thread* self,
                               const size_t tlab_size,
                               size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self, /*reuse=*/ gc::Heap::kUsePartialTlabs);
  Region* r = nullptr;
  uint8_t* pos = nullptr;
  *bytes_tl_bulk_allocated = tlab_size;
  // First attempt to get a partially used TLAB, if available.
  if (tlab_size < kRegionSize) {
    // Fetch the largest partial TLAB. The multimap is ordered in decreasing
    // size.
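    // (This assumes partial_tlabs_ is declared, presumably in the header, as a
    // multimap keyed by remaining bytes with a descending comparator such as
    // std::greater<>, so that begin() yields the largest partial TLAB.)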
    auto largest_partial_tlab = partial_tlabs_.begin();
    if (largest_partial_tlab != partial_tlabs_.end() && largest_partial_tlab->first >= tlab_size) {
      r = largest_partial_tlab->second;
      pos = r->End() - largest_partial_tlab->first;
      partial_tlabs_.erase(largest_partial_tlab);
      DCHECK_GT(r->End(), pos);
      DCHECK_LE(r->Begin(), pos);
      DCHECK_GE(r->Top(), pos);
      *bytes_tl_bulk_allocated -= r->Top() - pos;
    }
  }
  if (r == nullptr) {
    // Fallback to allocating an entire region as TLAB.
    r = AllocateRegion(/*for_evac=*/ false);
  }
  if (r != nullptr) {
    uint8_t* start = pos != nullptr ? pos : r->Begin();
    DCHECK_ALIGNED(start, kObjectAlignment);
    r->is_a_tlab_ = true;
    r->thread_ = self;
    r->SetTop(r->End());
    self->SetTlab(start, start + tlab_size, r->End());
    return true;
  }
  return false;
}

size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread, /*reuse=*/ gc::Heap::kUsePartialTlabs);
  return 0U;
}

size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread, const bool reuse) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread, reuse);
  return 0U;
}

void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread, bool reuse) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
    DCHECK(r->IsAllocated());
    DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetTlabEnd() - r->Begin());
    DCHECK_GE(r->End(), thread->GetTlabPos());
    DCHECK_LE(r->Begin(), thread->GetTlabPos());
    size_t remaining_bytes = r->End() - thread->GetTlabPos();
    if (reuse && remaining_bytes >= gc::Heap::kPartialTlabSize) {
      partial_tlabs_.insert(std::make_pair(remaining_bytes, r));
    }
  }
  thread->ResetTlab();
}

size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]="
     << reinterpret_cast<void*>(begin_)
     << "-" << reinterpret_cast<void*>(Top())
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << state_
     << " type=" << type_
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_
     << " live_bytes=" << live_bytes_;

  if (live_bytes_ != static_cast<size_t>(-1)) {
    os << " ratio over allocated bytes="
       << (static_cast<float>(live_bytes_) / RoundUp(BytesAllocated(), kRegionSize));
    uint64_t longest_consecutive_free_bytes = GetLongestConsecutiveFreeBytes();
    os << " longest_consecutive_free_bytes=" << longest_consecutive_free_bytes
       << " (" << PrettySize(longest_consecutive_free_bytes) << ")";
  }

  os << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
     << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
     << " thread=" << thread_ << '\n';
}
uint64_t RegionSpace::Region::GetLongestConsecutiveFreeBytes() const {
  if (IsFree()) {
    return kRegionSize;
  }
  if (IsLarge() || IsLargeTail()) {
    return 0u;
  }
  uintptr_t max_gap = 0u;
  uintptr_t prev_object_end = reinterpret_cast<uintptr_t>(Begin());
  // Iterate through all live objects and find the largest free gap.
  auto visitor = [&max_gap, &prev_object_end](mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    uintptr_t current = reinterpret_cast<uintptr_t>(obj);
    uintptr_t diff = current - prev_object_end;
    max_gap = std::max(diff, max_gap);
    uintptr_t object_end = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
    prev_object_end = RoundUp(object_end, kAlignment);
  };
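  // Note: as written, the visitor only measures gaps that precede a live
  // object; any free space between the last live object and the end of the
  // region is not considered when computing max_gap.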
  space::RegionSpace* region_space = art::Runtime::Current()->GetHeap()->GetRegionSpace();
  region_space->WalkNonLargeRegion(visitor, this);
  return static_cast<uint64_t>(max_gap);
}

size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

void RegionSpace::Region::Clear(bool zero_and_release_pages) {
  top_.store(begin_, std::memory_order_relaxed);
  state_ = RegionState::kRegionStateFree;
  type_ = RegionType::kRegionTypeNone;
  objects_allocated_.store(0, std::memory_order_relaxed);
  alloc_time_ = 0;
  live_bytes_ = static_cast<size_t>(-1);
  if (zero_and_release_pages) {
    ZeroAndProtectRegion(begin_, end_);
  }
  is_newly_allocated_ = false;
  is_a_tlab_ = false;
  thread_ = nullptr;
}

void RegionSpace::TraceHeapSize() {
  Heap* heap = Runtime::Current()->GetHeap();
  heap->TraceHeapSize(heap->GetBytesAllocated() + EvacBytes());
}

RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
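  // Mutator (non-evacuation) allocations are capped at half of the space, so
  // the other half stays in reserve for evacuation; the "+ 1" accounts for the
  // region about to be allocated (e.g., illustratively, with 1024 regions a
  // mutator request fails once 512 regions are already non-free).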
  if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
    return nullptr;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    // When using the cyclic region allocation strategy, try to
    // allocate a region starting from the last cyclic allocated
    // region marker. Otherwise, try to allocate a region starting
    // from the beginning of the region space.
    size_t region_index = kCyclicRegionAllocation
        ? ((cyclic_alloc_region_index_ + i) % num_regions_)
        : i;
    Region* r = &regions_[region_index];
    if (r->IsFree()) {
      r->Unfree(this, time_);
      if (use_generational_cc_) {
        // TODO: Add an explanation for this assertion.
        DCHECK(!for_evac || !r->is_newly_allocated_);
      }
      if (for_evac) {
        ++num_evac_regions_;
        TraceHeapSize();
        // Evac doesn't count as newly allocated.
      } else {
        r->SetNewlyAllocated();
        ++num_non_free_regions_;
      }
      if (kCyclicRegionAllocation) {
        // Move the cyclic allocation region marker to the region
        // following the one that was just allocated.
        cyclic_alloc_region_index_ = (region_index + 1) % num_regions_;
      }
      return r;
    }
  }
  return nullptr;
}

void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
  DCHECK(IsFree());
  alloc_time_ = alloc_time;
  region_space->AdjustNonFreeRegionLimit(idx_);
  type_ = RegionType::kRegionTypeToSpace;
  if (kProtectClearedRegions) {
    CheckedCall(mprotect, __FUNCTION__, Begin(), kRegionSize, PROT_READ | PROT_WRITE);
  }
}

void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateAllocated;
}

void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLarge;
}

void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLargeTail;
}

}  // namespace space
}  // namespace gc
}  // namespace art