/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If a region has live objects whose size is less than this percent
// value of the region size, evacuate the region.
static constexpr uint kEvaculateLivePercentThreshold = 75U;

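// Create the anonymous memory mapping that backs the region space. An extra kRegionSize is
// requested so the resulting map can always be aligned to kRegionSize (see below).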
MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
                                  uint8_t* requested_begin) {
  CHECK_ALIGNED(capacity, kRegionSize);
  std::string error_msg;
  // Ask for the capacity of an additional kRegionSize so that we can align the map by kRegionSize
  // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
  std::unique_ptr<MemMap> mem_map;
  while (true) {
    mem_map.reset(MemMap::MapAnonymous(name.c_str(),
                                       requested_begin,
                                       capacity + kRegionSize,
                                       PROT_READ | PROT_WRITE,
                                       true,
                                       false,
                                       &error_msg));
    if (mem_map.get() != nullptr || requested_begin == nullptr) {
      break;
    }
    // Retry with no specified request begin.
    requested_begin = nullptr;
  }
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
        << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG_STREAM(ERROR));
    return nullptr;
  }
  CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
  CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
  CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
  if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
    // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink by
    // kRegionSize at the end.
    mem_map->SetSize(capacity);
  } else {
    // Got an unaligned map. Align both ends.
    mem_map->AlignBy(kRegionSize);
  }
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  CHECK_ALIGNED(mem_map->End(), kRegionSize);
  CHECK_EQ(mem_map->Size(), capacity);
  return mem_map.release();
}

RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
  return new RegionSpace(name, mem_map);
}

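// Carve the mapping into kRegionSize regions, initialize per-region bookkeeping, and create the
// bitmap covering the space. current_region_ starts at the sentinel full_region_, so the first
// allocation request falls through to claiming a fresh region.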
RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
  size_t mem_map_size = mem_map->Size();
  CHECK_ALIGNED(mem_map_size, kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  num_regions_ = mem_map_size / kRegionSize;
  num_non_free_regions_ = 0U;
  DCHECK_GT(num_regions_, 0U);
  non_free_region_index_limit_ = 0U;
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i].Init(i, region_addr, region_addr + kRegionSize);
  }
  mark_bitmap_.reset(
      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  current_region_ = &full_region_;
  evac_region_ = nullptr;
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

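// Total bytes held by regions currently typed as from-space.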
size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

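// Total bytes held by regions currently typed as unevacuated from-space.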
size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

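// Total bytes held by regions currently typed as to-space.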
size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

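// Per-region evacuation policy: regions allocated since the last GC are always evacuated;
// otherwise an allocated region is evacuated when its live ratio is below
// kEvaculateLivePercentThreshold, and a large region only when it has no live bytes.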
inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // If the region was allocated after the start of the
  // previous GC or the live ratio is below the threshold,
  // evacuate it.
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
    if (is_live_percent_valid) {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_LE(live_bytes_, bytes_allocated);
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean
        // there are no live objects due to rounding (there may be a
        // few).
        result = live_bytes_ * 100U < kEvaculateLivePercentThreshold * bytes_allocated;
      } else {
        DCHECK(IsLarge());
        result = live_bytes_ == 0U;
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  size_t num_expected_large_tails = 0;
  bool prev_large_evacuated = false;
  VerifyNonFreeRegionLimit();
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_
      : std::min(num_regions_, non_free_region_index_limit_);
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  DCHECK_EQ(num_expected_large_tails, 0U);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

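// Reclaim from-space after evacuation: evacuated regions are cleared outright, unevacuated
// from-space regions that turned out to be fully dead are also cleared, and the remaining
// unevacuated regions are flipped back to to-space. The cleared byte and object counts are
// returned through the out-parameters.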
void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
  DCHECK(cleared_bytes != nullptr);
  DCHECK(cleared_objects != nullptr);
  *cleared_bytes = 0;
  *cleared_objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  VerifyNonFreeRegionLimit();
  size_t new_non_free_region_index_limit = 0;

  // Combine zeroing and releasing pages to reduce how often madvise is called. This helps
  // reduce contention on the mmap semaphore. b/62194020
  // clear_region adds a region to the current block. If the region is not adjacent, the
  // clear block is zeroed, released, and a new block begins.
  uint8_t* clear_block_begin = nullptr;
  uint8_t* clear_block_end = nullptr;
  auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
    r->Clear(/*zero_and_release_pages*/false);
    if (clear_block_end != r->Begin()) {
      ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
      clear_block_begin = r->Begin();
    }
    clear_block_end = r->End();
  };
  for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      *cleared_bytes += r->BytesAllocated();
      *cleared_objects += r->ObjectsAllocated();
      --num_non_free_regions_;
      clear_region(r);
    } else if (r->IsInUnevacFromSpace()) {
      if (r->LiveBytes() == 0) {
        // Special case for 0 live bytes: all of the objects in the region are dead and we can
        // clear it. This is important for large objects since we must not visit dead ones in
        // RegionSpace::Walk because they may contain dangling references to invalid objects.
        // It is also better to clear these regions now instead of at the end of the next GC to
        // save RAM. If we don't clear the regions here, they will be cleared in the next GC by
        // the normal live percent evacuation logic.
        size_t free_regions = 1;
        // Also release RAM for large tails.
        while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
          DCHECK(r->IsLarge());
          clear_region(&regions_[i + free_regions]);
          ++free_regions;
        }
        *cleared_bytes += r->BytesAllocated();
        *cleared_objects += r->ObjectsAllocated();
        num_non_free_regions_ -= free_regions;
        clear_region(r);
        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
        continue;
      }
      size_t full_count = 0;
      while (r->IsInUnevacFromSpace()) {
        Region* const cur = &regions_[i + full_count];
        if (i + full_count >= num_regions_ ||
            cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
          break;
        }
        DCHECK(cur->IsInUnevacFromSpace());
        if (full_count != 0) {
          cur->SetUnevacFromSpaceAsToSpace();
        }
        ++full_count;
      }
      // Note that r is the full_count == 0 iteration since it is not handled by the loop.
      r->SetUnevacFromSpaceAsToSpace();
      if (full_count >= 1) {
        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + full_count * kRegionSize));
        // Skip over extra regions we cleared.
        // Subtract one for the for loop.
        i += full_count - 1;
      }
    }
    // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
    Region* last_checked_region = &regions_[i];
    if (!last_checked_region->IsFree()) {
      new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
                                                 last_checked_region->Idx() + 1);
    }
  }
  // Clear pages for the last block since clearing happens when a new block opens.
  ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
  // Update non_free_region_index_limit_.
  SetNonFreeRegionLimit(new_non_free_region_index_limit);
  evac_region_ = nullptr;
}

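// Report the largest contiguous allocation that could still succeed, so the allocation failure
// message can distinguish fragmentation from genuine exhaustion of the space.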
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

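// Release every region in the space, zeroing and madvising its pages, and reset the allocation
// state.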
void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear(/*zero_and_release_pages*/true);
  }
  SetNonFreeRegionLimit(0);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

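// Free a large object by clearing its head region and all of its large-tail regions.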
void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear(/*zero_and_release_pages*/true);
    --num_non_free_regions_;
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

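// Increment the objects_allocated_ counter of the region containing ref.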
void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  r->objects_allocated_.FetchAndAddSequentiallyConsistent(1);
}

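// Hand the calling thread a free region to use as its thread-local allocation buffer. Fails if
// claiming another region would leave fewer than half of the regions free, since those are
// reserved for evacuation.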
bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.
  if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
    return false;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(this, time_);
      ++num_non_free_regions_;
      r->SetNewlyAllocated();
      r->SetTop(r->End());
      r->is_a_tlab_ = true;
      r->thread_ = self;
      self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
      return true;
    }
  }
  return false;
}

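// Lock the region space and delegate to the locked variant below.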
size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

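// Detach the thread's TLAB from its region, crediting the thread-local object and byte counts
// back to the region's bookkeeping.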
void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
    DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr, nullptr);
}

size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

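// Print a one-line summary of a region: index, begin/top/end addresses, state, type, and
// allocation bookkeeping.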
void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
     << reinterpret_cast<void*>(Top())
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << is_newly_allocated_
     << " is_a_tlab=" << is_a_tlab_ << " thread=" << thread_ << "\n";
}

}  // namespace space
}  // namespace gc
}  // namespace art