/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "region_space-inl.h"

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If a region has live objects whose size is less than this percent
// value of the region size, evacuate the region.
static constexpr uint kEvacuateLivePercentThreshold = 75U;

RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
                                 uint8_t* requested_begin) {
  capacity = RoundUp(capacity, kRegionSize);
  std::string error_msg;
  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                       PROT_READ | PROT_WRITE, true, false,
                                                       &error_msg));
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
        << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG(ERROR));
    return nullptr;
  }
  return new RegionSpace(name, mem_map.release());
}

RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
  size_t mem_map_size = mem_map->Size();
  CHECK_ALIGNED(mem_map_size, kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  num_regions_ = mem_map_size / kRegionSize;
  num_non_free_regions_ = 0U;
  DCHECK_GT(num_regions_, 0U);
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i] = Region(i, region_addr, region_addr + kRegionSize);
  }
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
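  // full_region_ acts as an always-full dummy region: it is marked allocated (never free) and
  // allocations against it always fail (see the DCHECKs below), so pointing current_region_ and
  // evac_region_ at it forces the next allocation onto the slow path that claims a real region.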
  full_region_ = Region();
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  current_region_ = &full_region_;
  evac_region_ = nullptr;
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // If the region was allocated after the start of the previous GC, or its live ratio is below
  // the threshold, evacuate it.
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
    if (is_live_percent_valid) {
      uint live_percent = GetLivePercent();
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean there are no live objects, due
        // to rounding (there may be a few).
        result = live_percent < kEvacuateLivePercentThreshold;
      } else {
        DCHECK(IsLarge());
        result = live_percent == 0U;
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  size_t num_expected_large_tails = 0;
  bool prev_large_evacuated = false;
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::ClearFromSpace() {
  MutexLock mu(Thread::Current(), region_lock_);
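  // Evacuated from-space regions are reclaimed immediately; un-evacuated from-space regions
  // survive the collection and simply become to-space regions again.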
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      r->Clear();
      --num_non_free_regions_;
    } else if (r->IsInUnevacFromSpace()) {
      r->SetUnevacFromSpaceAsToSpace();
    }
  }
  evac_region_ = nullptr;
}

void RegionSpace::AssertAllRegionLiveBytesZeroOrCleared() {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), region_lock_);
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      size_t live_bytes = r->LiveBytes();
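      // static_cast<size_t>(-1) is the sentinel meaning the live bytes have been cleared and not
      // yet recomputed (see ShouldBeEvacuated()).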
      CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
    }
  }
}

void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear();
  }
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
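  // Free the head (large) region and every tail region that the object spans.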
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear();
    --num_non_free_regions_;
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
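  // Bump the region's object count with an atomic read-modify-write: allocating threads may
  // record into the same region concurrently without holding region_lock_.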
  reinterpret_cast<Atomic<uint64_t>*>(&r->objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
}

bool RegionSpace::AllocNewTlab(Thread* self) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.
  if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
    return false;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(time_);
      ++num_non_free_regions_;
      // TODO: this is buggy. Debug it.
      // r->SetNewlyAllocated();
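      // The whole region is handed to the thread as its TLAB, so mark it as fully used from the
      // space's point of view; the thread then bump-allocates within [Begin(), End()) on its own.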
      r->SetTop(r->End());
      r->is_a_tlab_ = true;
      r->thread_ = self;
      self->SetTlab(r->Begin(), r->End());
      return true;
    }
  }
  return false;
}

size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
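    // A TLAB always covers exactly one whole region (see AllocNewTlab()).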
    DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr);
}

size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]="
     << reinterpret_cast<void*>(begin_) << "-" << reinterpret_cast<void*>(top_)
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_
     << " thread=" << thread_ << "\n";
}

}  // namespace space
}  // namespace gc
}  // namespace art