/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "space.h"
#include "thread.h"

#include <functional>
#include <map>

namespace art HIDDEN {
namespace gc {

namespace accounting {
class ReadBarrierTable;
}  // namespace accounting

namespace space {

// Cyclic region allocation strategy. If `true`, region allocation
// will not try to allocate a new region from the beginning of the
// region space, but from the last allocated region. This allocation
// strategy reduces region reuse and should help catch some GC bugs
// earlier. However, cyclic region allocation can also create memory
// fragmentation at the region level (see b/33795328); therefore, we
// only enable it in debug mode.
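//
// For example (an illustrative sketch, not a precise description of the
// allocator): with four regions and cyclic allocation enabled, if the most
// recently allocated region is at index 2, the next allocation search starts
// near index 2 and wraps around toward index 1, rather than always starting
// from index 0.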
static constexpr bool kCyclicRegionAllocation = kIsDebugBuild;

// A space that consists of equal-sized regions.
class RegionSpace final : public ContinuousMemMapAllocSpace {
 public:
  using WalkCallback = void (*)(void *start, void *end, size_t num_bytes, void* callback_arg);

  enum EvacMode {
    kEvacModeNewlyAllocated,
    kEvacModeLivePercentNewlyAllocated,
    kEvacModeForceAll,
  };

  SpaceType GetType() const override {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space mem map with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
  static RegionSpace* Create(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
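
  // A minimal usage sketch (illustrative only; `capacity` and `requested_begin`
  // are caller-supplied placeholders and the space name is arbitrary):
  //
  //   MemMap mem_map = RegionSpace::CreateMemMap("main space", capacity, requested_begin);
  //   RegionSpace* space = RegionSpace::Create("main space", std::move(mem_map),
  //                                            /*use_generational_cc=*/true);
  //   if (space->Begin() != requested_begin) {
  //     // The requested base address was not granted.
  //   }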

  // Allocate `num_bytes`; return null if the space is full.
  mirror::Object* Alloc(Thread* self,
                        size_t num_bytes,
                        /* out */ size_t* bytes_allocated,
                        /* out */ size_t* usable_size,
                        /* out */ size_t* bytes_tl_bulk_allocated)
      override REQUIRES(!region_lock_);
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self,
                                    size_t num_bytes,
                                    /* out */ size_t* bytes_allocated,
                                    /* out */ size_t* usable_size,
                                    /* out */ size_t* bytes_tl_bulk_allocated)
      override REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
                                                /* out */ size_t* bytes_allocated,
                                                /* out */ size_t* usable_size,
                                                /* out */ size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes,
                             /* out */ size_t* bytes_allocated,
                             /* out */ size_t* usable_size,
                             /* out */ size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
  template<bool kForEvac>
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  EXPORT size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  size_t Free(Thread*, mirror::Object*) override {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) override {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
    return &mark_bitmap_;
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
    return &mark_bitmap_;
  }

  EXPORT void Clear() override REQUIRES(!region_lock_);

  // Add read and write memory protection to the whole region space, i.e. make
  // memory pages backing the region area not readable and not writable.
  void Protect();

  // Remove memory protection from the whole region space, i.e. make memory
  // pages backing the region area readable and writable. This method is useful
  // to avoid page protection faults when dumping information about an invalid
  // reference.
  EXPORT void Unprotect();

  // Change the non-growth-limit capacity to `new_capacity` by shrinking or expanding the map.
  // Currently, only shrinking is supported.
  // Unlike implementations of this function in other spaces, we need to pass
  // the new capacity as an argument here, as the region space doesn't have any
  // notion of a growth limit.
  void ClampGrowthLimit(size_t new_capacity) REQUIRES(!region_lock_);

  EXPORT void Dump(std::ostream& os) const override;
  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
  // Dump the region containing object `obj`. Precondition: `obj` is in the region space.
  void DumpRegionForObject(std::ostream& os, mirror::Object* obj) REQUIRES(!region_lock_);
  EXPORT void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);

  EXPORT size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!region_lock_);
  size_t RevokeThreadLocalBuffers(Thread* thread, const bool reuse) REQUIRES(!region_lock_);
  EXPORT size_t RevokeAllThreadLocalBuffers() override
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };
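
  // Illustrative example (not a normative part of this header): with the
  // 256 KB region size defined below (kRegionSize), a single 600 KB allocation
  // made through AllocLarge spans 3 regions; the first is tagged
  // kRegionStateLarge and the two that follow are tagged kRegionStateLargeTail.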

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
  uint64_t GetBytesAllocated() override REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() override REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  size_t GetMaxPeakNumNonFreeRegions() const {
    return max_peak_num_non_free_regions_;
  }
  size_t GetNumRegions() const {
    return num_regions_;
  }
  size_t GetNumNonFreeRegions() const NO_THREAD_SAFETY_ANALYSIS {
    return num_non_free_regions_;
  }

  bool CanMoveObjects() const override {
    return true;
  }

  bool Contains(const mirror::Object* obj) const override {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() override {
    return this;
  }

  // Go through all of the regions and visit the objects they contain.
  template <typename Visitor>
  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void WalkToSpace(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
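
  // A minimal usage sketch (illustrative only; it assumes the mutator lock is
  // held and that the visitor is invoked with a `mirror::Object*`, which is an
  // assumption of this example rather than a documented contract):
  //
  //   region_space->Walk([](mirror::Object* obj)
  //       REQUIRES_SHARED(Locks::mutator_lock_) {
  //     // Inspect `obj` here.
  //   });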

  // Scans regions and calls visitor for objects in unevac-space corresponding
  // to the bits set in 'bitmap'.
  // Cannot acquire region_lock_ as visitor may need to acquire it for allocation.
  // Should not be called concurrently with functions (like SetFromSpace()) which
  // change regions' type.
  template <typename Visitor>
  ALWAYS_INLINE void ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
                                         Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
    return nullptr;
  }
  EXPORT bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 256 * KB;

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsRegionNewlyAllocated(size_t idx) const NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_LT(idx, num_regions_);
    return regions_[idx].IsNewlyAllocated();
  }

  bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsNewlyAllocated();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsLargeObject(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsLarge();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  // If `ref` is in the region space, return the type of its region;
  // otherwise, return `RegionType::kRegionTypeNone`.
  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      return GetRegionTypeUnsafe(ref);
    }
    return RegionType::kRegionTypeNone;
  }

  // Unsafe version of RegionSpace::GetRegionType.
  // Precondition: `ref` is in the region space.
  RegionType GetRegionTypeUnsafe(mirror::Object* ref) {
    DCHECK(HasAddress(ref)) << ref;
    Region* r = RefToRegionUnlocked(ref);
    return r->Type();
  }

  // Zero live bytes for a large object, used by young gen CC for marking newly allocated large
  // objects.
  void ZeroLiveBytesForLargeObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Determine which regions to evacuate and tag them as
  // from-space. Tag the rest as unevacuated from-space.
  void SetFromSpace(accounting::ReadBarrierTable* rb_table,
                    EvacMode evac_mode,
                    bool clear_live_bytes)
      REQUIRES(!region_lock_);

  size_t FromSpaceSize() REQUIRES(!region_lock_);
  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
  size_t ToSpaceSize() REQUIRES(!region_lock_);
  void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
                      /* out */ uint64_t* cleared_objects,
                      const bool clear_bitmap,
                      const bool release_eagerly)
      REQUIRES(!region_lock_);

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), region_lock_);
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        size_t live_bytes = r->LiveBytes();
        CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
      }
    }
  }

  void SetAllRegionLiveBytesZero() REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    const size_t iter_limit = kUseTableLookupReadBarrier
        ? num_regions_
        : std::min(num_regions_, non_free_region_index_limit_);
    for (size_t i = 0; i < iter_limit; ++i) {
      Region* r = &regions_[i];
      // Newly allocated regions don't need up-to-date live_bytes_ for deciding
      // whether to be evacuated or not. See Region::ShouldBeEvacuated().
      if (!r->IsFree() && !r->IsNewlyAllocated()) {
        r->ZeroLiveBytes();
      }
    }
  }

  size_t RegionIdxForRefUnchecked(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg_idx;
  }
  // Return -1 as region index for references outside this region space.
  size_t RegionIdxForRef(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
    if (HasAddress(ref)) {
      return RegionIdxForRefUnchecked(ref);
    } else {
      return static_cast<size_t>(-1);
    }
  }
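
  // Worked example (illustrative only): with kRegionSize = 256 KB, a reference
  // located 600 KB past Begin() has offset / kRegionSize = 600 / 256 = 2, so it
  // maps to the region at index 2, which covers the range [512 KB, 768 KB)
  // relative to Begin().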

  // Increment object allocation count for region containing ref.
  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);

  bool AllocNewTlab(Thread* self, const size_t tlab_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);

  uint32_t Time() {
    return time_;
  }

  size_t EvacBytes() const NO_THREAD_SAFETY_ANALYSIS {
    return num_evac_regions_ * kRegionSize;
  }

  uint64_t GetMadviseTime() const {
    return madvise_time_;
  }

  void ReleaseFreeRegions();

 private:
  RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc);

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          live_bytes_(static_cast<size_t>(-1)),
          begin_(nullptr),
          thread_(nullptr),
          top_(nullptr),
          end_(nullptr),
          objects_allocated_(0),
          alloc_time_(0),
          is_newly_allocated_(false),
          is_a_tlab_(false),
          state_(RegionState::kRegionStateAllocated),
          type_(RegionType::kRegionTypeToSpace) {}

    void Init(size_t idx, uint8_t* begin, uint8_t* end) {
      idx_ = idx;
      begin_ = begin;
      top_.store(begin, std::memory_order_relaxed);
      end_ = end;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_.store(0, std::memory_order_relaxed);
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear(bool zero_and_release_pages);

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes,
                                        /* out */ size_t* bytes_allocated,
                                        /* out */ size_t* usable_size,
                                        /* out */ size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = (state_ == RegionState::kRegionStateFree);
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    // Given a free region, declare it non-free (allocated) and large.
    EXPORT void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    // Given a free region, declare it non-free (allocated) and large tail.
    EXPORT void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = (state_ == RegionState::kRegionStateLarge);
      if (is_large) {
        DCHECK_LT(begin_ + kRegionSize, Top());
      }
      return is_large;
    }

    void ZeroLiveBytes() {
      live_bytes_ = 0;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = (state_ == RegionState::kRegionStateLargeTail);
      if (is_large_tail) {
        DCHECK_EQ(begin_, Top());
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsNewlyAllocated() const {
      return is_newly_allocated_;
    }

    bool IsTlab() const {
      return is_a_tlab_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    // Set this region as evacuated from-space. At the end of the
    // collection, RegionSpace::ClearFromSpace will clear and reclaim
    // the space used by this region, and tag it as unallocated/free.
    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      if (IsNewlyAllocated()) {
        // Clear the "newly allocated" status here, as we do not want the
        // GC to see it when encountering references in the from-space.
        //
        // Invariant: There should be no newly-allocated region in the
        // from-space (when the from-space exists, which is between the calls
        // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
        is_newly_allocated_ = false;
      }
      // Set live bytes to an invalid value, as we have made an
      // evacuation decision (possibly based on the percentage of live
      // bytes).
      live_bytes_ = static_cast<size_t>(-1);
    }

    // Set this region as unevacuated from-space. At the end of the
    // collection, RegionSpace::ClearFromSpace will preserve the space
    // used by this region, and tag it as to-space (see
    // Region::SetUnevacFromSpaceAsToSpace below).
    void SetAsUnevacFromSpace(bool clear_live_bytes);

    // Set this region as to-space. Used by RegionSpace::ClearFromSpace.
    // This is only valid if it is currently an unevac from-space region.
    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

    // Return whether this region should be evacuated. Used by RegionSpace::SetFromSpace.
    ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(GetUseGenerationalCC() || IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      // For large allocations, we always consider all bytes in the regions live.
      live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    bool AllAllocatedBytesAreLive() const {
      return LiveBytes() == static_cast<size_t>(Top() - Begin());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    // Returns the number of allocated bytes. "Bulk allocated" bytes in active TLABs are excluded.
    size_t BytesAllocated() const;

    size_t ObjectsAllocated() const;

    uint8_t* Begin() const {
      return begin_;
    }

    ALWAYS_INLINE uint8_t* Top() const {
      return top_.load(std::memory_order_relaxed);
    }

    void SetTop(uint8_t* new_top) {
      top_.store(new_top, std::memory_order_relaxed);
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(Top(), end_);
      objects_allocated_.fetch_add(num_objects, std::memory_order_relaxed);
      top_.store(begin_ + num_bytes, std::memory_order_relaxed);
      DCHECK_LE(Top(), end_);
    }
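
    // Illustrative example of the accounting above: after
    // RecordThreadLocalAllocations(/*num_objects=*/3, /*num_bytes=*/3 * 64),
    // objects_allocated_ has grown by 3 and Top() points 192 bytes past
    // Begin() (the figures here are hypothetical).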

    uint64_t GetLongestConsecutiveFreeBytes() const;

   private:
    static bool GetUseGenerationalCC();

    size_t idx_;                        // The region's index in the region space.
    // Number of bytes in live objects, or -1 for newly allocated regions. Used to compute
    // percent live for region evacuation decisions, and to determine whether an unevacuated
    // region is completely empty, and thus can be reclaimed. Reset to zero either at the
    // beginning of MarkingPhase(), or during the flip for a nongenerational GC, where we
    // don't have a separate mark phase. It is then incremented whenever a mark bit in that
    // region is set.
    size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
    uint8_t* begin_;                    // The begin address of the region.
    Thread* thread_;                    // The owning thread if it's a tlab.
    // Note that `top_` can be higher than `end_` in the case of a
    // large region, where an allocated object spans multiple regions
    // (large region + one or more large tail regions).
    Atomic<uint8_t*> top_;              // The current position of the allocation.
    uint8_t* end_;                      // The end address of the region.
    // objects_allocated_ is accessed using memory_order_relaxed. Treat as approximate when there
    // are concurrent updates.
    Atomic<size_t> objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;               // The allocation time of the region.
    // Note that newly allocated and evacuated regions use -1 as a
    // special value for `live_bytes_`.
    bool is_newly_allocated_;           // True if it's allocated after the last collection.
    bool is_a_tlab_;                    // True if it's a tlab.
    RegionState state_;                 // The region state (see RegionState).
    RegionType type_;                   // The region type (see RegionType).

    friend class RegionSpace;
  };

  template<bool kToSpaceOnly, typename Visitor>
  ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;

  // The visitor iterates over objects in increasing address order.
  template<typename Visitor>
  ALWAYS_INLINE void WalkNonLargeRegion(Visitor&& visitor, const Region* r)
      NO_THREAD_SAFETY_ANALYSIS;

  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  void TraceHeapSize() REQUIRES(region_lock_);

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance reasons (this is frequently called via
    // RegionSpace::IsInFromSpace, etc.), we avoid taking a lock here.
    // Note that since we only change a region from to-space to (evac)
    // from-space during a pause (in RegionSpace::SetFromSpace) and
    // from (evac) from-space to free (after GC is done), as long as
    // `ref` is a valid reference into an allocated region, it's safe
    // to access the region state without the lock.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }

  // Return the object location following `obj` in the region space
  // (i.e., the object location at `obj + obj->SizeOf()`).
  //
  // Note that unless
  // - the region containing `obj` is fully used; and
  // - `obj` is not the last object of that region;
  // the returned location is not guaranteed to be a valid object.
  static mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
    DCHECK_LT(new_non_free_region_index, num_regions_);
    non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
                                            new_non_free_region_index + 1);
    VerifyNonFreeRegionLimit();
  }

  void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
    DCHECK_LE(new_non_free_region_index_limit, num_regions_);
    non_free_region_index_limit_ = new_non_free_region_index_limit;
    VerifyNonFreeRegionLimit();
  }

  // Check this invariant:
  // for all `i >= non_free_region_index_limit_`, `regions_[i].IsFree()` is true.
  void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
    if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
      for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
        CHECK(regions_[i].IsFree());
      }
    }
  }

  EXPORT Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread, bool reuse) REQUIRES(region_lock_);

  // Scan region range [`begin`, `end`) in increasing order to try to
  // allocate a large region having a size of `num_regs_in_large_region`
  // regions. If there is no space in the region space to allocate this
  // large region, return null.
  //
  // If argument `next_region` is not null, use `*next_region` to
  // return the index to the region next to the allocated large region
  // returned by this method.
  template<bool kForEvac>
  mirror::Object* AllocLargeInRange(size_t begin,
                                    size_t end,
                                    size_t num_regs_in_large_region,
                                    /* out */ size_t* bytes_allocated,
                                    /* out */ size_t* usable_size,
                                    /* out */ size_t* bytes_tl_bulk_allocated,
                                    /* out */ size_t* next_region = nullptr) REQUIRES(region_lock_);

  // Check that the value of `r->LiveBytes()` matches the number of
  // (allocated) bytes used by live objects according to the live bits
  // in the region space bitmap range corresponding to region `r`.
  void CheckLiveBytesAgainstRegionBitmap(Region* r);

  // Poison memory areas used by dead objects within unevacuated
  // region `r`. This is meant to detect dangling references to dead
  // objects earlier in debug mode.
  void PoisonDeadObjectsInUnevacuatedRegion(Region* r);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Cached version of Heap::use_generational_cc_.
  const bool use_generational_cc_;
  uint32_t time_;                  // The time, measured as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  uint64_t madvise_time_;          // The amount of time spent in madvise for purging pages.
  // The number of non-free regions in this space.
  size_t num_non_free_regions_ GUARDED_BY(region_lock_);

  // The number of evac regions allocated during collection. 0 when GC not running.
  size_t num_evac_regions_ GUARDED_BY(region_lock_);

  // The maximum number of non-free regions observed just before reclaim in each
  // GC cycle; at that moment in the cycle, the number of non-free regions is at
  // its highest.
  size_t max_peak_num_non_free_regions_;

  // The pointer to the region array.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);

  // Holds partially used TLABs that can be reassigned to threads later, so that
  // the unused portion gets utilized.
  std::multimap<size_t, Region*, std::greater<size_t>> partial_tlabs_ GUARDED_BY(region_lock_);
  // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
  // RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace.
  //
  // Invariant (verified by RegionSpace::VerifyNonFreeRegionLimit):
  //   for all `i >= non_free_region_index_limit_`, `regions_[i].IsFree()` is true.
  size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);

  Region* current_region_;         // The region currently used for allocation.
  Region* evac_region_;            // The region currently used for evacuation.
  Region full_region_;             // The fake/sentinel region that looks full.

  // Index into the region array pointing to the starting region when
  // trying to allocate a new region. Only used when
  // `kCyclicRegionAllocation` is true.
  size_t cyclic_alloc_region_index_ GUARDED_BY(region_lock_);

  // Mark bitmap used by the GC.
  accounting::ContinuousSpaceBitmap mark_bitmap_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, RegionSpace::RegionState value);
std::ostream& operator<<(std::ostream& os, RegionSpace::RegionType value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_