/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {

namespace accounting {
class ReadBarrierTable;
}  // namespace accounting

namespace space {

// A space that consists of equal-sized regions.
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space mem map with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
  static RegionSpace* Create(const std::string& name, MemMap* mem_map);
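
  // Example (illustrative sketch only; the capacity value and the Begin()
  // check are assumptions for the example, not requirements of this API):
  //
  //   MemMap* mem_map =
  //       RegionSpace::CreateMemMap("region space", 256 * MB, /* requested_begin */ nullptr);
  //   RegionSpace* space = RegionSpace::Create("region space", mem_map);
  //   // The requested base address may not have been granted; confirm via Begin().
  //   uint8_t* actual_begin = space->Begin();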

  // Allocate `num_bytes`; returns null if the space is full.
  mirror::Object* Alloc(Thread* self,
                        size_t num_bytes,
                        /* out */ size_t* bytes_allocated,
                        /* out */ size_t* usable_size,
                        /* out */ size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!region_lock_);
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self,
                                    size_t num_bytes,
                                    /* out */ size_t* bytes_allocated,
                                    /* out */ size_t* usable_size,
                                    /* out */ size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
                                                /* out */ size_t* bytes_allocated,
                                                /* out */ size_t* usable_size,
                                                /* out */ size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes,
                             /* out */ size_t* bytes_allocated,
                             /* out */ size_t* usable_size,
                             /* out */ size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
  template<bool kForEvac>
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
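
  // Example (illustrative only; mutators normally allocate through the heap
  // rather than calling Alloc directly, and `num_bytes` is assumed to be a
  // suitably aligned size):
  //
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = space->Alloc(Thread::Current(),
  //                                      num_bytes,
  //                                      &bytes_allocated,
  //                                      &usable_size,
  //                                      &bytes_tl_bulk_allocated);
  //   if (obj == nullptr) {
  //     // The space is full; the caller must trigger a GC or fail the allocation.
  //   }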

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }

  void Clear() OVERRIDE REQUIRES(!region_lock_);

  // Change the non-growth-limit capacity to `new_capacity` by shrinking or
  // expanding the map. Currently, only shrinking is supported.
  // Unlike the implementations of this function in other spaces, we need to
  // pass the new capacity as an argument here, as the region space doesn't
  // have any notion of a growth limit.
  void ClampGrowthLimit(size_t new_capacity) REQUIRES(!region_lock_);
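
  // Example (illustrative; the sizes, and the assumption that the new
  // capacity is region-aligned, are for the example only):
  //
  //   // Shrink a 512 MB region space to 256 MB (only shrinking is supported).
  //   region_space->ClampGrowthLimit(256 * MB);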

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
  // Dump region containing object `obj`. Precondition: `obj` is in the region space.
  void DumpRegionForObject(std::ostream& os, mirror::Object* obj) REQUIRES(!region_lock_);
  void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);

  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
  size_t RevokeAllThreadLocalBuffers()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };
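
  // For example, a 600 KB allocation (with kRegionSize == 256 KB) spans three
  // regions: one kRegionStateLarge region holding the start of the object,
  // followed by two kRegionStateLargeTail regions.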

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
  uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  size_t GetMaxPeakNumNonFreeRegions() const {
    return max_peak_num_non_free_regions_;
  }
  size_t GetNumRegions() const {
    return num_regions_;
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  template <typename Visitor>
  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
    WalkInternal<false /* kToSpaceOnly */>(visitor);
  }
  template <typename Visitor>
  ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<true /* kToSpaceOnly */>(visitor);
  }
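
  // Example (illustrative; assumes the caller holds the mutator lock
  // exclusively, as required by the REQUIRES annotation above):
  //
  //   region_space->WalkToSpace([](mirror::Object* obj)
  //       REQUIRES(Locks::mutator_lock_) {
  //     LOG(INFO) << "to-space object at " << obj;
  //   });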

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 256 * KB;
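
  // For example, a 512 MB region space is divided into
  // 512 * MB / kRegionSize == 2048 regions, and allocations larger than
  // kRegionSize take the large-object path (AllocLarge/FreeLarge).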

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsNewlyAllocated();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }
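
  // Example (illustrative; these predicates are typically consulted on
  // read-barrier slow paths; GetForwardedObject is a hypothetical helper,
  // not part of this class):
  //
  //   if (region_space->IsInFromSpace(ref)) {
  //     ref = GetForwardedObject(ref);  // hypothetical: look up the forwarding address
  //   }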

  // If `ref` is in the region space, return the type of its region;
  // otherwise, return `RegionType::kRegionTypeNone`.
  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      return GetRegionTypeUnsafe(ref);
    }
    return RegionType::kRegionTypeNone;
  }

  // Unsafe version of RegionSpace::GetRegionType.
  // Precondition: `ref` is in the region space.
  RegionType GetRegionTypeUnsafe(mirror::Object* ref) {
    DCHECK(HasAddress(ref)) << ref;
    Region* r = RefToRegionUnlocked(ref);
    return r->Type();
  }
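
  // Example (illustrative): dispatching on a reference's region type.
  //
  //   switch (region_space->GetRegionType(ref)) {
  //     case RegionType::kRegionTypeFromSpace:
  //       // `ref` is in a region to be evacuated.
  //       break;
  //     case RegionType::kRegionTypeUnevacFromSpace:
  //       // `ref` stays put; its region is not evacuated.
  //       break;
  //     case RegionType::kRegionTypeToSpace:
  //       // `ref` is already in the to-space.
  //       break;
  //     default:
  //       // kRegionTypeNone: `ref` is not in the region space.
  //       break;
  //   }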

  // Determine which regions to evacuate and tag them as
  // from-space. Tag the rest as unevacuated from-space.
  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      REQUIRES(!region_lock_);

  size_t FromSpaceSize() REQUIRES(!region_lock_);
  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
  size_t ToSpaceSize() REQUIRES(!region_lock_);
  void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
      REQUIRES(!region_lock_);
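
  // Example (illustrative sketch of how a collection cycle drives this space;
  // the evacuation work in between is elided):
  //
  //   // During the pause: choose evac regions, tag the rest unevac.
  //   region_space->SetFromSpace(rb_table, /* force_evacuate_all */ false);
  //   // ... copy live objects out of from-space regions ...
  //   uint64_t cleared_bytes, cleared_objects;
  //   region_space->ClearFromSpace(&cleared_bytes, &cleared_objects);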

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), region_lock_);
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        size_t live_bytes = r->LiveBytes();
        CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
      }
    }
  }

  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
  bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);
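
  // Example (illustrative TLAB flow; the fallback path is an assumption for
  // the example):
  //
  //   if (region_space->AllocNewTlab(self, min_bytes)) {
  //     // `self` now owns a region-backed TLAB and can bump-allocate from it
  //     // without taking region_lock_.
  //   } else {
  //     // No free region was available; fall back to a shared allocation path.
  //   }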

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly, typename Visitor>
  ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    void Init(size_t idx, uint8_t* begin, uint8_t* end) {
      idx_ = idx;
      begin_ = begin;
      top_.StoreRelaxed(begin);
      end_ = end;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_.StoreRelaxed(0);
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear(bool zero_and_release_pages);

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes,
                                        /* out */ size_t* bytes_allocated,
                                        /* out */ size_t* usable_size,
                                        /* out */ size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = (state_ == RegionState::kRegionStateFree);
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    // Given a free region, declare it non-free (allocated) and large.
    void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    // Given a free region, declare it non-free (allocated) and large tail.
    void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = (state_ == RegionState::kRegionStateLarge);
      if (is_large) {
        DCHECK_LT(begin_ + kRegionSize, Top());
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = (state_ == RegionState::kRegionStateLargeTail);
      if (is_large_tail) {
        DCHECK_EQ(begin_, Top());
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsNewlyAllocated() const {
      return is_newly_allocated_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    // Set this region as evacuated from-space. At the end of the
    // collection, RegionSpace::ClearFromSpace will clear and reclaim
    // the space used by this region, and tag it as unallocated/free.
    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

    // Set this region as unevacuated from-space. At the end of the
    // collection, RegionSpace::ClearFromSpace will preserve the space
    // used by this region, and tag it as to-space (see
    // Region::SetUnevacFromSpaceAsToSpace below).
    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

    // Set this region as to-space. Used by RegionSpace::ClearFromSpace.
    // This is only valid if it is currently an unevac from-space region.
    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

    // Return whether this region should be evacuated. Used by RegionSpace::SetFromSpace.
    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      // For large allocations, we always consider all bytes in the
      // regions live.
      live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    bool AllAllocatedBytesAreLive() const {
      return LiveBytes() == static_cast<size_t>(Top() - Begin());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    size_t BytesAllocated() const;

    size_t ObjectsAllocated() const;

    uint8_t* Begin() const {
      return begin_;
    }

    ALWAYS_INLINE uint8_t* Top() const {
      return top_.LoadRelaxed();
    }

    void SetTop(uint8_t* new_top) {
      top_.StoreRelaxed(new_top);
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
      DCHECK_EQ(Top(), end_);
      objects_allocated_.StoreRelaxed(num_objects);
      top_.StoreRelaxed(begin_ + num_bytes);
      DCHECK_LE(Top(), end_);
    }

   private:
    size_t idx_;                        // The region's index in the region space.
    uint8_t* begin_;                    // The begin address of the region.
    // Note that `top_` can be higher than `end_` in the case of a
    // large region, where an allocated object spans multiple regions
    // (large region + one or more large tail regions).
    Atomic<uint8_t*> top_;              // The current position of the allocation.
    uint8_t* end_;                      // The end address of the region.
    RegionState state_;                 // The region state (see RegionState).
    RegionType type_;                   // The region type (see RegionType).
    Atomic<size_t> objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;               // The allocation time of the region.
    // Note that newly allocated and evacuated regions use -1 as a
    // special value for `live_bytes_`.
    size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;           // True if it's allocated after the last collection.
    bool is_a_tlab_;                    // True if it's a tlab.
    Thread* thread_;                    // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance reasons (this is frequently called via
    // RegionSpace::IsInFromSpace, etc.) we avoid taking a lock here.
    // Note that since we only change a region from to-space to (evac)
    // from-space during a pause (in RegionSpace::SetFromSpace) and
    // from (evac) from-space to free (after GC is done), as long as
    // `ref` is a valid reference into an allocated region, it's safe
    // to access the region state without the lock.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }
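
  // For example (the addresses are made up for illustration): with
  // Begin() == 0x12c00000 and kRegionSize == 256 KB (0x40000), a reference at
  // 0x12c9abcd maps to region index (0x12c9abcd - 0x12c00000) / 0x40000 == 2.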

  // Return the object location following `obj` in the region space
  // (i.e., the object location at `obj + obj->SizeOf()`).
  //
  // Note that unless
  // - the region containing `obj` is fully used; and
  // - `obj` is not the last object of that region;
  // the returned location is not guaranteed to be a valid object.
  mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
    DCHECK_LT(new_non_free_region_index, num_regions_);
    non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
                                            new_non_free_region_index + 1);
    VerifyNonFreeRegionLimit();
  }

  void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
    DCHECK_LE(new_non_free_region_index_limit, num_regions_);
    non_free_region_index_limit_ = new_non_free_region_index_limit;
    VerifyNonFreeRegionLimit();
  }

  // Verify this invariant:
  // for all `i >= non_free_region_index_limit_`, `regions_[i].IsFree()` is true.
  void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
    if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
      for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
        CHECK(regions_[i].IsFree());
      }
    }
  }

  Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time, measured as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  // The number of non-free regions in this space.
  size_t num_non_free_regions_ GUARDED_BY(region_lock_);

  // The number of evac regions allocated during collection. 0 when GC not running.
  size_t num_evac_regions_ GUARDED_BY(region_lock_);

  // The maximum number of non-free regions observed just before reclaim in
  // each GC cycle, which is when the number of non-free regions peaks.
  size_t max_peak_num_non_free_regions_;

  // The pointer to the region array.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);

  // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
  // RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace.
  //
  // Invariant (verified by RegionSpace::VerifyNonFreeRegionLimit):
  //   for all `i >= non_free_region_index_limit_`, `regions_[i].IsFree()` is true.
  size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);

  Region* current_region_;         // The region currently used for allocation.
  Region* evac_region_;            // The region currently used for evacuation.
  Region full_region_;             // The dummy/sentinel region that looks full.

  // Mark bitmap used by the GC.
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_