/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "gc/accounting/read_barrier_table.h"
#include "object_callbacks.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {
namespace space {

// A space that consists of equal-sized regions.
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space mem map with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
  static RegionSpace* Create(const std::string& name, MemMap* mem_map);

  // Allocate num_bytes; returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!region_lock_);
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size,
                                                size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
                             size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
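  // Note: a large allocation occupies multiple contiguous regions; the first is in state
  // kRegionStateLarge and the following ones are in state kRegionStateLargeTail (see RegionState
  // below).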

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }

  void Clear() OVERRIDE REQUIRES(!region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
  void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);

  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
  size_t RevokeAllThreadLocalBuffers()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
  uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  void Walk(ObjectCallback* callback, void* arg)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<false>(callback, arg);
  }

  void WalkToSpace(ObjectCallback* callback, void* arg)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<true>(callback, arg);
  }

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 256 * KB;
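  // Allocations larger than kRegionSize are served by AllocLarge() / FreeLarge() above.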

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsNewlyAllocated();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->Type();
    }
    return RegionType::kRegionTypeNone;
  }

  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      REQUIRES(!region_lock_);

  size_t FromSpaceSize() REQUIRES(!region_lock_);
  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
  size_t ToSpaceSize() REQUIRES(!region_lock_);
  void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), region_lock_);
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        size_t live_bytes = r->LiveBytes();
        CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
      }
    }
  }

  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
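  // Allocate a new thread-local allocation buffer (TLAB) of at least min_bytes for self.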
  bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

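  // Visit objects in the space's regions; when kToSpaceOnly is true, only to-space regions are
  // visited (see Walk() and WalkToSpace() above).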
  template<bool kToSpaceOnly>
  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    void Init(size_t idx, uint8_t* begin, uint8_t* end) {
      idx_ = idx;
      begin_ = begin;
      top_.StoreRelaxed(begin);
      end_ = end;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_.StoreRelaxed(0);
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear(bool zero_and_release_pages) {
      top_.StoreRelaxed(begin_);
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_.StoreRelaxed(0);
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      if (zero_and_release_pages) {
        ZeroAndReleasePages(begin_, end_ - begin_);
      }
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
    }

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = state_ == RegionState::kRegionStateFree;
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateAllocated;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
      region_space->AdjustNonFreeRegionLimit(idx_);
    }

    void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLarge;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
      region_space->AdjustNonFreeRegionLimit(idx_);
    }

    void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLargeTail;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
      region_space->AdjustNonFreeRegionLimit(idx_);
    }

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = state_ == RegionState::kRegionStateLarge;
      if (is_large) {
        DCHECK_LT(begin_ + kRegionSize, Top());
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
      if (is_large_tail) {
        DCHECK_EQ(begin_, Top());
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsNewlyAllocated() const {
      return is_newly_allocated_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

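    // Whether this region should be evacuated (i.e. become from-space rather than unevacuated
    // from-space) in the next collection.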
    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      live_bytes_ += live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

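    // The number of bytes allocated in this region.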
    size_t BytesAllocated() const;

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    ALWAYS_INLINE uint8_t* Top() const {
      return top_.LoadRelaxed();
    }

    void SetTop(uint8_t* new_top) {
      top_.StoreRelaxed(new_top);
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
      DCHECK_EQ(Top(), end_);
      objects_allocated_.StoreRelaxed(num_objects);
      top_.StoreRelaxed(begin_ + num_bytes);
      DCHECK_LE(Top(), end_);
    }

   private:
    size_t idx_;                        // The region's index in the region space.
    uint8_t* begin_;                    // The begin address of the region.
    Atomic<uint8_t*> top_;              // The current position of the allocation.
    uint8_t* end_;                      // The end address of the region.
    RegionState state_;                 // The region state (see RegionState).
    RegionType type_;                   // The region type (see RegionType).
    Atomic<size_t> objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;               // The allocation time of the region.
    size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;           // True if it's allocated after the last collection.
    bool is_a_tlab_;                    // True if it's a tlab.
    Thread* thread_;                    // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance reasons (this is frequently called via
    // IsInFromSpace() etc.), we avoid taking a lock here. Since a
    // region changes from to-space to from-space only during a pause
    // (in SetFromSpace()) and from from-space to free only after the
    // GC is done, it is safe to access the region state without the
    // lock as long as ref is a valid reference into an allocated
    // region.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
    DCHECK(HasAddress(ref));
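    // The region index is ref's byte offset from the space's base address divided by the region
    // size.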
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }

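  // Return the object that immediately follows obj in memory (used when walking the objects in a
  // region).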
  mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
    DCHECK_LT(new_non_free_region_index, num_regions_);
    non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
                                            new_non_free_region_index + 1);
    VerifyNonFreeRegionLimit();
  }

  void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
    DCHECK_LE(new_non_free_region_index_limit, num_regions_);
    non_free_region_index_limit_ = new_non_free_region_index_limit;
    VerifyNonFreeRegionLimit();
  }

  void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
    if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
      for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
        CHECK(regions_[i].IsFree());
      }
    }
  }

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time, as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  size_t num_non_free_regions_;    // The number of non-free regions in this space.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
                                   // The pointer to the region array.
  // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
  // SetFromSpace().  Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
  // true.
  size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
  Region* current_region_;         // The region that's being allocated currently.
  Region* evac_region_;            // The region that's being evacuated to currently.
  Region full_region_;             // The dummy/sentinel region that looks full.

  // Mark bitmap used by the GC.
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_