/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "gc/accounting/read_barrier_table.h"
#include "object_callbacks.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {
namespace space {

// A space that consists of equal-sized regions.
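// Each region is kRegionSize bytes; during a collection, regions are labeled
// as to-space, from-space, or unevacuated from-space (see RegionType below),
// which lets the collector decide per region whether to evacuate its objects.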
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space with the requested size. The requested base address is
  // not guaranteed to be granted; if it matters, the caller should call Begin()
  // on the returned space to check whether the request was granted.
  static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
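  // A minimal usage sketch (the name and sizes here are illustrative only):
  //
  //   RegionSpace* space = RegionSpace::Create("region space", 256 * MB, nullptr);
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = space->Alloc(Thread::Current(), 64, &bytes_allocated,
  //                                      &usable_size, &bytes_tl_bulk_allocated);
  //   if (obj == nullptr) { /* the space is full */ }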

  // Allocates num_bytes; returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size,
                                                size_t* bytes_tl_bulk_allocated);
  // Allocate/free large objects (objects larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
                             size_t* bytes_tl_bulk_allocated);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);
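  // A large allocation occupies a run of contiguous free regions: the first
  // region is marked kRegionStateLarge and the rest kRegionStateLargeTail
  // (see RegionState below).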

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    // No live bitmap.
    return nullptr;
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    // No mark bitmap.
    return nullptr;
  }

  void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os);
  void DumpNonFreeRegions(std::ostream& os);

  size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
  size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                      Locks::thread_list_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                              Locks::thread_list_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };
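  // Collection flow sketch: during the pause, SetFromSpace() relabels the
  // current to-space regions as from-space (to be evacuated) or
  // unevac-from-space (live data kept in place); survivors are copied into
  // fresh to-space regions, and ClearFromSpace() then reclaims the evacuated
  // from-space regions.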

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal();
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal();
  uint64_t GetBytesAllocated() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the regions and visit the contiguous objects.
  void Walk(ObjectCallback* callback, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    WalkInternal<false>(callback, arg);
  }

  void WalkToSpace(ObjectCallback* callback, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    WalkInternal<true>(callback, arg);
  }

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 1 * MB;
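  // For example (illustrative numbers): a space created with a 256 MB capacity
  // is carved into 256 regions of 1 MB each; any single allocation larger than
  // kRegionSize takes the AllocLarge() path above.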

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->Type();
    }
    return RegionType::kRegionTypeNone;
  }

  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      LOCKS_EXCLUDED(region_lock_);

  size_t FromSpaceSize();
  size_t UnevacFromSpaceSize();
  size_t ToSpaceSize();
  void ClearFromSpace();

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared();

  void RecordAlloc(mirror::Object* ref);
  bool AllocNewTlab(Thread* self);

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly>
  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    Region(size_t idx, uint8_t* begin, uint8_t* end)
        : idx_(idx), begin_(begin), top_(begin), end_(end),
          state_(RegionState::kRegionStateFree), type_(RegionType::kRegionTypeNone),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear() {
      top_ = begin_;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_ = 0;
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
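      // Release the backing pages. On Linux, MADV_DONTNEED on private
      // anonymous memory makes the pages read back as zero, so the explicit
      // memset below is only needed where that guarantee is absent
      // (i.e. when kMadviseZeroes is false).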
      if (!kMadviseZeroes) {
        memset(begin_, 0, end_ - begin_);
      }
      madvise(begin_, end_ - begin_, MADV_DONTNEED);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
    }

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = state_ == RegionState::kRegionStateFree;
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateAllocated;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLarge(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLarge;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void UnfreeLargeTail(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLargeTail;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = state_ == RegionState::kRegionStateLarge;
      if (is_large) {
        DCHECK_LT(begin_ + kRegionSize, top_);
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
      if (is_large_tail) {
        DCHECK_EQ(begin_, top_);
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      live_bytes_ += live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    uint GetLivePercent() const {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_GT(bytes_allocated, 0U);  // The division below needs a non-zero denominator.
      uint result = (live_bytes_ * 100U) / bytes_allocated;
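      // For example (illustrative): 300 KB live in a 1 MB region gives
      // result == (307200 * 100) / 1048576 == 29.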
      DCHECK_LE(result, 100U);
      return result;
    }

    size_t BytesAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        return static_cast<size_t>(top_ - begin_);
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        DCHECK_LE(begin_, top_);
        size_t bytes = static_cast<size_t>(top_ - begin_);
        DCHECK_LE(bytes, kRegionSize);
        return bytes;
      }
    }

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    uint8_t* Top() const {
      return top_;
    }

    void SetTop(uint8_t* new_top) {
      top_ = new_top;
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_, 0U);
      DCHECK_EQ(top_, end_);
      objects_allocated_ = num_objects;
      top_ = begin_ + num_bytes;
      DCHECK_EQ(top_, end_);
    }
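    // TLAB note: AllocNewTlab() hands a whole region to a thread for bump
    // pointer allocation; when the buffer is revoked, this method records the
    // thread's bulk-allocated counts back into the region.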

   private:
    size_t idx_;                   // The region's index in the region space.
    uint8_t* begin_;               // The begin address of the region.
    // Can't use Atomic<uint8_t*> as Atomic's copy operator is implicitly deleted.
    uint8_t* top_;                 // The current position of the allocation.
    uint8_t* end_;                 // The end address of the region.
    RegionState state_;            // The region state (see RegionState).
    RegionType type_;              // The region type (see RegionType).
    uint64_t objects_allocated_;   // The number of objects allocated.
    uint32_t alloc_time_;          // The allocation time of the region.
    size_t live_bytes_;            // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;      // True if it's allocated after the last collection.
    bool is_a_tlab_;               // True if it's a tlab.
    Thread* thread_;               // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance (this is frequently called via IsInFromSpace() etc.),
    // we avoid taking a lock here. This is safe because a region only changes
    // from to-space to from-space during a pause (SetFromSpace()) and from
    // from-space to free after the GC is done; so as long as ref is a valid
    // reference into an allocated region, it's safe to read the region state
    // without the lock.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }
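  // For example (illustrative): with kRegionSize == 1 MB, a ref at offset
  // 0x2A0010 from Begin() maps to reg_idx == 0x2A0010 / 0x100000 == 2,
  // i.e. the third region.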

  mirror::Object* GetNextObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time, measured as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  size_t num_non_free_regions_;    // The number of non-free regions in this space.
  // The pointer to the region array.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
  Region* current_region_;         // The region currently being allocated into.
  Region* evac_region_;            // The region currently being evacuated into.
  Region full_region_;             // The dummy/sentinel region that looks full.

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_