/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "gc/accounting/read_barrier_table.h"
#include "object_callbacks.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {
namespace space {

// A space that consists of equal-sized regions.
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
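  // Callback type passed a [start, end) address range, the number of bytes it covers, and a
  // caller-supplied argument.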
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm that the request was granted.
  static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
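  // Usage sketch (illustrative only; the name and capacity below are made up):
  //   RegionSpace* space = RegionSpace::Create("region space", 256 * MB, requested_begin);
  //   if (space->Begin() != requested_begin) {
  //     // The requested base address was not granted.
  //   }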

  // Allocate num_bytes, returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!region_lock_);
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size,
                                                size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
                             size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    // No live bitmap.
    return nullptr;
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    // No mark bitmap.
    return nullptr;
  }

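  // Clear every region (releasing its pages), returning the space to its initial, empty state.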
  void Clear() OVERRIDE REQUIRES(!region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
  void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);

  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
  size_t RevokeAllThreadLocalBuffers()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
  uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  void Walk(ObjectCallback* callback, void* arg)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<false>(callback, arg);
  }

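  // As above, but only visits objects in to-space regions.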
  void WalkToSpace(ObjectCallback* callback, void* arg)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<true>(callback, arg);
  }

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 1 * MB;
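  // For illustration, a reference at address addr inside this space lives in region number
  //   (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(Begin())) / kRegionSize
  // (see RefToRegionLocked() below).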

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->Type();
    }
    return RegionType::kRegionTypeNone;
  }

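  // At the start of a collection pause, turn the current to-space regions into from-space regions
  // (to be evacuated) or unevacuated from-space regions, consulting rb_table; if
  // force_evacuate_all is true, every non-free region is marked for evacuation.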
  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      REQUIRES(!region_lock_);

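  // Total sizes (in bytes) of the from-space, unevac from-space, and to-space regions;
  // ClearFromSpace() releases the from-space regions once evacuation is complete.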
  size_t FromSpaceSize() REQUIRES(!region_lock_);
  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
  size_t ToSpaceSize() REQUIRES(!region_lock_);
  void ClearFromSpace() REQUIRES(!region_lock_);

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_);

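  // Record an allocation of ref in its region's counters; AllocNewTlab() claims a new region as a
  // thread-local allocation buffer for self.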
  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
  bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_);

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly>
  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    Region(size_t idx, uint8_t* begin, uint8_t* end)
        : idx_(idx), begin_(begin), top_(begin), end_(end),
          state_(RegionState::kRegionStateFree), type_(RegionType::kRegionTypeNone),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear() {
      top_ = begin_;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_ = 0;
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      if (!kMadviseZeroes) {
        memset(begin_, 0, end_ - begin_);
      }
      madvise(begin_, end_ - begin_, MADV_DONTNEED);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
    }

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = state_ == RegionState::kRegionStateFree;
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateAllocated;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

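    // Given a free region, declare it the first region of a large allocation (one larger than the
    // region size).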
    void UnfreeLarge(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLarge;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

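    // Given a free region, declare it a non-first (tail) region of a large allocation.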
    void UnfreeLargeTail(uint32_t alloc_time) {
      DCHECK(IsFree());
      state_ = RegionState::kRegionStateLargeTail;
      type_ = RegionType::kRegionTypeToSpace;
      alloc_time_ = alloc_time;
    }

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = state_ == RegionState::kRegionStateLarge;
      if (is_large) {
        DCHECK_LT(begin_ + 1 * MB, top_);
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
      if (is_large_tail) {
        DCHECK_EQ(begin_, top_);
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

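    // During a pause, turn a non-free to-space region into a from-space region (to be evacuated).
    // The live byte count is reset to "unknown" (-1).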
    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

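    // During a pause, turn a non-free to-space region into an unevacuated from-space region (kept
    // in place). Live byte counting starts at zero.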
    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

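    // After the collection, an unevacuated from-space region simply becomes a to-space region
    // again.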
    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

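    // Whether this region should be evacuated rather than kept as unevac from-space (used when
    // setting up the from-space).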
    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      live_bytes_ += live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

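    // Percentage (0-100) of allocated bytes that are live, with the denominator rounded up to a
    // whole region.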
    uint GetLivePercent() const {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_GE(bytes_allocated, 0U);
      uint result = (live_bytes_ * 100U) / bytes_allocated;
      DCHECK_LE(result, 100U);
      return result;
    }

    size_t BytesAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, top_);
        return static_cast<size_t>(top_ - begin_);
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        DCHECK_LE(begin_, top_);
        size_t bytes = static_cast<size_t>(top_ - begin_);
        DCHECK_LE(bytes, kRegionSize);
        return bytes;
      }
    }

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + 1 * MB, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, top_);
        DCHECK_EQ(objects_allocated_, 0U);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    uint8_t* Top() const {
      return top_;
    }

    void SetTop(uint8_t* new_top) {
      top_ = new_top;
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

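    // Record thread-local allocations made into this region (the buffer spans the whole region):
    // the number of objects and bytes the owning thread allocated into it.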
    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_, 0U);
      DCHECK_EQ(top_, end_);
      objects_allocated_ = num_objects;
      top_ = begin_ + num_bytes;
      DCHECK_EQ(top_, end_);
    }

   private:
    size_t idx_;                   // The region's index in the region space.
    uint8_t* begin_;               // The begin address of the region.
    // Can't use Atomic<uint8_t*> as Atomic's copy operator is implicitly deleted.
    uint8_t* top_;                 // The current position of the allocation.
    uint8_t* end_;                 // The end address of the region.
    RegionState state_;            // The region state (see RegionState).
    RegionType type_;              // The region type (see RegionType).
    uint64_t objects_allocated_;   // The number of objects allocated.
    uint32_t alloc_time_;          // The allocation time of the region.
    size_t live_bytes_;            // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;      // True if it was allocated after the last collection.
    bool is_a_tlab_;               // True if it's a tlab.
    Thread* thread_;               // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance (this is frequently called via IsInFromSpace()
    // etc.), we avoid taking a lock here. Note that since a region
    // changes from to-space to from-space only during a pause
    // (SetFromSpace()) and from from-space to free only after the GC
    // is done, as long as ref is a valid reference into an allocated
    // region, it's safe to access the region state without the lock.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }

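  // Return the object that immediately follows obj in memory (objects within a region are
  // allocated contiguously).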
  mirror::Object* GetNextObject(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time, measured as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  size_t num_non_free_regions_;    // The number of non-free regions in this space.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
                                   // The pointer to the region array.
  Region* current_region_;         // The region currently being allocated into.
  Region* evac_region_;            // The region currently being evacuated into.
  Region full_region_;             // The dummy/sentinel region that looks full.

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_