/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_SPACE_H_

#include <memory>
#include <string>

#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/space_bitmap.h"
#include "gc/collector/garbage_collector.h"
#include "globals.h"
#include "image.h"
#include "mem_map.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

class Heap;

namespace space {

class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
class DiscontinuousSpace;
class MallocSpace;
class DlMallocSpace;
class RosAllocSpace;
class ImageSpace;
class LargeObjectSpace;
class ZygoteSpace;

static constexpr bool kDebugSpaces = kIsDebugBuild;

// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects in a space with this policy are retained forever.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
  // collections won't scan these areas, such as the Zygote space.
  kGcRetentionPolicyFullCollect,
};
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);

enum SpaceType {
  kSpaceTypeImageSpace,
  kSpaceTypeMallocSpace,
  kSpaceTypeZygoteSpace,
  kSpaceTypeBumpPointerSpace,
  kSpaceTypeLargeObjectSpace,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);

// A space contains memory allocated for managed objects.
class Space {
 public:
  // Dump the space. Also the key method for this class's C++ vtable.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy for when objects associated with this space are collected.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this is: image, malloc, zygote, bump pointer, or large object.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, i.e. one backed by a memory mapped image file?
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  ImageSpace* AsImageSpace();
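
  // A minimal sketch of the checked-downcast pattern the Is*/As* accessors
  // below support (illustrative only):
  //
  //   if (space->IsImageSpace()) {
  //     ImageSpace* image_space = space->AsImageSpace();
  //     // ... use the image space ...
  //   }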

  // Is this a malloc backed allocation space?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  MallocSpace* AsMallocSpace();

  // Is this a dlmalloc backed allocation space?
  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  // Is this a rosalloc backed allocation space?
  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

 protected:
  // When should objects within this space be reclaimed? Not constant, as we vary it in the
  // case of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_COPY_AND_ASSIGN(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);

// AllocSpace interface.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation succeeds, the output
  // parameter bytes_allocated will be set to the actually allocated bytes, which is >= num_bytes.
  // Alloc can be called from multiple threads at the same time and must be thread-safe.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) = 0;
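
  // A minimal usage sketch (illustrative only; the space instance and the
  // requested byte count are assumed to come from the caller):
  //
  //   size_t bytes_allocated;
  //   size_t usable_size;
  //   mirror::Object* obj = alloc_space->Alloc(Thread::Current(), num_bytes,
  //                                            &bytes_allocated, &usable_size);
  //   if (obj != nullptr) {
  //     DCHECK_GE(bytes_allocated, num_bytes);
  //   }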

  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return Alloc(self, num_bytes, bytes_allocated, usable_size);
  }

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Returns how many bytes were freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  virtual void RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  virtual void RevokeAllThreadLocalBuffers() = 0;

  virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;

 protected:
  struct SweepCallbackContext {
    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
    const bool swap_bitmaps;
    space::Space* const space;
    Thread* const self;
    collector::ObjectBytePair freed;
  };

  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};

// Continuous spaces have bitmaps, and an address range. Although not required, objects within
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
 public:
  // Address at which the space begins.
  byte* Begin() const {
    return begin_;
  }

  // Current address at which the space ends, which may vary as the space is filled.
  byte* End() const {
    return end_.LoadRelaxed();
  }

  // The end of the address range covered by the space.
  byte* Limit() const {
    return limit_;
  }

  // Change the end of the space. Be careful with use since changing the end of a space to an
  // invalid value may break the GC.
  void SetEnd(byte* end) {
    end_.StoreRelaxed(end);
  }

  void SetLimit(byte* limit) {
    limit_ = limit;
  }

  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }

  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0;
  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0;

  // Maximum to which the mapped space can grow.
  virtual size_t Capacity() const {
    return Limit() - Begin();
  }
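
  // An illustrative picture of the address range (a sketch; the exact layout
  // depends on the concrete space):
  //
  //   Begin()            End()                Limit()
  //     |  used by objects |  mapped but unused  |
  //     |<---- Size() ---->|
  //     |<-------------- Capacity() ------------>|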

  // Is object within this space? We check to see if the pointer is beyond the end first as
  // continuous spaces are iterated over from low to high.
  bool HasAddress(const mirror::Object* obj) const {
    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
    return byte_ptr >= Begin() && byte_ptr < Limit();
  }

  bool Contains(const mirror::Object* obj) const {
    return HasAddress(obj);
  }

  virtual bool IsContinuousSpace() const {
    return true;
  }

  virtual ~ContinuousSpace() {}

 protected:
  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
                  byte* begin, byte* end, byte* limit) :
      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
  }

  // The beginning of the storage for fast access.
  byte* begin_;

  // Current end of the space.
  Atomic<byte*> end_;

  // Limit of the space.
  byte* limit_;

 private:
  DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
};

// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
// is suitable for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  accounting::LargeObjectBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  accounting::LargeObjectBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  virtual bool IsDiscontinuousSpace() const OVERRIDE {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_;
  std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;

 private:
  DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
};

class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }

  MemMap* GetMemMap() {
    return mem_map_.get();
  }

  const MemMap* GetMemMap() const {
    return mem_map_.get();
  }

  MemMap* ReleaseMemMap() {
    return mem_map_.release();
  }

 protected:
  MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(mem_map) {
  }

  // Underlying storage of the space.
  std::unique_ptr<MemMap> mem_map_;

 private:
  DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
};

// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
 public:
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }

  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
    return true;
  }
  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
    return this;
  }

  bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void BindLiveToMarkBitmap()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps();
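
  // Conceptually (a sketch, not the exact collector control flow): after
  // marking, the GC swaps the bitmaps so that the marked objects become the
  // new live set, and Sweep() below then frees objects that were live before
  // the collection but were not marked, returning an object/byte count of
  // what it reclaimed.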

  // Clear the space back to an empty space.
  virtual void Clear() = 0;

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return live_bitmap_.get();
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }

  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;

 protected:
  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;

  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
                             byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
  }

 private:
  friend class gc::Heap;
  DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_H_