/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_

#include "object_callbacks.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
class MarkSweep;
}  // namespace collector

namespace space {

// A bump pointer space allocates by incrementing a pointer; it doesn't provide a free
// implementation, as it is intended to be evacuated.
class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeBumpPointerSpace;
  }

  // Create a bump pointer space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
  static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);

  // Allocate num_bytes; returns nullptr if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size) OVERRIDE;

  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size)
      OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* AllocNonvirtual(size_t num_bytes);
  mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }

  // No-ops unless we support free lists.
  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    return 0;
  }

  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    return 0;
  }

  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the fork-time growth limit on capacity, allowing the application to allocate up to
  // the maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_end_ = Limit();
  }

  // Override capacity so that we only return the possibly limited capacity.
  size_t Capacity() const {
    return growth_end_ - begin_;
  }
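  // A minimal sketch of the lock-free pointer bump behind
  // AllocNonvirtualWithoutAccounting. Assumptions: end_ is the inherited bump
  // pointer, and BumpAllocSketch / CompareAndSwapEnd are hypothetical names
  // used only for illustration, not part of this class's API:
  //
  //   mirror::Object* BumpAllocSketch(size_t num_bytes) {
  //     byte* old_end;
  //     byte* new_end;
  //     do {
  //       old_end = end_;                 // Current top of the space.
  //       new_end = old_end + num_bytes;  // num_bytes must be kAlignment-aligned.
  //       if (new_end > growth_end_) {
  //         return nullptr;               // Out of space.
  //       }
  //     } while (!CompareAndSwapEnd(old_end, new_end));  // Retry on contention.
  //     return reinterpret_cast<mirror::Object*>(old_end);
  //   }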
  // The total amount of memory reserved for the space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return nullptr;
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return nullptr;
  }

  // Reset the space to empty.
  void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);

  void Dump(std::ostream& os) const;

  void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
  void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                    Locks::thread_list_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(block_lock_);
  void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
                                                              Locks::thread_list_lock_);

  uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsEmpty() const {
    return Begin() == End();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
    return byte_obj >= Begin() && byte_obj < End();
  }

  // TODO: Change this? Mainly used for compacting to a particular region of memory.
  BumpPointerSpace(const std::string& name, byte* begin, byte* limit);

  // Return the object which comes after obj, while ensuring alignment.
  static mirror::Object* GetNextObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Allocate a new TLAB; returns false if the allocation failed.
  bool AllocNewTlab(Thread* self, size_t bytes);

  BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  void Walk(ObjectCallback* callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;

  // Record objects / bytes freed.
  void RecordFree(int32_t objects, int32_t bytes) {
    objects_allocated_.FetchAndSubSequentiallyConsistent(objects);
    bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
  }

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = 8;

 protected:
  BumpPointerSpace(const std::string& name, MemMap* mem_map);

  // Allocate a raw block of bytes.
  byte* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_);

  // The main block is an unbounded block where objects go when there are no other blocks. This
  // enables us to maintain tightly packed objects when thread local buffers are not in use for
  // allocation. The main block starts at the space Begin().
  void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_);
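  // A minimal sketch (the helper name UpdateMainBlockSketch is hypothetical,
  // not the declared API) of the invariant UpdateMainBlock is expected to
  // maintain: while no thread-local blocks exist, the whole used range is one
  // headerless main block whose size tracks the current extent of the space:
  //
  //   void UpdateMainBlockSketch() EXCLUSIVE_LOCKS_REQUIRED(block_lock_) {
  //     DCHECK_EQ(num_blocks_, 0u);  // Only meaningful before TLAB blocks exist.
  //     main_block_size_ = Size();   // Size() == End() - Begin() for this space.
  //   }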
  byte* growth_end_;
  AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
  AtomicInteger bytes_allocated_;  // Accumulated from revoked thread local regions.
  Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The objects at the start of the space are stored in the main block. The main block doesn't
  // have a header; this lets us walk empty spaces which are mprotected.
  size_t main_block_size_ GUARDED_BY(block_lock_);
  // The number of blocks in the space; if it is 0, the space has one long continuous block
  // which doesn't have an updated header.
  size_t num_blocks_ GUARDED_BY(block_lock_);

 private:
  struct BlockHeader {
    size_t size_;  // Size of the block in bytes, does not include the header.
    size_t unused_;  // Ensures alignment of kAlignment.
  };

  COMPILE_ASSERT(sizeof(BlockHeader) % kAlignment == 0,
                 continuous_block_must_be_kAlignment_aligned);

  friend class collector::MarkSweep;
  DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
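// A minimal usage sketch (hypothetical driver code; the capacity value and error
// handling are illustrative only, and Alloc must be called from an attached
// runtime thread):
//
//   using art::gc::space::BumpPointerSpace;
//   BumpPointerSpace* space =
//       BumpPointerSpace::Create("bump pointer space", 16 * 1024 * 1024, nullptr);
//   size_t bytes_allocated = 0;
//   size_t usable_size = 0;
//   art::mirror::Object* obj =
//       space->Alloc(art::Thread::Current(), 32, &bytes_allocated, &usable_size);
//   if (obj == nullptr) {
//     // The space is full; a real caller would trigger a collection here.
//   }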