/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_

#include "object_callbacks.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

// A bump pointer space allocates by incrementing a pointer. It doesn't provide a free
// implementation, as it's intended to be evacuated.
class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void (*WalkCallback)(void* start, void* end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeBumpPointerSpace;
  }

  // Create a bump pointer space with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static BumpPointerSpace* Create(const std::string& name, size_t capacity,
                                  uint8_t* requested_begin);
  static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);

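  // Illustrative sketch only (not part of this API): creating a space and confirming that the
  // requested base address was granted, per the comment above. The address and the `64 * MB`
  // capacity are arbitrary example values.
  //
  //   uint8_t* requested_begin = reinterpret_cast<uint8_t*>(0x100000000);
  //   BumpPointerSpace* space =
  //       BumpPointerSpace::Create("bump pointer space", 64 * MB, requested_begin);
  //   if (space->Begin() != requested_begin) {
  //     // The requested address was not granted; fall back or fail as appropriate.
  //   }
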
  // Allocate num_bytes; returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_);

  mirror::Object* AllocNonvirtual(size_t num_bytes);
  mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);

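  // Illustrative allocation sketch (assumes a valid Thread* self and a hypothetical num_bytes):
  //
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = space->Alloc(self, num_bytes, &bytes_allocated,
  //                                      &usable_size, &bytes_tl_bulk_allocated);
  //   if (obj == nullptr) {
  //     // The space is full; the caller must free up memory, e.g. by triggering a collection.
  //   }
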
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }

  // No-ops unless we support free lists.
  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    return 0;
  }

  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    return 0;
  }

  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the fork-time growth limit on capacity, allowing the application to allocate up to
  // the maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_end_ = Limit();
  }

  // Override capacity so that we only return the possibly limited capacity.
  size_t Capacity() const {
    return growth_end_ - begin_;
  }

  // The total amount of memory reserved for the space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }

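  // Illustrative sketch of the capacity relationships, assuming the mem map spans
  // Begin()..Limit(): Capacity() is bounded by growth_end_, while NonGrowthLimitCapacity()
  // is the full reservation, so clearing the growth limit makes the two coincide.
  //
  //   DCHECK_LE(space->Capacity(), space->NonGrowthLimitCapacity());
  //   space->ClearGrowthLimit();
  //   DCHECK_EQ(space->Capacity(), space->NonGrowthLimitCapacity());
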
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return nullptr;
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return nullptr;
  }

  // Reset the space to empty.
  void Clear() OVERRIDE REQUIRES(!block_lock_);

  void Dump(std::ostream& os) const;

  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!block_lock_);
  size_t RevokeAllThreadLocalBuffers()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);

  uint64_t GetBytesAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
  uint64_t GetObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);

  bool IsEmpty() const {
    return Begin() == End();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < End();
  }

  // TODO: Change this? Mainly used for compacting to a particular region of memory.
  BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);

  // Return the object which comes after obj, while ensuring alignment.
  static mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Allocate a new TLAB; returns false if the allocation failed.
  bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);

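  // Illustrative TLAB lifecycle sketch (assumes a valid Thread* self; tlab_size is
  // hypothetical). Per the member comments below, revoking folds the buffer's counts back
  // into objects_allocated_ / bytes_allocated_.
  //
  //   if (space->AllocNewTlab(self, tlab_size)) {
  //     // Small allocations on this thread can now be served thread-locally,
  //     // without taking block_lock_.
  //   }
  //   // Later, at a safepoint (e.g. before walking the space):
  //   space->RevokeThreadLocalBuffers(self);
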
  BumpPointerSpace* AsBumpPointerSpace() OVERRIDE {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  void Walk(ObjectCallback* callback, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);

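  // Illustrative Walk visitor sketch; CountCallback is a hypothetical function matching the
  // ObjectCallback signature from object_callbacks.h.
  //
  //   static void CountCallback(mirror::Object* obj ATTRIBUTE_UNUSED, void* arg)
  //       REQUIRES_SHARED(Locks::mutator_lock_) {
  //     ++*reinterpret_cast<size_t*>(arg);
  //   }
  //
  //   size_t count = 0;
  //   space->Walk(CountCallback, &count);
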
  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;

  // Record objects / bytes freed.
  void RecordFree(int32_t objects, int32_t bytes) {
    objects_allocated_.FetchAndSubSequentiallyConsistent(objects);
    bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
  }

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = 8;

 protected:
  BumpPointerSpace(const std::string& name, MemMap* mem_map);

  // Allocate a raw block of bytes.
  uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);

  // The main block is an unbounded block where objects go when there are no other blocks. This
  // enables us to maintain tightly packed objects when thread-local buffers are not being used
  // for allocation. The main block starts at the space's Begin().
  void UpdateMainBlock() REQUIRES(block_lock_);

  uint8_t* growth_end_;
  AtomicInteger objects_allocated_;  // Accumulated from revoked thread local regions.
  AtomicInteger bytes_allocated_;  // Accumulated from revoked thread local regions.
  Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The objects at the start of the space are stored in the main block. The main block doesn't
  // have a header; this lets us walk empty spaces which are mprotected.
  size_t main_block_size_ GUARDED_BY(block_lock_);
  // The number of blocks in the space; if it is 0, then the space has one long continuous block
  // which doesn't have an updated header.
  size_t num_blocks_ GUARDED_BY(block_lock_);

 private:
  struct BlockHeader {
    size_t size_;  // Size of the block in bytes; does not include the header.
    size_t unused_;  // Ensures alignment of kAlignment.
  };

  static_assert(sizeof(BlockHeader) % kAlignment == 0,
                "continuous block must be kAlignment aligned");

  friend class collector::MarkSweep;
  DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_