/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
#define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_

#include "base/allocator.h"
#include "base/safe_map.h"
#include "base/tracking_safe_map.h"
#include "dlmalloc_space.h"
#include "space.h"
#include "thread-current-inl.h"

#include <set>
#include <vector>

namespace art HIDDEN {
namespace gc {
namespace space {

class AllocationInfo;

enum class LargeObjectSpaceType {
  kDisabled,
  kMap,
  kFreeList,
};

// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const override {
    return kSpaceTypeLargeObjectSpace;
  }
  void SwapBitmaps();
  void CopyLiveToMarked();
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  uint64_t GetBytesAllocated() override {
    MutexLock mu(Thread::Current(), lock_);
    return num_bytes_allocated_;
  }
  uint64_t GetObjectsAllocated() override {
    MutexLock mu(Thread::Current(), lock_);
    return num_objects_allocated_;
  }
  uint64_t GetTotalBytesAllocated() const {
    MutexLock mu(Thread::Current(), lock_);
    return total_bytes_allocated_;
  }
  uint64_t GetTotalObjectsAllocated() const {
    MutexLock mu(Thread::Current(), lock_);
    return total_objects_allocated_;
  }
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
  // LargeObjectSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) override {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() override {
    return 0U;
  }
  bool IsAllocSpace() const override {
    return true;
  }
  AllocSpace* AsAllocSpace() override {
    return this;
  }
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  bool CanMoveObjects() const override {
    return false;
  }
  // Current address at which the space begins, which may vary as the space is filled.
  uint8_t* Begin() const {
    return begin_;
  }
  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_;
  }
  // Current size of space
  size_t Size() const {
    return End() - Begin();
  }
  // Return true if we contain the specified address.
  bool Contains(const mirror::Object* obj) const override {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return Begin() <= byte_obj && byte_obj < End();
  }
  bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if the large object is a zygote large object. Potentially slow.
  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
  // Called when we create the zygote space; marks all existing large objects as zygote large
  // objects. Sets the mark bit if called from PreZygoteFork() so that the ConcurrentCopying
  // GC avoids dirtying the first page.
  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) = 0;

  virtual void ForEachMemMap(std::function<void(const MemMap&)> func) const = 0;
  // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
  // End() from different allocations.
  virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
  // Clamp the space size to the given capacity.
  virtual void ClampGrowthLimit(size_t capacity) = 0;

  // The way large object spaces are implemented, the object alignment has to be
  // the same as the *runtime* OS page size. However, in the future this may
  // change so it is important to use LargeObjectSpace::ObjectAlignment() rather
  // than gPageSize when appropriate.
#if defined(ART_PAGE_SIZE_AGNOSTIC)
  static ALWAYS_INLINE size_t ObjectAlignment() { return gPageSize; }
#else
  static constexpr size_t ObjectAlignment() { return kMinPageSize; }
#endif
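  // Illustrative sketch, not additional API: callers should derive large-object size and
  // address rounding from ObjectAlignment() rather than a hard-coded page size, e.g.
  // (assuming the usual RoundUp helper from base/bit_utils.h):
  //   size_t alloc_size = RoundUp(num_bytes, LargeObjectSpace::ObjectAlignment());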

 protected:
  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
                            const char* lock_name);
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Used to ensure mutual exclusion when the allocation space's data structures,
  // including the allocation counters below, are being modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Number of bytes which have been allocated into the space and not yet freed. The count is also
  // included in the identically named field in Heap. Counts actual allocated (after rounding),
  // not requested, sizes. TODO: It would be cheaper to just maintain total allocated and total
  // free counts.
  uint64_t num_bytes_allocated_ GUARDED_BY(lock_);
  uint64_t num_objects_allocated_ GUARDED_BY(lock_);

  // Totals for large objects ever allocated, including those that have since been deallocated.
  // Never decremented.
  uint64_t total_bytes_allocated_ GUARDED_BY(lock_);
  uint64_t total_objects_allocated_ GUARDED_BY(lock_);

  // Begin and end, may change as more large objects are allocated.
  uint8_t* begin_;
  uint8_t* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};

// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps instead
  // of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);
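  // Illustrative usage sketch, not additional API: a typical allocate/free round trip through
  // the declarations above and below (the Thread* and byte count are assumed to come from the
  // caller):
  //   LargeObjectMapSpace* los = LargeObjectMapSpace::Create("large object space");
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = los->Alloc(self, num_bytes, &bytes_allocated,
  //                                    &usable_size, &bytes_tl_bulk_allocated);
  //   // ... use obj ...
  //   los->Free(self, obj);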
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override REQUIRES(!lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override
      REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* ptr) override REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
  void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
  void ClampGrowthLimit(size_t capacity ATTRIBUTE_UNUSED) override {}

 protected:
  struct LargeObject {
    MemMap mem_map;
    bool is_zygote;
  };
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
  void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) override
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
      GUARDED_BY(lock_);
};

// A continuous large object space with a free-list to handle holes.
class FreeListSpace final : public LargeObjectSpace {
 public:
  virtual ~FreeListSpace();
  static FreeListSpace* Create(const std::string& name, size_t capacity);
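  // Illustrative sketch, not additional API: unlike LargeObjectMapSpace, this space is backed
  // by a single contiguous mapping, so creation takes a capacity up front (the value below is
  // an arbitrary example):
  //   FreeListSpace* los = FreeListSpace::Create("free list large object space",
  //                                              128 * 1024 * 1024);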
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
      REQUIRES(lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      override REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
  void Dump(std::ostream& os) const override REQUIRES(!lock_);
  void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
  void ClampGrowthLimit(size_t capacity) override REQUIRES(!lock_);

 protected:
  FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
  size_t GetSlotIndexForAddress(uintptr_t address) const {
    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
    return (address - reinterpret_cast<uintptr_t>(Begin())) / ObjectAlignment();
  }
  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
    return reinterpret_cast<uintptr_t>(Begin()) + slot * ObjectAlignment();
  }
  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
  }
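  // Worked example (assuming ObjectAlignment() is 4 KiB): the object at Begin() + 8192 maps to
  // slot 2 via GetSlotIndexForAddress(), and GetAllocationAddressForSlot(2) maps back to
  // Begin() + 8192.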
  // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
  void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
  void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) override
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class SortByPrevFree {
   public:
    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
  };
  using FreeBlocks = std::set<AllocationInfo*,
                              SortByPrevFree,
                              TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>>;
  // There is no footer for any allocations at the end of the space, so we keep track of how much
  // free space there is at the end manually.
  MemMap mem_map_;
  // Side table for allocation info, one per page.
  MemMap allocation_info_map_;
  AllocationInfo* allocation_info_;

  // Free bytes at the end of the space.
  size_t free_end_ GUARDED_BY(lock_);
  FreeBlocks free_blocks_ GUARDED_BY(lock_);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_