/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
#define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_

#include "base/allocator.h"
#include "base/safe_map.h"
#include "base/tracking_safe_map.h"
#include "dlmalloc_space.h"
#include "space.h"
#include "thread-current-inl.h"

#include <set>
#include <vector>

namespace art {
namespace gc {
namespace space {

class AllocationInfo;

// Which large object space implementation the heap should use (kDisabled means large objects
// are not allocated in a dedicated space).
enum class LargeObjectSpaceType {
  kDisabled,
  kMap,
  kFreeList,
};

// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const override {
    return kSpaceTypeLargeObjectSpace;
  }
  // Swap the live and mark bitmaps.
  void SwapBitmaps();
  // Copy the live bitmap into the mark bitmap.
  void CopyLiveToMarked();
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  uint64_t GetBytesAllocated() override {
    MutexLock mu(Thread::Current(), lock_);
    return num_bytes_allocated_;
  }
  uint64_t GetObjectsAllocated() override {
    MutexLock mu(Thread::Current(), lock_);
    return num_objects_allocated_;
  }
  uint64_t GetTotalBytesAllocated() const {
    MutexLock mu(Thread::Current(), lock_);
    return total_bytes_allocated_;
  }
  uint64_t GetTotalObjectsAllocated() const {
    MutexLock mu(Thread::Current(), lock_);
    return total_objects_allocated_;
  }
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
  // LargeObjectSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) override {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() override {
    return 0U;
  }
  bool IsAllocSpace() const override {
    return true;
  }
  AllocSpace* AsAllocSpace() override {
    return this;
  }
  // Sweep unmarked objects, returning the number of objects and bytes freed.
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  bool CanMoveObjects() const override {
    return false;
  }
  // Current address at which the space begins, which may vary as the space is filled.
  uint8_t* Begin() const {
    return begin_;
  }
  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_;
  }
  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }
  // Return true if we contain the specified address.
  bool Contains(const mirror::Object* obj) const override {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return Begin() <= byte_obj && byte_obj < End();
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if the large object is a zygote large object. Potentially slow.
  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
  // Called when we create the zygote space; marks all existing large objects as zygote large
  // objects. Sets the mark bit if called from PreZygoteFork() so the ConcurrentCopying GC
  // avoids dirtying the first page.
  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) = 0;

  virtual void ForEachMemMap(std::function<void(const MemMap&)> func) const = 0;
  // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin()
  // and End() taken from different allocations.
  virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;

 protected:
  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
                            const char* lock_name);
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Used to ensure mutual exclusion when the allocation space's data structures, including the
  // allocation counters below, are being modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Number of bytes which have been allocated into the space and not yet freed. The count is
  // also included in the identically named field in Heap. Counts actual allocated sizes (after
  // rounding), not requested sizes. TODO: It would be cheaper to just maintain total allocated
  // and total free counts.
  uint64_t num_bytes_allocated_ GUARDED_BY(lock_);
  uint64_t num_objects_allocated_ GUARDED_BY(lock_);

  // Totals for large objects ever allocated, including those that have since been deallocated.
  // Never decremented.
  uint64_t total_bytes_allocated_ GUARDED_BY(lock_);
  uint64_t total_objects_allocated_ GUARDED_BY(lock_);

  // Begin and end; may change as more large objects are allocated.
  uint8_t* begin_;
  uint8_t* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};

// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps
  // instead of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override REQUIRES(!lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override
      REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* ptr) override REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
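  // Illustrative usage sketch, not a normative contract (real call sites live in the heap and
  // collector code); `self` (a Thread*) and `num_bytes` are assumed to be provided by the caller:
  //   LargeObjectMapSpace* los = LargeObjectMapSpace::Create("large object space");
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = los->Alloc(self, num_bytes, &bytes_allocated, &usable_size,
  //                                    &bytes_tl_bulk_allocated);
  //   if (obj != nullptr) {
  //     los->Free(self, obj);
  //   }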
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
  void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);

 protected:
  struct LargeObject {
    MemMap mem_map;
    bool is_zygote;
  };
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
  void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) override
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
      GUARDED_BY(lock_);
};

// A continuous large object space with a free-list to handle holes.
class FreeListSpace final : public LargeObjectSpace {
 public:
  static constexpr size_t kAlignment = kPageSize;

  virtual ~FreeListSpace();
  static FreeListSpace* Create(const std::string& name, size_t capacity);
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
      REQUIRES(lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override
      REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
  void Dump(std::ostream& os) const override REQUIRES(!lock_);
  void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);

 protected:
  FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
  size_t GetSlotIndexForAddress(uintptr_t address) const {
    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
    return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
  }
  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
    return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
  }
  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
  }
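  // Worked example of the slot arithmetic above (illustrative only; kAlignment is kPageSize):
  // an allocation that starts at Begin() + 3 * kAlignment lives in slot 3, and
  // GetAllocationAddressForSlot(3) maps back to that same address, so the two helpers are
  // inverses for any kAlignment-aligned address inside the space.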
  // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
  void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
  void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) override
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class SortByPrevFree {
   public:
    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
  };
  typedef std::set<AllocationInfo*, SortByPrevFree,
                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;

  // There is no footer for any allocation at the end of the space, so we keep track of how much
  // free space there is at the end manually.
  MemMap mem_map_;
  // Side table for allocation info, one per page.
  MemMap allocation_info_map_;
  AllocationInfo* allocation_info_;

  // Free bytes at the end of the space.
  size_t free_end_ GUARDED_BY(lock_);
  FreeBlocks free_blocks_ GUARDED_BY(lock_);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_