/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dlmalloc_space-inl.h"

#include "base/time_utils.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "memory_tool_malloc_space-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

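// If true, FreeList() prefetches the chunk header of the pointer look_ahead
// elements ahead while summing allocation sizes, hiding part of the
// cache-miss cost of the upcoming size lookups.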
static constexpr bool kPrefetchDuringDlMallocFreeList = true;

DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                             void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
                             size_t growth_limit, bool can_move_objects, size_t starting_size)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      mspace_(mspace) {
  CHECK(mspace != nullptr);
}

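// Creates a DlMallocSpace on top of an existing MemMap. Only the first
// starting_size bytes are usable up front; the tail of the map is protected
// PROT_NONE and is handed out later through the morecore callback.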
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool can_move_objects) {
  DCHECK(mem_map != nullptr);
  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == nullptr) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. Morecore will add r/w permissions when necessary.
  uint8_t* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
  }

  // Everything is set up, so record the state in an immutable structure and return it.
  uint8_t* begin = mem_map->Begin();
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
        mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
        can_move_objects, starting_size);
  } else {
    return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
                             growth_limit, can_move_objects, starting_size);
  }
}

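// Creates a fresh DlMallocSpace, reserving a new MemMap of the requested
// capacity and then delegating to CreateFromMemMap(). Entry and exit are
// logged, with timing, when heap or startup verbose logging is enabled.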
DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
                                     bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "DlMallocSpace::Create entering " << name
              << " initial_size=" << PrettySize(initial_size)
              << " growth_limit=" << PrettySize(growth_limit)
              << " capacity=" << PrettySize(capacity)
              << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == nullptr) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }
  DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, can_move_objects);
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}

void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold the heap lock). When
  // the morecore_start bytes of memory are exhausted, morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != nullptr) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap.
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

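// Slow-path allocation. Temporarily raises the footprint limit to the full
// Capacity() so the request may grow the space, tries the allocation, then
// clamps the limit back down to the footprint actually reached.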
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != nullptr) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
    // Check that the result is contained in the space.
    CHECK(!kDebugSpaces || Contains(result));
  }
  return result;
}

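// Recreates a space object over storage that is already set up, picking the
// memory-tool wrapper when a memory tool is active (e.g. when the heap
// splits an existing space, as when creating the zygote space).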
MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
                                           void* allocator, uint8_t* begin, uint8_t* end,
                                           uint8_t* limit, size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
        mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
        can_move_objects, starting_size_);
  } else {
    return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
                             growth_limit, can_move_objects, starting_size_);
  }
}

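// Frees a single object, returning the number of bytes released. The size
// must be read before mspace_free(), which recycles the chunk header.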
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The head of the chunk for the allocation is sizeof(size_t) behind the allocation.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}

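// Returns the number of bytes of free space released back to the kernel:
// mspace_trim() shrinks the top of the mspace, then DlmallocMadviseCallback
// madvises away page-sized holes found by mspace_inspect_all().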
size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit space looking for page-sized holes to advise the kernel we don't need.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}

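// Walks every chunk in the mspace under the space lock, then invokes the
// callback one final time with (nullptr, nullptr, 0) to mark the end of the
// space. A minimal sketch of a caller, with a hypothetical helper that sums
// used bytes (not part of this file):
//
//   static void SumUsedBytes(void* start, void* end ATTRIBUTE_UNUSED,
//                            size_t used_bytes, void* arg) {
//     if (start != nullptr) {  // Skip the end-of-space sentinel call.
//       *reinterpret_cast<size_t*>(arg) += used_bytes;
//     }
//   }
//   // size_t used = 0;
//   // space->Walk(SumUsedBytes, &used);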
void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
}

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

uint64_t DlMallocSpace::GetBytesAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t DlMallocSpace::GetObjectsAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t objects_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

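// Resets the space to its post-creation state: releases all pages back to
// the kernel, clears both bitmaps, rewinds End() to the starting size, and
// rebuilds the mspace with the previous footprint limit restored.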
void DlMallocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(Begin() + starting_size_);
  mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
  SetFootprintLimit(footprint_limit);
}

#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif

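// mspace_inspect_all() callback used by LogFragmentationAllocFailure(). arg
// points to a size_t holding the largest free contiguous run seen so far,
// which bounds the biggest allocation that could currently succeed.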
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                 size_t failed_alloc_bytes ATTRIBUTE_UNUSED) {
  Thread* const self = Thread::Current();
  size_t max_contiguous_allocation = 0;
  // To allow the Walk/InspectAll() to exclusively-lock the mutator
  // lock, temporarily release the shared access to the mutator
  // lock here by transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  ScopedThreadSuspension sts(self, kSuspended);
  Walk(MSpaceChunkCallback, &max_contiguous_allocation);
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
}

}  // namespace space

namespace allocator {

// Implement the dlmalloc morecore callback.
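// The common case resolves through Heap::GetDlMallocSpace(). On the slow path
// the mspace may instead belong to the JIT code cache, or to another dlmalloc
// space found by scanning the heap's continuous spaces.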
void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) SHARED_REQUIRES(Locks::mutator_lock_) {
  Runtime* runtime = Runtime::Current();
  Heap* heap = runtime->GetHeap();
  ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
  // Support for multiple DlMalloc spaces is provided by a slow path.
  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
    if (LIKELY(runtime->GetJit() != nullptr)) {
      jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
      if (code_cache->OwnsSpace(mspace)) {
        return code_cache->MoreCore(mspace, increment);
      }
    }
    dlmalloc_space = nullptr;
    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
        if (cur_dlmalloc_space->GetMspace() == mspace) {
          dlmalloc_space = cur_dlmalloc_space;
          break;
        }
      }
    }
    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlMallocSpace with mspace=" << mspace;
  }
  return dlmalloc_space->MoreCore(increment);
}

}  // namespace allocator

}  // namespace gc
}  // namespace art