1
2 /*
3 * Copyright (C) 2013 The Android Open Source Project
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include "rosalloc_space-inl.h"
19
20 #include "base/logging.h" // For VLOG.
21 #include "base/time_utils.h"
22 #include "base/utils.h"
23 #include "gc/accounting/card_table.h"
24 #include "gc/accounting/space_bitmap-inl.h"
25 #include "gc/heap.h"
26 #include "memory_tool_malloc_space-inl.h"
27 #include "mirror/class-inl.h"
28 #include "mirror/object-inl.h"
29 #include "runtime.h"
30 #include "scoped_thread_state_change-inl.h"
31 #include "thread.h"
32 #include "thread_list.h"
33
34 namespace art {
35 namespace gc {
36 namespace space {
37
// If true, FreeList() prefetches entries a few slots ahead while scanning the
// pointer array, to hide memory latency. Disabled by default.
static constexpr bool kPrefetchDuringRosAllocFreeList = false;
// How many elements ahead FreeList() prefetches when the flag above is enabled.
static constexpr size_t kPrefetchLookAhead = 8;
// Use this only for verification, it is not safe to use since the class of the object may have
// been freed.
static constexpr bool kVerifyFreedBytes = false;

// TODO: Fix
// template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
46
RosAllocSpace(MemMap * mem_map,size_t initial_size,const std::string & name,art::gc::allocator::RosAlloc * rosalloc,uint8_t * begin,uint8_t * end,uint8_t * limit,size_t growth_limit,bool can_move_objects,size_t starting_size,bool low_memory_mode)47 RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
48 art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
49 uint8_t* limit, size_t growth_limit, bool can_move_objects,
50 size_t starting_size, bool low_memory_mode)
51 : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
52 starting_size, initial_size),
53 rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
54 CHECK(rosalloc != nullptr);
55 }
56
// Builds a RosAllocSpace (or its memory-tool-wrapped variant) on top of an
// existing mem map. Returns nullptr if the rosalloc allocator cannot be created;
// in that case |mem_map| is left untouched and owned by the caller.
RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool low_memory_mode, bool can_move_objects) {
  DCHECK(mem_map != nullptr);

  bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();

  // The allocator starts with a footprint of starting_size bytes and a footprint
  // limit of initial_size bytes (see CreateRosAlloc).
  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
                                                 capacity, low_memory_mode, running_on_memory_tool);
  if (rosalloc == nullptr) {
    LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
  uint8_t* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
  }

  // Everything is set so record in immutable structure and leave
  uint8_t* begin = mem_map->Begin();
  // TODO: Fix RosAllocSpace to support Valgrind/ASan. There is currently some issues with
  // AllocationSize caused by redzones. b/12944686
  if (running_on_memory_tool) {
    return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
        mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
        can_move_objects, starting_size, low_memory_mode);
  } else {
    return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
                             growth_limit, can_move_objects, starting_size, low_memory_mode);
  }
}
91
RosAllocSpace::~RosAllocSpace() {
  // The space owns the allocator handed to it at construction (see CreateRosAlloc).
  delete rosalloc_;
}
95
Create(const std::string & name,size_t initial_size,size_t growth_limit,size_t capacity,uint8_t * requested_begin,bool low_memory_mode,bool can_move_objects)96 RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
97 size_t growth_limit, size_t capacity, uint8_t* requested_begin,
98 bool low_memory_mode, bool can_move_objects) {
99 uint64_t start_time = 0;
100 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
101 start_time = NanoTime();
102 VLOG(startup) << "RosAllocSpace::Create entering " << name
103 << " initial_size=" << PrettySize(initial_size)
104 << " growth_limit=" << PrettySize(growth_limit)
105 << " capacity=" << PrettySize(capacity)
106 << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
107 }
108
109 // Memory we promise to rosalloc before it asks for morecore.
110 // Note: making this value large means that large allocations are unlikely to succeed as rosalloc
111 // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
112 // size of the large allocation) will be greater than the footprint limit.
113 size_t starting_size = Heap::kDefaultStartingSize;
114 MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
115 requested_begin);
116 if (mem_map == nullptr) {
117 LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
118 << PrettySize(capacity);
119 return nullptr;
120 }
121
122 RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
123 growth_limit, capacity, low_memory_mode,
124 can_move_objects);
125 // We start out with only the initial size possibly containing objects.
126 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
127 LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
128 << " ) " << *space;
129 }
130 return space;
131 }
132
CreateRosAlloc(void * begin,size_t morecore_start,size_t initial_size,size_t maximum_size,bool low_memory_mode,bool running_on_memory_tool)133 allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
134 size_t initial_size,
135 size_t maximum_size, bool low_memory_mode,
136 bool running_on_memory_tool) {
137 // clear errno to allow PLOG on error
138 errno = 0;
139 // create rosalloc using our backing storage starting at begin and
140 // with a footprint of morecore_start. When morecore_start bytes of
141 // memory is exhaused morecore will be called.
142 allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(
143 begin, morecore_start, maximum_size,
144 low_memory_mode ?
145 art::gc::allocator::RosAlloc::kPageReleaseModeAll :
146 art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
147 running_on_memory_tool);
148 if (rosalloc != nullptr) {
149 rosalloc->SetFootprintLimit(initial_size);
150 } else {
151 PLOG(ERROR) << "RosAlloc::Create failed";
152 }
153 return rosalloc;
154 }
155
AllocWithGrowth(Thread * self,size_t num_bytes,size_t * bytes_allocated,size_t * usable_size,size_t * bytes_tl_bulk_allocated)156 mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
157 size_t* bytes_allocated, size_t* usable_size,
158 size_t* bytes_tl_bulk_allocated) {
159 mirror::Object* result;
160 {
161 MutexLock mu(self, lock_);
162 // Grow as much as possible within the space.
163 size_t max_allowed = Capacity();
164 rosalloc_->SetFootprintLimit(max_allowed);
165 // Try the allocation.
166 result = AllocCommon(self, num_bytes, bytes_allocated, usable_size,
167 bytes_tl_bulk_allocated);
168 // Shrink back down as small as possible.
169 size_t footprint = rosalloc_->Footprint();
170 rosalloc_->SetFootprintLimit(footprint);
171 }
172 // Note RosAlloc zeroes memory internally.
173 // Return the new allocation or null.
174 CHECK(!kDebugSpaces || result == nullptr || Contains(result));
175 return result;
176 }
177
CreateInstance(MemMap * mem_map,const std::string & name,void * allocator,uint8_t * begin,uint8_t * end,uint8_t * limit,size_t growth_limit,bool can_move_objects)178 MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
179 void* allocator, uint8_t* begin, uint8_t* end,
180 uint8_t* limit, size_t growth_limit,
181 bool can_move_objects) {
182 if (Runtime::Current()->IsRunningOnMemoryTool()) {
183 return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
184 mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
185 limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
186 } else {
187 return new RosAllocSpace(mem_map, initial_size_, name,
188 reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
189 growth_limit, can_move_objects, starting_size_, low_memory_mode_);
190 }
191 }
192
// Frees a single object. Returns the byte count reported by RosAlloc::Free.
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  if (kRecentFreeCount > 0) {
    // lock_ guards the recent-free ring buffer (debugging aid), not the free itself.
    MutexLock mu(self, lock_);
    RegisterRecentFree(ptr);
  }
  return rosalloc_->Free(self, ptr);
}
204
// Frees |num_ptrs| objects in bulk. Returns the total bytes freed as reported by
// RosAlloc::BulkFree. Optional debug passes: size verification, recent-free
// recording, and bounds-check + poisoning of the freed objects.
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  size_t verify_bytes = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    if (kPrefetchDuringRosAllocFreeList && i + kPrefetchLookAhead < num_ptrs) {
      // Warm the cache a few entries ahead to reduce stalls in the loop body.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
    }
    if (kVerifyFreedBytes) {
      // Unsafe in general — the object's class may already be freed (see flag comment).
      verify_bytes += AllocationSizeNonvirtual<true>(ptrs[i], nullptr);
    }
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        // Poison the freed object so stale references fail loudly.
        size_t size = rosalloc_->UsableSize(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  const size_t bytes_freed = rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  if (kVerifyFreedBytes) {
    CHECK_EQ(verify_bytes, bytes_freed);
  }
  return bytes_freed;
}
245
// Trims unused memory at the end of the space, then (if the allocator's release
// policy does not already return all empty pages) releases remaining empty pages.
// Returns the number of bytes released by that second step, or 0.
size_t RosAllocSpace::Trim() {
  VLOG(heap) << "RosAllocSpace::Trim() ";
  {
    Thread* const self = Thread::Current();
    // SOA required for Rosalloc::Trim() -> ArtRosAllocMoreCore() -> Heap::GetRosAllocSpace.
    ScopedObjectAccess soa(self);
    MutexLock mu(self, lock_);
    // Trim to release memory at the end of the space.
    rosalloc_->Trim();
  }
  // Attempt to release pages if it does not release all empty pages.
  if (!rosalloc_->DoesReleaseAllPages()) {
    return rosalloc_->ReleasePages();
  }
  return 0;
}
262
Walk(void (* callback)(void * start,void * end,size_t num_bytes,void * callback_arg),void * arg)263 void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
264 void* arg) {
265 InspectAllRosAlloc(callback, arg, true);
266 }
267
GetFootprint()268 size_t RosAllocSpace::GetFootprint() {
269 MutexLock mu(Thread::Current(), lock_);
270 return rosalloc_->Footprint();
271 }
272
GetFootprintLimit()273 size_t RosAllocSpace::GetFootprintLimit() {
274 MutexLock mu(Thread::Current(), lock_);
275 return rosalloc_->FootprintLimit();
276 }
277
SetFootprintLimit(size_t new_size)278 void RosAllocSpace::SetFootprintLimit(size_t new_size) {
279 MutexLock mu(Thread::Current(), lock_);
280 VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
281 // Compare against the actual footprint, rather than the Size(), because the heap may not have
282 // grown all the way to the allowed size yet.
283 size_t current_space_size = rosalloc_->Footprint();
284 if (new_size < current_space_size) {
285 // Don't let the space grow any more.
286 new_size = current_space_size;
287 }
288 rosalloc_->SetFootprintLimit(new_size);
289 }
290
GetBytesAllocated()291 uint64_t RosAllocSpace::GetBytesAllocated() {
292 size_t bytes_allocated = 0;
293 InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated, false);
294 return bytes_allocated;
295 }
296
GetObjectsAllocated()297 uint64_t RosAllocSpace::GetObjectsAllocated() {
298 size_t objects_allocated = 0;
299 InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated, false);
300 return objects_allocated;
301 }
302
// Runs |callback| over all allocated regions with every mutator suspended.
// The acquisition order (suspend-all, then runtime shutdown lock, then thread
// list lock) must be preserved; do not reorder these.
void RosAllocSpace::InspectAllRosAllocWithSuspendAll(
    void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
    void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  ScopedSuspendAll ssa(__FUNCTION__);
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  rosalloc_->InspectAll(callback, arg);
  if (do_null_callback_at_end) {
    callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
  }
}
316
// Runs |callback| over all allocated regions, choosing a strategy based on the
// caller's mutator-lock state: exclusive hold means mutators are already
// suspended; a shared hold must be released before suspending; otherwise just
// suspend directly.
void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                                       void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The mutators are already suspended. For example, a call path
    // from SignalCatcher::HandleSigQuit().
    rosalloc_->InspectAll(callback, arg);
    if (do_null_callback_at_end) {
      callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
    }
  } else if (Locks::mutator_lock_->IsSharedHeld(self)) {
    // The mutators are not suspended yet and we have a shared access
    // to the mutator lock. Temporarily release the shared access by
    // transitioning to the suspend state, and suspend the mutators.
    ScopedThreadSuspension sts(self, kSuspended);
    InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
  } else {
    // The mutators are not suspended yet. Suspend the mutators.
    InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
  }
}
339
RevokeThreadLocalBuffers(Thread * thread)340 size_t RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
341 return rosalloc_->RevokeThreadLocalRuns(thread);
342 }
343
RevokeAllThreadLocalBuffers()344 size_t RosAllocSpace::RevokeAllThreadLocalBuffers() {
345 return rosalloc_->RevokeAllThreadLocalRuns();
346 }
347
AssertThreadLocalBuffersAreRevoked(Thread * thread)348 void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
349 if (kIsDebugBuild) {
350 rosalloc_->AssertThreadLocalRunsAreRevoked(thread);
351 }
352 }
353
AssertAllThreadLocalBuffersAreRevoked()354 void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
355 if (kIsDebugBuild) {
356 rosalloc_->AssertAllThreadLocalRunsAreRevoked();
357 }
358 }
359
// Resets the space to its post-creation state: drops all objects, returns the
// backing pages to the OS, and rebuilds the allocator over the same mem map.
void RosAllocSpace::Clear() {
  // Remember the limit so the recreated allocator gets the same growth bound.
  size_t footprint_limit = GetFootprintLimit();
  // Release the physical pages; the virtual mapping itself stays in place.
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(begin_ + starting_size_);
  // Replace the allocator wholesale rather than trying to reset it in place.
  delete rosalloc_;
  rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
                             NonGrowthLimitCapacity(), low_memory_mode_,
                             Runtime::Current()->IsRunningOnMemoryTool());
  SetFootprintLimit(footprint_limit);
}
372
// Dumps allocator statistics to |os|. Suspends all mutators so the numbers form
// a consistent snapshot.
void RosAllocSpace::DumpStats(std::ostream& os) {
  ScopedSuspendAll ssa(__FUNCTION__);
  rosalloc_->DumpStats(os);
}
377
// Computes the rosalloc usable size of |obj| from its class-reported size,
// accounting for memory-tool red zones when enabled. Returns the usable size and
// stores it in |usable_size| if non-null.
template<bool kMaybeIsRunningOnMemoryTool>
size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  // obj is a valid object. Use its class in the header to get the size.
  // Don't use verification since the object may be dead if we are sweeping.
  size_t size = obj->SizeOf<kVerifyNone>();
  bool add_redzones = false;
  if (kMaybeIsRunningOnMemoryTool) {
    add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : 0;
    if (add_redzones) {
      // A red zone is added on each side of the allocation.
      size += 2 * kDefaultMemoryToolRedZoneBytes;
    }
  } else {
    DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
  }
  size_t size_by_size = rosalloc_->UsableSize(size);
  if (kIsDebugBuild) {
    // Cross-check against the pointer-based lookup. On memory tool, the red zone
    // has an impact: the real allocation starts one red zone before the object.
    const uint8_t* obj_ptr = reinterpret_cast<const uint8_t*>(obj);
    size_t size_by_ptr = rosalloc_->UsableSize(
        obj_ptr - (add_redzones ? kDefaultMemoryToolRedZoneBytes : 0));
    if (size_by_size != size_by_ptr) {
      LOG(INFO) << "Found a bad sized obj of size " << size
                << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
                << " size_by_size=" << size_by_size << " size_by_ptr=" << size_by_ptr;
    }
    DCHECK_EQ(size_by_size, size_by_ptr);
  }
  if (usable_size != nullptr) {
    *usable_size = size_by_size;
  }
  return size_by_size;
}
410
411 } // namespace space
412
413 namespace allocator {
414
415 // Callback from rosalloc when it needs to increase the footprint.
ArtRosAllocMoreCore(allocator::RosAlloc * rosalloc,intptr_t increment)416 void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment)
417 REQUIRES_SHARED(Locks::mutator_lock_) {
418 Heap* heap = Runtime::Current()->GetHeap();
419 art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
420 DCHECK(rosalloc_space != nullptr);
421 DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
422 return rosalloc_space->MoreCore(increment);
423 }
424
425 } // namespace allocator
426
427 } // namespace gc
428 } // namespace art
429