/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc_space-inl.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "base/time_utils.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
#include "valgrind_malloc_space-inl.h"

namespace art {
namespace gc {
namespace space {

static constexpr bool kPrefetchDuringRosAllocFreeList = false;
static constexpr size_t kPrefetchLookAhead = 8;
// Use this only for verification, it is not safe to use since the class of the object may have
// been freed.
static constexpr bool kVerifyFreedBytes = false;

// TODO: Fix
// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;

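// Note: the space takes ownership of |rosalloc| and deletes it in ~RosAllocSpace().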
RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                             art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
                             uint8_t* limit, size_t growth_limit, bool can_move_objects,
                             size_t starting_size, bool low_memory_mode)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
  CHECK(rosalloc != nullptr);
}

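// Builds a RosAllocSpace on top of an existing MemMap. Under Valgrind the space is wrapped in a
// ValgrindMallocSpace so that allocations are padded with red zones.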
RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool low_memory_mode, bool can_move_objects) {
  DCHECK(mem_map != nullptr);

  bool running_on_valgrind = Runtime::Current()->RunningOnValgrind();

  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
                                                 capacity, low_memory_mode, running_on_valgrind);
  if (rosalloc == nullptr) {
    LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
  uint8_t* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
  }

  // Everything is set up, so record it in the immutable space structure and return.
  uint8_t* begin = mem_map->Begin();
  // TODO: Fix RosAllocSpace to support valgrind. There are currently some issues with
  // AllocationSize caused by redzones. b/12944686
  if (running_on_valgrind) {
    return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
        mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
        can_move_objects, starting_size, low_memory_mode);
  } else {
    return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
                             growth_limit, can_move_objects, starting_size, low_memory_mode);
  }
}

RosAllocSpace::~RosAllocSpace() {
  delete rosalloc_;
}

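// Allocates a fresh MemMap for the space and constructs a RosAllocSpace (or a Valgrind-wrapped
// one) on top of it. Returns null if the mem map cannot be created.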
RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
                                     bool low_memory_mode, bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "RosAllocSpace::Create entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to rosalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as rosalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = Heap::kDefaultStartingSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == nullptr) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }

  RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, low_memory_mode,
                                          can_move_objects);
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}

allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
                                                   size_t initial_size,
                                                   size_t maximum_size, bool low_memory_mode,
                                                   bool running_on_valgrind) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create rosalloc using our backing storage starting at begin and with a footprint of
  // morecore_start. When morecore_start bytes of memory are exhausted, morecore will be called.
  allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(
      begin, morecore_start, maximum_size,
      low_memory_mode ?
          art::gc::allocator::RosAlloc::kPageReleaseModeAll :
          art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
      running_on_valgrind);
  if (rosalloc != nullptr) {
    rosalloc->SetFootprintLimit(initial_size);
  } else {
    PLOG(ERROR) << "RosAlloc::Create failed";
  }
  return rosalloc;
}

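// Allocation slow path: temporarily raise the footprint limit to the full capacity, try the
// allocation, then shrink the limit back down to the actual footprint.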
mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    rosalloc_->SetFootprintLimit(max_allowed);
    // Try the allocation.
    result = AllocCommon(self, num_bytes, bytes_allocated, usable_size,
                         bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = rosalloc_->Footprint();
    rosalloc_->SetFootprintLimit(footprint);
  }
  // Note RosAlloc zeroes memory internally.
  // Return the new allocation or null.
  CHECK(!kDebugSpaces || result == nullptr || Contains(result));
  return result;
}

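// Creates another space of the same kind over |mem_map|, reusing the given rosalloc allocator;
// used when the space is split, for example when the zygote space is carved out.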
MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
                                           void* allocator, uint8_t* begin, uint8_t* end,
                                           uint8_t* limit, size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
        mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
        limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
  } else {
    return new RosAllocSpace(mem_map, initial_size_, name,
                             reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
                             growth_limit, can_move_objects, starting_size_, low_memory_mode_);
  }
}

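// Frees a single object. When recent-free tracking is enabled (kRecentFreeCount > 0), the pointer
// is also recorded for debugging purposes.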
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    RegisterRecentFree(ptr);
  }
  return rosalloc_->Free(self, ptr);
}

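// Bulk free: optionally prefetches ahead and tallies freed bytes for verification, records recent
// frees, poisons freed memory in debug builds, then hands the whole list to RosAlloc::BulkFree.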
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  size_t verify_bytes = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    if (kPrefetchDuringRosAllocFreeList && i + kPrefetchLookAhead < num_ptrs) {
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
    }
    if (kVerifyFreedBytes) {
      verify_bytes += AllocationSizeNonvirtual<true>(ptrs[i], nullptr);
    }
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = rosalloc_->UsableSize(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  const size_t bytes_freed = rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  if (kVerifyFreedBytes) {
    CHECK_EQ(verify_bytes, bytes_freed);
  }
  return bytes_freed;
}

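// Returns unused memory at the end of the space to the OS. If rosalloc does not already release
// all empty pages on its own, they are released here and the number of bytes released is returned.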
size_t RosAllocSpace::Trim() {
  VLOG(heap) << "RosAllocSpace::Trim() ";
  {
    MutexLock mu(Thread::Current(), lock_);
    // Trim to release memory at the end of the space.
    rosalloc_->Trim();
  }
  // Attempt to release pages if it does not release all empty pages.
  if (!rosalloc_->DoesReleaseAllPages()) {
    return rosalloc_->ReleasePages();
  }
  return 0;
}

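// Visits the chunks of the space via |callback|; a final callback with null start/end marks the
// end of the space.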
void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  InspectAllRosAlloc(callback, arg, true);
}

size_t RosAllocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->Footprint();
}

size_t RosAllocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->FootprintLimit();
}

void RosAllocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = rosalloc_->Footprint();
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  rosalloc_->SetFootprintLimit(new_size);
}

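// The allocated byte and object counts are computed by inspecting the whole rosalloc heap, so
// these accessors are comparatively expensive.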
uint64_t RosAllocSpace::GetBytesAllocated() {
  size_t bytes_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated, false);
  return bytes_allocated;
}

uint64_t RosAllocSpace::GetObjectsAllocated() {
  size_t objects_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated, false);
  return objects_allocated;
}

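// Suspends all mutator threads, inspects the rosalloc heap while holding the runtime shutdown and
// thread list locks, then resumes the mutators.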
void RosAllocSpace::InspectAllRosAllocWithSuspendAll(
    void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
    void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  ThreadList* tl = Runtime::Current()->GetThreadList();
  tl->SuspendAll(__FUNCTION__);
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    rosalloc_->InspectAll(callback, arg);
    if (do_null_callback_at_end) {
      callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
    }
  }
  tl->ResumeAll();
}

void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                                       void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The mutators are already suspended. For example, a call path
    // from SignalCatcher::HandleSigQuit().
    rosalloc_->InspectAll(callback, arg);
    if (do_null_callback_at_end) {
      callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
    }
  } else if (Locks::mutator_lock_->IsSharedHeld(self)) {
    // The mutators are not suspended yet and we hold shared access
    // to the mutator lock. Temporarily release the shared access by
    // transitioning to the suspended state, and suspend the mutators.
    self->TransitionFromRunnableToSuspended(kSuspended);
    InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
    self->TransitionFromSuspendedToRunnable();
    Locks::mutator_lock_->AssertSharedHeld(self);
  } else {
    // The mutators are not suspended yet. Suspend the mutators.
    InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
  }
}

size_t RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
  return rosalloc_->RevokeThreadLocalRuns(thread);
}

size_t RosAllocSpace::RevokeAllThreadLocalBuffers() {
  return rosalloc_->RevokeAllThreadLocalRuns();
}

void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    rosalloc_->AssertThreadLocalRunsAreRevoked(thread);
  }
}

void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    rosalloc_->AssertAllThreadLocalRunsAreRevoked();
  }
}

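// Resets the space to its initial state: releases the pages back to the kernel, clears the live
// and mark bitmaps, and rebuilds the rosalloc allocator at the original starting size while
// preserving the footprint limit.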
void RosAllocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(begin_ + starting_size_);
  delete rosalloc_;
  rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
                             NonGrowthLimitCapacity(), low_memory_mode_,
                             Runtime::Current()->RunningOnValgrind());
  SetFootprintLimit(footprint_limit);
}

}  // namespace space

namespace allocator {

// Callback from rosalloc when it needs to increase the footprint.
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
  DCHECK(rosalloc_space != nullptr);
  DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
  return rosalloc_space->MoreCore(increment);
}

}  // namespace allocator

}  // namespace gc
}  // namespace art