/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc_space-inl.h"

#include "base/time_utils.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
#include "memory_tool_malloc_space-inl.h"

namespace art {
namespace gc {
namespace space {

static constexpr bool kPrefetchDuringRosAllocFreeList = false;
static constexpr size_t kPrefetchLookAhead = 8;
// Use this only for verification; it is not safe in general since the class of the object may
// have been freed.
static constexpr bool kVerifyFreedBytes = false;

// TODO: Fix
// template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;

RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                             art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
                             uint8_t* limit, size_t growth_limit, bool can_move_objects,
                             size_t starting_size, bool low_memory_mode)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
  CHECK(rosalloc != nullptr);
}

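// Builds a RosAllocSpace on top of an already reserved MemMap: creates the rosalloc allocator,
// mprotects the pages beyond starting_size, and wraps the space in a MemoryToolMallocSpace when
// running under a memory tool (Valgrind/ASan).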
RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool low_memory_mode, bool can_move_objects) {
  DCHECK(mem_map != nullptr);

  bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();

  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
                                                 capacity, low_memory_mode, running_on_memory_tool);
  if (rosalloc == nullptr) {
    LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
  uint8_t* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
  }

  // Everything is set, so record it in the immutable structure and leave.
  uint8_t* begin = mem_map->Begin();
  // TODO: Fix RosAllocSpace to support Valgrind/ASan. There are currently some issues with
  // AllocationSize caused by redzones. b/12944686
  if (running_on_memory_tool) {
    return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
        mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
        can_move_objects, starting_size, low_memory_mode);
  } else {
    return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
                             growth_limit, can_move_objects, starting_size, low_memory_mode);
  }
}

RosAllocSpace::~RosAllocSpace() {
  delete rosalloc_;
}

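// Creates a fresh RosAllocSpace: reserves the backing mem map, then delegates to
// CreateFromMemMap. Timing is logged when heap or startup verbose logging is enabled.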
RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity, uint8_t* requested_begin,
                                     bool low_memory_mode, bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "RosAllocSpace::Create entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to rosalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as rosalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = Heap::kDefaultStartingSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == nullptr) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }

  RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, low_memory_mode,
                                          can_move_objects);
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
        << " ) " << *space;
  }
  return space;
}

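// Constructs the underlying rosalloc allocator. In low-memory mode every empty page is eligible
// for release (kPageReleaseModeAll); otherwise only a subset is (kPageReleaseModeSizeAndEnd).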
allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
                                                   size_t initial_size,
                                                   size_t maximum_size, bool low_memory_mode,
                                                   bool running_on_memory_tool) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create rosalloc using our backing storage starting at begin and with a footprint of
  // morecore_start. When morecore_start bytes of memory are exhausted, morecore will be called.
  allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(
      begin, morecore_start, maximum_size,
      low_memory_mode ?
          art::gc::allocator::RosAlloc::kPageReleaseModeAll :
          art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
      running_on_memory_tool);
  if (rosalloc != nullptr) {
    rosalloc->SetFootprintLimit(initial_size);
  } else {
    PLOG(ERROR) << "RosAlloc::Create failed";
  }
  return rosalloc;
}

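// Allocation slow path: temporarily raises the footprint limit to the space's full capacity,
// retries the allocation, then shrinks the limit back down to the actual footprint.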
mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    rosalloc_->SetFootprintLimit(max_allowed);
    // Try the allocation.
    result = AllocCommon(self, num_bytes, bytes_allocated, usable_size,
                         bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = rosalloc_->Footprint();
    rosalloc_->SetFootprintLimit(footprint);
  }
  // Note RosAlloc zeroes memory internally.
  // Return the new allocation or null.
  CHECK(!kDebugSpaces || result == nullptr || Contains(result));
  return result;
}

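// Creates a sibling space that reuses this space's configuration but wraps the given allocator
// and bounds, again choosing the memory-tool wrapper when running under Valgrind/ASan.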
MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
                                           void* allocator, uint8_t* begin, uint8_t* end,
                                           uint8_t* limit, size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
        mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
        limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
  } else {
    return new RosAllocSpace(mem_map, initial_size_, name,
                             reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
                             growth_limit, can_move_objects, starting_size_, low_memory_mode_);
  }
}

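// Frees a single object, optionally recording it in the recent-free list for debugging.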
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    RegisterRecentFree(ptr);
  }
  return rosalloc_->Free(self, ptr);
}

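// Frees a batch of objects via rosalloc's BulkFree. In debug builds the pointers are bounds
// checked and freed memory is poisoned with 0xEF; with kVerifyFreedBytes enabled, the reported
// byte count is cross-checked against the summed allocation sizes.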
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  size_t verify_bytes = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    if (kPrefetchDuringRosAllocFreeList && i + kPrefetchLookAhead < num_ptrs) {
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
    }
    if (kVerifyFreedBytes) {
      verify_bytes += AllocationSizeNonvirtual<true>(ptrs[i], nullptr);
    }
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = rosalloc_->UsableSize(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  const size_t bytes_freed = rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  if (kVerifyFreedBytes) {
    CHECK_EQ(verify_bytes, bytes_freed);
  }
  return bytes_freed;
}

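// Returns unused memory at the end of the space to the system and, if rosalloc does not already
// release all empty pages on its own, releases the remaining empty pages as well.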
size_t RosAllocSpace::Trim() {
  VLOG(heap) << "RosAllocSpace::Trim() ";
  {
    Thread* const self = Thread::Current();
    // SOA required for RosAlloc::Trim() -> ArtRosAllocMoreCore() -> Heap::GetRosAllocSpace.
    ScopedObjectAccess soa(self);
    MutexLock mu(self, lock_);
    // Trim to release memory at the end of the space.
    rosalloc_->Trim();
  }
  // Attempt to release pages if rosalloc does not release all empty pages on its own.
  if (!rosalloc_->DoesReleaseAllPages()) {
    return rosalloc_->ReleasePages();
  }
  return 0;
}

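// Visits every allocated chunk in the space via rosalloc's InspectAll; a final callback with
// null arguments marks the end of the space.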
void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  InspectAllRosAlloc(callback, arg, true);
}

size_t RosAllocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->Footprint();
}

size_t RosAllocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->FootprintLimit();
}

void RosAllocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = rosalloc_->Footprint();
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  rosalloc_->SetFootprintLimit(new_size);
}

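// Allocation statistics are computed by inspecting all of rosalloc's runs rather than read from
// cached counters.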
uint64_t RosAllocSpace::GetBytesAllocated() {
  size_t bytes_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated, false);
  return bytes_allocated;
}

uint64_t RosAllocSpace::GetObjectsAllocated() {
  size_t objects_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated, false);
  return objects_allocated;
}

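// Suspends all mutator threads and then walks the space; InspectAllRosAlloc takes this path when
// the caller does not already hold the mutator lock exclusively.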
void RosAllocSpace::InspectAllRosAllocWithSuspendAll(
    void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
    void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  ScopedSuspendAll ssa(__FUNCTION__);
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  rosalloc_->InspectAll(callback, arg);
  if (do_null_callback_at_end) {
    callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
  }
}

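// Chooses an inspection strategy based on how the calling thread holds the mutator lock:
// exclusive holders inspect directly, shared holders first drop their access, and all others
// simply suspend the mutators.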
void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                                       void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The mutators are already suspended. For example, a call path
    // from SignalCatcher::HandleSigQuit().
    rosalloc_->InspectAll(callback, arg);
    if (do_null_callback_at_end) {
      callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
    }
  } else if (Locks::mutator_lock_->IsSharedHeld(self)) {
    // The mutators are not suspended yet and we have shared access
    // to the mutator lock. Temporarily release the shared access by
    // transitioning to the suspended state, and suspend the mutators.
    ScopedThreadSuspension sts(self, kSuspended);
    InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
  } else {
    // The mutators are not suspended yet. Suspend the mutators.
    InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
  }
}

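// Thread-local buffer management: revocation hands a thread's thread-local rosalloc runs back
// to the allocator; the Assert* variants are debug-build checks that revocation has happened.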
size_t RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
  return rosalloc_->RevokeThreadLocalRuns(thread);
}

size_t RosAllocSpace::RevokeAllThreadLocalBuffers() {
  return rosalloc_->RevokeAllThreadLocalRuns();
}

void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    rosalloc_->AssertThreadLocalRunsAreRevoked(thread);
  }
}

void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    rosalloc_->AssertAllThreadLocalRunsAreRevoked();
  }
}

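// Resets the space to its initial state: releases the backing pages, clears the live and mark
// bitmaps, and rebuilds the rosalloc allocator while preserving the previous footprint limit.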
void RosAllocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(begin_ + starting_size_);
  delete rosalloc_;
  rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
                             NonGrowthLimitCapacity(), low_memory_mode_,
                             Runtime::Current()->IsRunningOnMemoryTool());
  SetFootprintLimit(footprint_limit);
}

void RosAllocSpace::DumpStats(std::ostream& os) {
  ScopedSuspendAll ssa(__FUNCTION__);
  rosalloc_->DumpStats(os);
}

}  // namespace space

namespace allocator {

// Callback from rosalloc when it needs to increase the footprint.
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Heap* heap = Runtime::Current()->GetHeap();
  art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
  DCHECK(rosalloc_space != nullptr);
  DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
  return rosalloc_space->MoreCore(increment);
}

}  // namespace allocator

}  // namespace gc
}  // namespace art