1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "heap.h"
18
19 #define ATRACE_TAG ATRACE_TAG_DALVIK
20 #include <cutils/trace.h>
21
22 #include <limits>
23 #include <memory>
24 #include <vector>
25
26 #include "base/allocator.h"
27 #include "base/histogram-inl.h"
28 #include "base/stl_util.h"
29 #include "common_throws.h"
30 #include "cutils/sched_policy.h"
31 #include "debugger.h"
32 #include "gc/accounting/atomic_stack.h"
33 #include "gc/accounting/card_table-inl.h"
34 #include "gc/accounting/heap_bitmap-inl.h"
35 #include "gc/accounting/mod_union_table.h"
36 #include "gc/accounting/mod_union_table-inl.h"
37 #include "gc/accounting/remembered_set.h"
38 #include "gc/accounting/space_bitmap-inl.h"
39 #include "gc/collector/concurrent_copying.h"
40 #include "gc/collector/mark_compact.h"
41 #include "gc/collector/mark_sweep-inl.h"
42 #include "gc/collector/partial_mark_sweep.h"
43 #include "gc/collector/semi_space.h"
44 #include "gc/collector/sticky_mark_sweep.h"
45 #include "gc/reference_processor.h"
46 #include "gc/space/bump_pointer_space.h"
47 #include "gc/space/dlmalloc_space-inl.h"
48 #include "gc/space/image_space.h"
49 #include "gc/space/large_object_space.h"
50 #include "gc/space/rosalloc_space-inl.h"
51 #include "gc/space/space-inl.h"
52 #include "gc/space/zygote_space.h"
53 #include "entrypoints/quick/quick_alloc_entrypoints.h"
54 #include "heap-inl.h"
55 #include "image.h"
56 #include "intern_table.h"
57 #include "mirror/art_field-inl.h"
58 #include "mirror/class-inl.h"
59 #include "mirror/object.h"
60 #include "mirror/object-inl.h"
61 #include "mirror/object_array-inl.h"
62 #include "mirror/reference-inl.h"
63 #include "os.h"
64 #include "reflection.h"
65 #include "runtime.h"
66 #include "ScopedLocalRef.h"
67 #include "scoped_thread_state_change.h"
68 #include "handle_scope-inl.h"
69 #include "thread_list.h"
70 #include "well_known_classes.h"
71
72 namespace art {
73
74 namespace gc {
75
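// Number of forced collector transitions per process state update, used to stress test the
// transition code paths; zero disables the stress test. The wait below is the sleep between
// forced transitions.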
76 static constexpr size_t kCollectorTransitionStressIterations = 0;
77 static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
78 // Minimum amount of remaining bytes before a concurrent GC is triggered.
79 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
80 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
81 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
82 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
83 // threads (lower pauses, use less memory bandwidth).
84 static constexpr double kStickyGcThroughputAdjustment = 1.0;
85 // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
86 // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
87 #if USE_ART_LOW_4G_ALLOCATOR
88 static constexpr bool kUseFreeListSpaceForLOS = true;
89 #else
90 static constexpr bool kUseFreeListSpaceForLOS = false;
91 #endif
92 // Whether or not we compact the zygote in PreZygoteFork.
93 static constexpr bool kCompactZygote = kMovingCollector;
94 // How many reserve entries are at the end of the allocation stack; these are only needed if the
95 // allocation stack overflows.
96 static constexpr size_t kAllocationStackReserveSize = 1024;
97 // Default mark stack size in bytes.
98 static const size_t kDefaultMarkStackSize = 64 * KB;
99 // Define space names.
100 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
101 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
102 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
103 static const char* kNonMovingSpaceName = "non moving space";
104 static const char* kZygoteSpaceName = "zygote space";
105 static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
106 static constexpr bool kGCALotMode = false;
107 // GC-a-lot mode uses a small allocation stack to stress test a lot of GC.
108 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
109 sizeof(mirror::HeapReference<mirror::Object>);
110 // Verify-object mode has a small allocation stack size since searching the allocation stack is slow.
111 static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
112 sizeof(mirror::HeapReference<mirror::Object>);
113 static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
114 sizeof(mirror::HeapReference<mirror::Object>);
115
116 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
117 double target_utilization, double foreground_heap_growth_multiplier,
118 size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
119 const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
120 CollectorType background_collector_type, size_t parallel_gc_threads,
121 size_t conc_gc_threads, bool low_memory_mode,
122 size_t long_pause_log_threshold, size_t long_gc_log_threshold,
123 bool ignore_max_footprint, bool use_tlab,
124 bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
125 bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
126 bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
127 uint64_t min_interval_homogeneous_space_compaction_by_oom)
128 : non_moving_space_(nullptr),
129 rosalloc_space_(nullptr),
130 dlmalloc_space_(nullptr),
131 main_space_(nullptr),
132 collector_type_(kCollectorTypeNone),
133 foreground_collector_type_(foreground_collector_type),
134 background_collector_type_(background_collector_type),
135 desired_collector_type_(foreground_collector_type_),
136 heap_trim_request_lock_(nullptr),
137 last_trim_time_(0),
138 heap_transition_or_trim_target_time_(0),
139 heap_trim_request_pending_(false),
140 parallel_gc_threads_(parallel_gc_threads),
141 conc_gc_threads_(conc_gc_threads),
142 low_memory_mode_(low_memory_mode),
143 long_pause_log_threshold_(long_pause_log_threshold),
144 long_gc_log_threshold_(long_gc_log_threshold),
145 ignore_max_footprint_(ignore_max_footprint),
146 zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
147 have_zygote_space_(false),
148 large_object_threshold_(std::numeric_limits<size_t>::max()), // Starts out disabled.
149 collector_type_running_(kCollectorTypeNone),
150 last_gc_type_(collector::kGcTypeNone),
151 next_gc_type_(collector::kGcTypePartial),
152 capacity_(capacity),
153 growth_limit_(growth_limit),
154 max_allowed_footprint_(initial_size),
155 native_footprint_gc_watermark_(initial_size),
156 native_need_to_run_finalization_(false),
157 // Initially assume we perceive jank in case the process state is never updated.
158 process_state_(kProcessStateJankPerceptible),
159 concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
160 total_bytes_freed_ever_(0),
161 total_objects_freed_ever_(0),
162 num_bytes_allocated_(0),
163 native_bytes_allocated_(0),
164 verify_missing_card_marks_(false),
165 verify_system_weaks_(false),
166 verify_pre_gc_heap_(verify_pre_gc_heap),
167 verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
168 verify_post_gc_heap_(verify_post_gc_heap),
169 verify_mod_union_table_(false),
170 verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
171 verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
172 verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
173 last_gc_time_ns_(NanoTime()),
174 allocation_rate_(0),
175 /* For GC-a-lot mode, we limit the allocation stacks to kGcAlotInterval allocations. This
176 * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
177 * verification is enabled, we limit the size of allocation stacks to speed up their
178 * searching.
179 */
180 max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
181 : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
182 kDefaultAllocationStackSize),
183 current_allocator_(kAllocatorTypeDlMalloc),
184 current_non_moving_allocator_(kAllocatorTypeNonMoving),
185 bump_pointer_space_(nullptr),
186 temp_space_(nullptr),
187 min_free_(min_free),
188 max_free_(max_free),
189 target_utilization_(target_utilization),
190 foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
191 total_wait_time_(0),
192 total_allocation_time_(0),
193 verify_object_mode_(kVerifyObjectModeDisabled),
194 disable_moving_gc_count_(0),
195 running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
196 use_tlab_(use_tlab),
197 main_space_backup_(nullptr),
198 min_interval_homogeneous_space_compaction_by_oom_(
199 min_interval_homogeneous_space_compaction_by_oom),
200 last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
201 use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
202 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
203 LOG(INFO) << "Heap() entering";
204 }
205 // If we aren't the zygote, switch to the default non zygote allocator. This may update the
206 // entrypoints.
207 const bool is_zygote = Runtime::Current()->IsZygote();
208 if (!is_zygote) {
209 large_object_threshold_ = kDefaultLargeObjectThreshold;
210 // Background compaction is currently not supported for command line runs.
211 if (background_collector_type_ != foreground_collector_type_) {
212 VLOG(heap) << "Disabling background compaction for non zygote";
213 background_collector_type_ = foreground_collector_type_;
214 }
215 }
216 ChangeCollector(desired_collector_type_);
217 live_bitmap_.reset(new accounting::HeapBitmap(this));
218 mark_bitmap_.reset(new accounting::HeapBitmap(this));
219 // Requested begin for the alloc space, to follow the mapped image and oat files
220 byte* requested_alloc_space_begin = nullptr;
221 if (!image_file_name.empty()) {
222 std::string error_msg;
223 space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
224 image_instruction_set,
225 &error_msg);
226 if (image_space != nullptr) {
227 AddSpace(image_space);
228 // Oat files referenced by image files immediately follow them in memory; ensure the alloc
229 // space isn't going to end up in the middle.
230 byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
231 CHECK_GT(oat_file_end_addr, image_space->End());
232 requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
233 } else {
234 LOG(WARNING) << "Could not create image space with image file '" << image_file_name << "'. "
235 << "Attempting to fall back to imageless running. Error was: " << error_msg;
236 }
237 }
238 /*
239 requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
240 +- nonmoving space (non_moving_space_capacity)+-
241 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
242 +-????????????????????????????????????????????+-
243 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
244 +-main alloc space / bump space 1 (capacity_) +-
245 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
246 +-????????????????????????????????????????????+-
247 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
248 +-main alloc space2 / bump space 2 (capacity_)+-
249 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
250 */
251 // We don't have hspace compaction enabled with GSS.
252 if (foreground_collector_type_ == kCollectorTypeGSS) {
253 use_homogeneous_space_compaction_for_oom_ = false;
254 }
255 bool support_homogeneous_space_compaction =
256 background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
257 use_homogeneous_space_compaction_for_oom_;
258 // We may use the same space as the main space for the non moving space if we don't need to
259 // compact from the main space.
260 // This is not the case if we support homogeneous compaction or have a moving background
261 // collector type.
262 bool separate_non_moving_space = is_zygote ||
263 support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
264 IsMovingGc(background_collector_type_);
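// GSS never uses a separate non moving space; the main space doubles as the non moving space.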
265 if (foreground_collector_type == kCollectorTypeGSS) {
266 separate_non_moving_space = false;
267 }
268 std::unique_ptr<MemMap> main_mem_map_1;
269 std::unique_ptr<MemMap> main_mem_map_2;
270 byte* request_begin = requested_alloc_space_begin;
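// If a separate non moving space will be reserved at requested_alloc_space_begin, request the
// main space mappings after it.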
271 if (request_begin != nullptr && separate_non_moving_space) {
272 request_begin += non_moving_space_capacity;
273 }
274 std::string error_str;
275 std::unique_ptr<MemMap> non_moving_space_mem_map;
276 if (separate_non_moving_space) {
277 // If we are the zygote, the non moving space becomes the zygote space when we run
278 // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
279 // rename the mem map later.
280 const char* space_name = is_zygote ? kZygoteSpaceName: kNonMovingSpaceName;
281 // Reserve the non moving mem map before the other two since it needs to be at a specific
282 // address.
283 non_moving_space_mem_map.reset(
284 MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
285 non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
286 CHECK(non_moving_space_mem_map != nullptr) << error_str;
287 // Try to reserve virtual memory at a lower address if we have a separate non moving space.
288 request_begin = reinterpret_cast<byte*>(300 * MB);
289 }
290 // Attempt to create 2 mem maps at or after the requested begin.
291 main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
292 PROT_READ | PROT_WRITE, &error_str));
293 CHECK(main_mem_map_1.get() != nullptr) << error_str;
294 if (support_homogeneous_space_compaction ||
295 background_collector_type_ == kCollectorTypeSS ||
296 foreground_collector_type_ == kCollectorTypeSS) {
297 main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
298 capacity_, PROT_READ | PROT_WRITE,
299 &error_str));
300 CHECK(main_mem_map_2.get() != nullptr) << error_str;
301 }
302 // Create the non moving space first so that bitmaps don't take up the address range.
303 if (separate_non_moving_space) {
304 // Non moving space is always dlmalloc since we currently don't have support for multiple
305 // active rosalloc spaces.
306 const size_t size = non_moving_space_mem_map->Size();
307 non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
308 non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
309 initial_size, size, size, false);
310 CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
311 << requested_alloc_space_begin;
312 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
313 AddSpace(non_moving_space_);
314 }
315 // Create other spaces based on whether or not we have a moving GC.
316 if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
317 // Create bump pointer spaces.
318 // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
319 // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
320 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
321 main_mem_map_1.release());
322 CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
323 AddSpace(bump_pointer_space_);
324 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
325 main_mem_map_2.release());
326 CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
327 AddSpace(temp_space_);
328 CHECK(separate_non_moving_space);
329 } else {
330 CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
331 CHECK(main_space_ != nullptr);
332 AddSpace(main_space_);
333 if (!separate_non_moving_space) {
334 non_moving_space_ = main_space_;
335 CHECK(!non_moving_space_->CanMoveObjects());
336 }
337 if (foreground_collector_type_ == kCollectorTypeGSS) {
338 CHECK_EQ(foreground_collector_type_, background_collector_type_);
339 // Create bump pointer spaces instead of a backup space.
340 main_mem_map_2.release();
341 bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
342 kGSSBumpPointerSpaceCapacity, nullptr);
343 CHECK(bump_pointer_space_ != nullptr);
344 AddSpace(bump_pointer_space_);
345 temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
346 kGSSBumpPointerSpaceCapacity, nullptr);
347 CHECK(temp_space_ != nullptr);
348 AddSpace(temp_space_);
349 } else if (main_mem_map_2.get() != nullptr) {
350 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
351 main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
352 growth_limit_, capacity_, name, true));
353 CHECK(main_space_backup_.get() != nullptr);
354 // Add the space so it's accounted for in the heap_begin and heap_end.
355 AddSpace(main_space_backup_.get());
356 }
357 }
358 CHECK(non_moving_space_ != nullptr);
359 CHECK(!non_moving_space_->CanMoveObjects());
360 // Allocate the large object space.
361 if (kUseFreeListSpaceForLOS) {
362 large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
363 } else {
364 large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
365 }
366 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
367 AddSpace(large_object_space_);
368 // Compute heap capacity. Continuous spaces are sorted in order of Begin().
369 CHECK(!continuous_spaces_.empty());
370 // Relies on the spaces being sorted.
371 byte* heap_begin = continuous_spaces_.front()->Begin();
372 byte* heap_end = continuous_spaces_.back()->Limit();
373 size_t heap_capacity = heap_end - heap_begin;
374 // Remove the main backup space since it slows down the GC to have unused extra spaces.
375 // TODO: Avoid needing to do this.
376 if (main_space_backup_.get() != nullptr) {
377 RemoveSpace(main_space_backup_.get());
378 }
379 // Allocate the card table.
380 card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
381 CHECK(card_table_.get() != NULL) << "Failed to create card table";
382 // Card cache for now since it makes it easier for us to update the references to the copying
383 // spaces.
384 accounting::ModUnionTable* mod_union_table =
385 new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
386 GetImageSpace());
387 CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
388 AddModUnionTable(mod_union_table);
389 if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
390 accounting::RememberedSet* non_moving_space_rem_set =
391 new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
392 CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
393 AddRememberedSet(non_moving_space_rem_set);
394 }
395 // TODO: Count objects in the image space here?
396 num_bytes_allocated_.StoreRelaxed(0);
397 mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
398 kDefaultMarkStackSize));
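// The allocation and live stacks get kAllocationStackReserveSize extra slots beyond the
// requested size; the reserve is only used if the allocation stack overflows.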
399 const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
400 allocation_stack_.reset(accounting::ObjectStack::Create(
401 "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
402 live_stack_.reset(accounting::ObjectStack::Create(
403 "live stack", max_allocation_stack_size_, alloc_stack_capacity));
404 // It's still too early to take a lock because there are no threads yet, but we can create locks
405 // now. We don't create them earlier to make it clear that you can't use locks during heap
406 // initialization.
407 gc_complete_lock_ = new Mutex("GC complete lock");
408 gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
409 *gc_complete_lock_));
410 heap_trim_request_lock_ = new Mutex("Heap trim request lock");
411 last_gc_size_ = GetBytesAllocated();
412 if (ignore_max_footprint_) {
413 SetIdealFootprint(std::numeric_limits<size_t>::max());
414 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
415 }
416 CHECK_NE(max_allowed_footprint_, 0U);
417 // Create our garbage collectors.
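// Create a non concurrent (i == 0) and a concurrent (i == 1) variant of each mark sweep
// collector.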
418 for (size_t i = 0; i < 2; ++i) {
419 const bool concurrent = i != 0;
420 garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
421 garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
422 garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
423 }
424 if (kMovingCollector) {
425 // TODO: Clean this up.
426 const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
427 semi_space_collector_ = new collector::SemiSpace(this, generational,
428 generational ? "generational" : "");
429 garbage_collectors_.push_back(semi_space_collector_);
430 concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
431 garbage_collectors_.push_back(concurrent_copying_collector_);
432 mark_compact_collector_ = new collector::MarkCompact(this);
433 garbage_collectors_.push_back(mark_compact_collector_);
434 }
435 if (GetImageSpace() != nullptr && non_moving_space_ != nullptr) {
436 // Check that there's no gap between the image space and the non moving space so that the
437 // immune region won't break (e.g. due to a large object allocated in the gap).
438 bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
439 non_moving_space_->GetMemMap());
440 if (!no_gap) {
441 MemMap::DumpMaps(LOG(ERROR));
442 LOG(FATAL) << "There's a gap between the image space and the main space";
443 }
444 }
445 if (running_on_valgrind_) {
446 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
447 }
448 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
449 LOG(INFO) << "Heap() exiting";
450 }
451 }
452
453 MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
454 int prot_flags, std::string* out_error_str) {
455 while (true) {
456 MemMap* map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity,
457 PROT_READ | PROT_WRITE, true, out_error_str);
458 if (map != nullptr || request_begin == nullptr) {
459 return map;
460 }
461 // Retry a second time with no specified request begin.
462 request_begin = nullptr;
463 }
464 return nullptr;
465 }
466
467 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
468 size_t growth_limit, size_t capacity,
469 const char* name, bool can_move_objects) {
470 space::MallocSpace* malloc_space = nullptr;
471 if (kUseRosAlloc) {
472 // Create rosalloc space.
473 malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
474 initial_size, growth_limit, capacity,
475 low_memory_mode_, can_move_objects);
476 } else {
477 malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
478 initial_size, growth_limit, capacity,
479 can_move_objects);
480 }
481 if (collector::SemiSpace::kUseRememberedSet) {
482 accounting::RememberedSet* rem_set =
483 new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
484 CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
485 AddRememberedSet(rem_set);
486 }
487 CHECK(malloc_space != nullptr) << "Failed to create " << name;
488 malloc_space->SetFootprintLimit(malloc_space->Capacity());
489 return malloc_space;
490 }
491
492 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
493 size_t capacity) {
494 // Is background compaction enabled?
495 bool can_move_objects = IsMovingGc(background_collector_type_) !=
496 IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
497 // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
498 // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
499 // from the main space to the zygote space. If background compaction is enabled, always pass in
500 // that we can move objects.
501 if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
502 // After the zygote we want this to be false if we don't have background compaction enabled so
503 // that getting primitive array elements is faster.
504 // We never have homogeneous compaction with GSS and don't need a space with movable objects.
505 can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
506 }
507 if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
508 RemoveRememberedSet(main_space_);
509 }
510 const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
511 main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
512 can_move_objects);
513 SetSpaceAsDefault(main_space_);
514 VLOG(heap) << "Created main space " << main_space_;
515 }
516
517 void Heap::ChangeAllocator(AllocatorType allocator) {
518 if (current_allocator_ != allocator) {
519 // These two allocators are only used internally and don't have any entrypoints.
520 CHECK_NE(allocator, kAllocatorTypeLOS);
521 CHECK_NE(allocator, kAllocatorTypeNonMoving);
522 current_allocator_ = allocator;
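// Swap the quick allocation entrypoints over to the new allocator while holding the runtime
// shutdown lock.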
523 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
524 SetQuickAllocEntryPointsAllocator(current_allocator_);
525 Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
526 }
527 }
528
529 void Heap::DisableMovingGc() {
530 if (IsMovingGc(foreground_collector_type_)) {
531 foreground_collector_type_ = kCollectorTypeCMS;
532 }
533 if (IsMovingGc(background_collector_type_)) {
534 background_collector_type_ = foreground_collector_type_;
535 }
536 TransitionCollector(foreground_collector_type_);
537 ThreadList* tl = Runtime::Current()->GetThreadList();
538 Thread* self = Thread::Current();
539 ScopedThreadStateChange tsc(self, kSuspended);
540 tl->SuspendAll();
541 // Something may have caused the transition to fail.
542 if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
543 CHECK(main_space_ != nullptr);
544 // The allocation stack may have non movable objects in it. We need to flush it since the GC
545 // can only handle marking allocation stack objects of one non moving space and one main
546 // space.
547 {
548 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
549 FlushAllocStack();
550 }
551 main_space_->DisableMovingObjects();
552 non_moving_space_ = main_space_;
553 CHECK(!non_moving_space_->CanMoveObjects());
554 }
555 tl->ResumeAll();
556 }
557
558 std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
559 if (!IsValidContinuousSpaceObjectAddress(klass)) {
560 return StringPrintf("<non heap address klass %p>", klass);
561 }
562 mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
563 if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
564 std::string result("[");
565 result += SafeGetClassDescriptor(component_type);
566 return result;
567 } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
568 return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
569 } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
570 return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
571 } else {
572 mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
573 if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
574 return StringPrintf("<non heap address dex_cache %p>", dex_cache);
575 }
576 const DexFile* dex_file = dex_cache->GetDexFile();
577 uint16_t class_def_idx = klass->GetDexClassDefIndex();
578 if (class_def_idx == DexFile::kDexNoIndex16) {
579 return "<class def not found>";
580 }
581 const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
582 const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
583 return dex_file->GetTypeDescriptor(type_id);
584 }
585 }
586
587 std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
588 if (obj == nullptr) {
589 return "null";
590 }
591 mirror::Class* klass = obj->GetClass<kVerifyNone>();
592 if (klass == nullptr) {
593 return "(class=null)";
594 }
595 std::string result(SafeGetClassDescriptor(klass));
596 if (obj->IsClass()) {
597 result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
598 }
599 return result;
600 }
601
602 void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
603 if (obj == nullptr) {
604 stream << "(obj=null)";
605 return;
606 }
607 if (IsAligned<kObjectAlignment>(obj)) {
608 space::Space* space = nullptr;
609 // Don't use find space since it only finds spaces which actually contain objects instead of
610 // spaces which may contain objects (e.g. cleared bump pointer spaces).
611 for (const auto& cur_space : continuous_spaces_) {
612 if (cur_space->HasAddress(obj)) {
613 space = cur_space;
614 break;
615 }
616 }
617 // Unprotect all the spaces.
618 for (const auto& space : continuous_spaces_) {
619 mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
620 }
621 stream << "Object " << obj;
622 if (space != nullptr) {
623 stream << " in space " << *space;
624 }
625 mirror::Class* klass = obj->GetClass<kVerifyNone>();
626 stream << "\nclass=" << klass;
627 if (klass != nullptr) {
628 stream << " type= " << SafePrettyTypeOf(obj);
629 }
630 // Re-protect the address we faulted on.
631 mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
632 }
633 }
634
635 bool Heap::IsCompilingBoot() const {
636 if (!Runtime::Current()->IsCompiler()) {
637 return false;
638 }
639 for (const auto& space : continuous_spaces_) {
640 if (space->IsImageSpace() || space->IsZygoteSpace()) {
641 return false;
642 }
643 }
644 return true;
645 }
646
647 bool Heap::HasImageSpace() const {
648 for (const auto& space : continuous_spaces_) {
649 if (space->IsImageSpace()) {
650 return true;
651 }
652 }
653 return false;
654 }
655
656 void Heap::IncrementDisableMovingGC(Thread* self) {
657 // Need to do this holding the lock to prevent races where the GC is about to run / running when
658 // we attempt to disable it.
659 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
660 MutexLock mu(self, *gc_complete_lock_);
661 ++disable_moving_gc_count_;
662 if (IsMovingGc(collector_type_running_)) {
663 WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
664 }
665 }
666
667 void Heap::DecrementDisableMovingGC(Thread* self) {
668 MutexLock mu(self, *gc_complete_lock_);
669 CHECK_GE(disable_moving_gc_count_, 0U);
670 --disable_moving_gc_count_;
671 }
672
673 void Heap::UpdateProcessState(ProcessState process_state) {
674 if (process_state_ != process_state) {
675 process_state_ = process_state;
676 for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
677 // Start at index 1 to avoid "is always false" warning.
678 // Have iteration 1 always transition the collector.
679 TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
680 ? foreground_collector_type_ : background_collector_type_);
681 usleep(kCollectorTransitionStressWait);
682 }
683 if (process_state_ == kProcessStateJankPerceptible) {
684 // Transition back to foreground right away to prevent jank.
685 RequestCollectorTransition(foreground_collector_type_, 0);
686 } else {
687 // Don't delay for debug builds since we may want to stress test the GC.
688 // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
689 // special handling which does a homogeneous space compaction once but then doesn't transition
690 // the collector.
691 RequestCollectorTransition(background_collector_type_,
692 kIsDebugBuild ? 0 : kCollectorTransitionWait);
693 }
694 }
695 }
696
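// Sizes the pool for the larger of the parallel and concurrent GC thread counts; if both are
// zero, no thread pool is created.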
697 void Heap::CreateThreadPool() {
698 const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
699 if (num_threads != 0) {
700 thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
701 }
702 }
703
704 void Heap::VisitObjects(ObjectCallback callback, void* arg) {
705 Thread* self = Thread::Current();
706 // GCs can move objects, so don't allow this.
707 const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
708 if (bump_pointer_space_ != nullptr) {
709 // Visit objects in bump pointer space.
710 bump_pointer_space_->Walk(callback, arg);
711 }
712 // TODO: Switch to standard begin and end to use a range-based for loop.
713 for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
714 it < end; ++it) {
715 mirror::Object* obj = *it;
716 if (obj != nullptr && obj->GetClass() != nullptr) {
717 // Avoid the race condition caused by the object not yet being written into the allocation
718 // stack or the class not yet being written in the object. Or, if kUseThreadLocalAllocationStack,
719 // there can be nulls on the allocation stack.
720 callback(obj, arg);
721 }
722 }
723 GetLiveBitmap()->Walk(callback, arg);
724 self->EndAssertNoThreadSuspension(old_cause);
725 }
726
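// The allocation stack may hold objects from the main space, the non moving space and the large
// object space; mark them live in the corresponding live bitmaps.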
727 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
728 space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
729 space::ContinuousSpace* space2 = non_moving_space_;
730 // TODO: Generalize this to n bitmaps?
731 CHECK(space1 != nullptr);
732 CHECK(space2 != nullptr);
733 MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
734 large_object_space_->GetLiveBitmap(), stack);
735 }
736
737 void Heap::DeleteThreadPool() {
738 thread_pool_.reset(nullptr);
739 }
740
741 void Heap::AddSpace(space::Space* space) {
742 CHECK(space != nullptr);
743 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
744 if (space->IsContinuousSpace()) {
745 DCHECK(!space->IsDiscontinuousSpace());
746 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
747 // Continuous spaces don't necessarily have bitmaps.
748 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
749 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
750 if (live_bitmap != nullptr) {
751 CHECK(mark_bitmap != nullptr);
752 live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
753 mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
754 }
755 continuous_spaces_.push_back(continuous_space);
756 // Ensure that spaces remain sorted in increasing order of start address.
757 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
758 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
759 return a->Begin() < b->Begin();
760 });
761 } else {
762 CHECK(space->IsDiscontinuousSpace());
763 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
764 live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
765 mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
766 discontinuous_spaces_.push_back(discontinuous_space);
767 }
768 if (space->IsAllocSpace()) {
769 alloc_spaces_.push_back(space->AsAllocSpace());
770 }
771 }
772
773 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
774 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
775 if (continuous_space->IsDlMallocSpace()) {
776 dlmalloc_space_ = continuous_space->AsDlMallocSpace();
777 } else if (continuous_space->IsRosAllocSpace()) {
778 rosalloc_space_ = continuous_space->AsRosAllocSpace();
779 }
780 }
781
782 void Heap::RemoveSpace(space::Space* space) {
783 DCHECK(space != nullptr);
784 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
785 if (space->IsContinuousSpace()) {
786 DCHECK(!space->IsDiscontinuousSpace());
787 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
788 // Continuous spaces don't necessarily have bitmaps.
789 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
790 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
791 if (live_bitmap != nullptr) {
792 DCHECK(mark_bitmap != nullptr);
793 live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
794 mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
795 }
796 auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
797 DCHECK(it != continuous_spaces_.end());
798 continuous_spaces_.erase(it);
799 } else {
800 DCHECK(space->IsDiscontinuousSpace());
801 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
802 live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
803 mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
804 auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
805 discontinuous_space);
806 DCHECK(it != discontinuous_spaces_.end());
807 discontinuous_spaces_.erase(it);
808 }
809 if (space->IsAllocSpace()) {
810 auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
811 DCHECK(it != alloc_spaces_.end());
812 alloc_spaces_.erase(it);
813 }
814 }
815
816 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
817 // Dump cumulative timings.
818 os << "Dumping cumulative Gc timings\n";
819 uint64_t total_duration = 0;
820 // Dump cumulative loggers for each GC type.
821 uint64_t total_paused_time = 0;
822 for (auto& collector : garbage_collectors_) {
823 const CumulativeLogger& logger = collector->GetCumulativeTimings();
824 const size_t iterations = logger.GetIterations();
825 const Histogram<uint64_t>& pause_histogram = collector->GetPauseHistogram();
826 if (iterations != 0 && pause_histogram.SampleSize() != 0) {
827 os << ConstDumpable<CumulativeLogger>(logger);
828 const uint64_t total_ns = logger.GetTotalNs();
829 const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
830 double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
831 const uint64_t freed_bytes = collector->GetTotalFreedBytes();
832 const uint64_t freed_objects = collector->GetTotalFreedObjects();
833 Histogram<uint64_t>::CumulativeData cumulative_data;
834 pause_histogram.CreateHistogram(&cumulative_data);
835 pause_histogram.PrintConfidenceIntervals(os, 0.99, cumulative_data);
836 os << collector->GetName() << " total time: " << PrettyDuration(total_ns)
837 << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
838 << collector->GetName() << " freed: " << freed_objects
839 << " objects with total size " << PrettySize(freed_bytes) << "\n"
840 << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
841 << PrettySize(freed_bytes / seconds) << "/s\n";
842 total_duration += total_ns;
843 total_paused_time += total_pause_ns;
844 }
845 collector->ResetMeasurements();
846 }
847 uint64_t allocation_time =
848 static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
849 if (total_duration != 0) {
850 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
851 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
852 os << "Mean GC size throughput: "
853 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
854 os << "Mean GC object throughput: "
855 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
856 }
857 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
858 os << "Total number of allocations " << total_objects_allocated << "\n";
859 uint64_t total_bytes_allocated = GetBytesAllocatedEver();
860 os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
861 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
862 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
863 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
864 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
865 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
866 if (kMeasureAllocationTime) {
867 os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
868 os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
869 << "\n";
870 }
871 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
872 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
873 BaseMutex::DumpAll(os);
874 }
875
876 Heap::~Heap() {
877 VLOG(heap) << "Starting ~Heap()";
878 STLDeleteElements(&garbage_collectors_);
879 // If we don't reset then the mark stack complains in its destructor.
880 allocation_stack_->Reset();
881 live_stack_->Reset();
882 STLDeleteValues(&mod_union_tables_);
883 STLDeleteValues(&remembered_sets_);
884 STLDeleteElements(&continuous_spaces_);
885 STLDeleteElements(&discontinuous_spaces_);
886 delete gc_complete_lock_;
887 delete heap_trim_request_lock_;
888 VLOG(heap) << "Finished ~Heap()";
889 }
890
891 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
892 bool fail_ok) const {
893 for (const auto& space : continuous_spaces_) {
894 if (space->Contains(obj)) {
895 return space;
896 }
897 }
898 if (!fail_ok) {
899 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
900 }
901 return NULL;
902 }
903
904 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
905 bool fail_ok) const {
906 for (const auto& space : discontinuous_spaces_) {
907 if (space->Contains(obj)) {
908 return space;
909 }
910 }
911 if (!fail_ok) {
912 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
913 }
914 return NULL;
915 }
916
917 space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
918 space::Space* result = FindContinuousSpaceFromObject(obj, true);
919 if (result != NULL) {
920 return result;
921 }
922 return FindDiscontinuousSpaceFromObject(obj, true);
923 }
924
925 space::ImageSpace* Heap::GetImageSpace() const {
926 for (const auto& space : continuous_spaces_) {
927 if (space->IsImageSpace()) {
928 return space->AsImageSpace();
929 }
930 }
931 return NULL;
932 }
933
934 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
935 std::ostringstream oss;
936 size_t total_bytes_free = GetFreeMemory();
937 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
938 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
939 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
940 if (total_bytes_free >= byte_count) {
941 space::AllocSpace* space = nullptr;
942 if (allocator_type == kAllocatorTypeNonMoving) {
943 space = non_moving_space_;
944 } else if (allocator_type == kAllocatorTypeRosAlloc ||
945 allocator_type == kAllocatorTypeDlMalloc) {
946 space = main_space_;
947 } else if (allocator_type == kAllocatorTypeBumpPointer ||
948 allocator_type == kAllocatorTypeTLAB) {
949 space = bump_pointer_space_;
950 }
951 if (space != nullptr) {
952 space->LogFragmentationAllocFailure(oss, byte_count);
953 }
954 }
955 self->ThrowOutOfMemoryError(oss.str().c_str());
956 }
957
958 void Heap::DoPendingTransitionOrTrim() {
959 Thread* self = Thread::Current();
960 CollectorType desired_collector_type;
961 // Wait until we reach the desired transition time.
962 while (true) {
963 uint64_t wait_time;
964 {
965 MutexLock mu(self, *heap_trim_request_lock_);
966 desired_collector_type = desired_collector_type_;
967 uint64_t current_time = NanoTime();
968 if (current_time >= heap_transition_or_trim_target_time_) {
969 break;
970 }
971 wait_time = heap_transition_or_trim_target_time_ - current_time;
972 }
973 ScopedThreadStateChange tsc(self, kSleeping);
974 usleep(wait_time / 1000); // Usleep takes microseconds.
975 }
976 // Launch homogeneous space compaction if it is desired.
977 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
978 if (!CareAboutPauseTimes()) {
979 PerformHomogeneousSpaceCompact();
980 }
981 // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
982 desired_collector_type = collector_type_;
983 return;
984 }
985 // Transition the collector if the desired collector type is not the same as the current
986 // collector type.
987 TransitionCollector(desired_collector_type);
988 if (!CareAboutPauseTimes()) {
989 // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
990 // about pauses.
991 Runtime* runtime = Runtime::Current();
992 runtime->GetThreadList()->SuspendAll();
993 uint64_t start_time = NanoTime();
994 size_t count = runtime->GetMonitorList()->DeflateMonitors();
995 VLOG(heap) << "Deflating " << count << " monitors took "
996 << PrettyDuration(NanoTime() - start_time);
997 runtime->GetThreadList()->ResumeAll();
998 }
999 // Do a heap trim if it is needed.
1000 Trim();
1001 }
1002
1003 class TrimIndirectReferenceTableClosure : public Closure {
1004 public:
1005 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1006 }
1007 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1008 ATRACE_BEGIN("Trimming reference table");
1009 thread->GetJniEnv()->locals.Trim();
1010 ATRACE_END();
1011 barrier_->Pass(Thread::Current());
1012 }
1013
1014 private:
1015 Barrier* const barrier_;
1016 };
1017
1018
1019 void Heap::Trim() {
1020 Thread* self = Thread::Current();
1021 {
1022 MutexLock mu(self, *heap_trim_request_lock_);
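// Skip the trim if none is pending or if one already ran within the last kHeapTrimWait
// nanoseconds.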
1023 if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
1024 return;
1025 }
1026 last_trim_time_ = NanoTime();
1027 heap_trim_request_pending_ = false;
1028 }
1029 {
1030 // Need to do this before acquiring the locks since we don't want to get suspended while
1031 // holding any locks.
1032 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1033 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1034 // trimming.
1035 MutexLock mu(self, *gc_complete_lock_);
1036 // Ensure there is only one GC at a time.
1037 WaitForGcToCompleteLocked(kGcCauseTrim, self);
1038 collector_type_running_ = kCollectorTypeHeapTrim;
1039 }
1040 // Trim reference tables.
1041 {
1042 ScopedObjectAccess soa(self);
1043 JavaVMExt* vm = soa.Vm();
1044 // Trim globals indirect reference table.
1045 {
1046 WriterMutexLock mu(self, vm->globals_lock);
1047 vm->globals.Trim();
1048 }
1049 // Trim locals indirect reference tables.
1050 Barrier barrier(0);
1051 TrimIndirectReferenceTableClosure closure(&barrier);
1052 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1053 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1054 barrier.Increment(self, barrier_count);
1055 }
1056 uint64_t start_ns = NanoTime();
1057 // Trim the managed spaces.
1058 uint64_t total_alloc_space_allocated = 0;
1059 uint64_t total_alloc_space_size = 0;
1060 uint64_t managed_reclaimed = 0;
1061 for (const auto& space : continuous_spaces_) {
1062 if (space->IsMallocSpace()) {
1063 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1064 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1065 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1066 // for a long period of time.
1067 managed_reclaimed += malloc_space->Trim();
1068 }
1069 total_alloc_space_size += malloc_space->Size();
1070 }
1071 }
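// Compute malloc space utilization: exclude large object allocations and subtract bump pointer
// space bytes since they are not part of the malloc spaces.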
1072 total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
1073 if (bump_pointer_space_ != nullptr) {
1074 total_alloc_space_allocated -= bump_pointer_space_->Size();
1075 }
1076 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1077 static_cast<float>(total_alloc_space_size);
1078 uint64_t gc_heap_end_ns = NanoTime();
1079 // We never move things in the native heap, so we can finish the GC at this point.
1080 FinishGC(self, collector::kGcTypeNone);
1081 size_t native_reclaimed = 0;
1082 // Only trim the native heap if we don't care about pauses.
1083 if (!CareAboutPauseTimes()) {
1084 #if defined(USE_DLMALLOC)
1085 // Trim the native heap.
1086 dlmalloc_trim(0);
1087 dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
1088 #elif defined(USE_JEMALLOC)
1089 // Jemalloc does its own internal trimming.
1090 #else
1091 UNIMPLEMENTED(WARNING) << "Add trimming support";
1092 #endif
1093 }
1094 uint64_t end_ns = NanoTime();
1095 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1096 << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
1097 << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
1098 << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
1099 << "%.";
1100 }
1101
1102 bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1103 // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1104 // taking the lock.
1105 if (obj == nullptr) {
1106 return true;
1107 }
1108 return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
1109 }
1110
1111 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1112 return FindContinuousSpaceFromObject(obj, true) != nullptr;
1113 }
1114
1115 bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1116 if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1117 return false;
1118 }
1119 for (const auto& space : continuous_spaces_) {
1120 if (space->HasAddress(obj)) {
1121 return true;
1122 }
1123 }
1124 return false;
1125 }
1126
1127 bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
1128 bool search_live_stack, bool sorted) {
1129 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1130 return false;
1131 }
1132 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
1133 mirror::Class* klass = obj->GetClass<kVerifyNone>();
1134 if (obj == klass) {
1135 // This case happens for java.lang.Class.
1136 return true;
1137 }
1138 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1139 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
1140 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1141 // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
1142 return temp_space_->Contains(obj);
1143 }
1144 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1145 space::DiscontinuousSpace* d_space = nullptr;
1146 if (c_space != nullptr) {
1147 if (c_space->GetLiveBitmap()->Test(obj)) {
1148 return true;
1149 }
1150 } else {
1151 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1152 if (d_space != nullptr) {
1153 if (d_space->GetLiveBitmap()->Test(obj)) {
1154 return true;
1155 }
1156 }
1157 }
1158 // This is covering the allocation/live stack swapping that is done without mutators suspended.
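// Retry a bounded number of times (once if the stacks are sorted, otherwise five), sleeping
// 10ms between attempts, since the stacks may be swapped concurrently.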
1159 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1160 if (i > 0) {
1161 NanoSleep(MsToNs(10));
1162 }
1163 if (search_allocation_stack) {
1164 if (sorted) {
1165 if (allocation_stack_->ContainsSorted(obj)) {
1166 return true;
1167 }
1168 } else if (allocation_stack_->Contains(obj)) {
1169 return true;
1170 }
1171 }
1172
1173 if (search_live_stack) {
1174 if (sorted) {
1175 if (live_stack_->ContainsSorted(obj)) {
1176 return true;
1177 }
1178 } else if (live_stack_->Contains(obj)) {
1179 return true;
1180 }
1181 }
1182 }
1183 // We need to check the bitmaps again since there is a race where we mark something as live and
1184 // then clear the stack containing it.
1185 if (c_space != nullptr) {
1186 if (c_space->GetLiveBitmap()->Test(obj)) {
1187 return true;
1188 }
1189 } else {
1190 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1191 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
1192 return true;
1193 }
1194 }
1195 return false;
1196 }
1197
1198 std::string Heap::DumpSpaces() const {
1199 std::ostringstream oss;
1200 DumpSpaces(oss);
1201 return oss.str();
1202 }
1203
1204 void Heap::DumpSpaces(std::ostream& stream) const {
1205 for (const auto& space : continuous_spaces_) {
1206 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1207 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1208 stream << space << " " << *space << "\n";
1209 if (live_bitmap != nullptr) {
1210 stream << live_bitmap << " " << *live_bitmap << "\n";
1211 }
1212 if (mark_bitmap != nullptr) {
1213 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1214 }
1215 }
1216 for (const auto& space : discontinuous_spaces_) {
1217 stream << space << " " << *space << "\n";
1218 }
1219 }
1220
1221 void Heap::VerifyObjectBody(mirror::Object* obj) {
1222 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1223 return;
1224 }
1225
1226 // Ignore early dawn of the universe verifications.
1227 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1228 return;
1229 }
1230 CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
1231 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1232 CHECK(c != nullptr) << "Null class in object " << obj;
1233 CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
1234 CHECK(VerifyClassClass(c));
1235
1236 if (verify_object_mode_ > kVerifyObjectModeFast) {
1237 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1238 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1239 }
1240 }
1241
1242 void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
1243 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
1244 }
1245
1246 void Heap::VerifyHeap() {
1247 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1248 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
1249 }
1250
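// Updates the allocation accounting and, if stats are enabled, the per-thread and global
// RuntimeStats after objects are freed. freed_bytes may be negative during compaction transitions.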
1251 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1252 // Use signed comparison since freed bytes can be negative when background compaction foreground
1253 // transitions occur. This is caused by moving objects from a bump pointer space to a free list
1254 // backed space, which typically increases the memory footprint due to padding and binning.
1255 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1256 // Note: This relies on two's complement for handling negative freed_bytes.
1257 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1258 if (Runtime::Current()->HasStatsEnabled()) {
1259 RuntimeStats* thread_stats = Thread::Current()->GetStats();
1260 thread_stats->freed_objects += freed_objects;
1261 thread_stats->freed_bytes += freed_bytes;
1262 // TODO: Do this concurrently.
1263 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1264 global_stats->freed_objects += freed_objects;
1265 global_stats->freed_bytes += freed_bytes;
1266 }
1267 }
1268
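// Returns the RosAllocSpace backed by the given RosAlloc allocator, or null if there is none.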
1269 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1270 for (const auto& space : continuous_spaces_) {
1271 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1272 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1273 return space->AsContinuousSpace()->AsRosAllocSpace();
1274 }
1275 }
1276 }
1277 return nullptr;
1278 }
1279
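// Slow path allocation taken after the fast path fails: wait for any running GC, retry, run
// progressively more aggressive GCs from gc_plan_, grow the heap, clear SoftReferences, and as a
// last resort attempt homogeneous space compaction or disable the moving GC before throwing OOME.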
1280 mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
1281 size_t alloc_size, size_t* bytes_allocated,
1282 size_t* usable_size,
1283 mirror::Class** klass) {
1284 bool was_default_allocator = allocator == GetCurrentAllocator();
1285 // Make sure there is no pending exception since we may need to throw an OOME.
1286 self->AssertNoPendingException();
1287 DCHECK(klass != nullptr);
1288 StackHandleScope<1> hs(self);
1289 HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1290 klass = nullptr; // Invalidate for safety.
1291 // The allocation failed. If the GC is running, block until it completes, and then retry the
1292 // allocation.
1293 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1294 if (last_gc != collector::kGcTypeNone) {
1295 // If we were the default allocator but the allocator changed while we were suspended,
1296 // abort the allocation.
1297 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1298 return nullptr;
1299 }
1300 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1301 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1302 usable_size);
1303 if (ptr != nullptr) {
1304 return ptr;
1305 }
1306 }
1307
1308 collector::GcType tried_type = next_gc_type_;
1309 const bool gc_ran =
1310 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1311 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1312 return nullptr;
1313 }
1314 if (gc_ran) {
1315 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1316 usable_size);
1317 if (ptr != nullptr) {
1318 return ptr;
1319 }
1320 }
1321
1322 // Loop through our different GC types and try to GC until we get enough free memory.
1323 for (collector::GcType gc_type : gc_plan_) {
1324 if (gc_type == tried_type) {
1325 continue;
1326 }
1327 // Attempt to run the collector, if we succeed, re-try the allocation.
1328 const bool gc_ran =
1329 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1330 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1331 return nullptr;
1332 }
1333 if (gc_ran) {
1334 // Did we free sufficient memory for the allocation to succeed?
1335 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1336 usable_size);
1337 if (ptr != nullptr) {
1338 return ptr;
1339 }
1340 }
1341 }
1342 // Allocations have failed after GCs; this is an exceptional state.
1343 // Try harder, growing the heap if necessary.
1344 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1345 usable_size);
1346 if (ptr != nullptr) {
1347 return ptr;
1348 }
1349 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1350 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1351 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1352 // OOME.
1353 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1354 << " allocation";
1355 // TODO: Run finalization, but this may cause more allocations to occur.
1356 // We don't need a WaitForGcToComplete here either.
1357 DCHECK(!gc_plan_.empty());
1358 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1359 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1360 return nullptr;
1361 }
1362 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
1363 if (ptr == nullptr) {
1364 const uint64_t current_time = NanoTime();
1365 switch (allocator) {
1366 case kAllocatorTypeRosAlloc:
1367 // Fall-through.
1368 case kAllocatorTypeDlMalloc: {
1369 if (use_homogeneous_space_compaction_for_oom_ &&
1370 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1371 min_interval_homogeneous_space_compaction_by_oom_) {
1372 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1373 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1374 switch (result) {
1375 case HomogeneousSpaceCompactResult::kSuccess:
1376 // If the allocation succeeds, we delayed an OOM.
1377 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1378 usable_size);
1379 if (ptr != nullptr) {
1380 count_delayed_oom_++;
1381 }
1382 break;
1383 case HomogeneousSpaceCompactResult::kErrorReject:
1384 // Reject due to disabled moving GC.
1385 break;
1386 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1387 // Throw OOM by default.
1388 break;
1389 default: {
1390 LOG(FATAL) << "Unimplemented homogeneous space compaction result "
1391 << static_cast<size_t>(result);
1392 }
1393 }
1394 // Always print that we ran homogeneous space compaction since this can cause jank.
1395 VLOG(heap) << "Ran heap homogeneous space compaction, "
1396 << " requested defragmentation "
1397 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1398 << " performed defragmentation "
1399 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1400 << " ignored homogeneous space compaction "
1401 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1402 << " delayed count = "
1403 << count_delayed_oom_.LoadSequentiallyConsistent();
1404 }
1405 break;
1406 }
1407 case kAllocatorTypeNonMoving: {
1408 // Try to transition the heap if the allocation failure was due to the space being full.
1409 if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
1410 // If we aren't out of memory then the OOM was probably from the non moving space being
1411 // full. Attempt to disable compaction and turn the main space into a non moving space.
1412 DisableMovingGc();
1413 // If we are still a moving GC then something must have caused the transition to fail.
1414 if (IsMovingGc(collector_type_)) {
1415 MutexLock mu(self, *gc_complete_lock_);
1416 // If we couldn't disable moving GC, just throw OOME and return null.
1417 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1418 << disable_moving_gc_count_;
1419 } else {
1420 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1421 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1422 usable_size);
1423 }
1424 }
1425 break;
1426 }
1427 default: {
1428 // Do nothing for other allocators.
1429 }
1430 }
1431 }
1432 // If the allocation hasn't succeeded by this point, throw an OOM error.
1433 if (ptr == nullptr) {
1434 ThrowOutOfMemoryError(self, alloc_size, allocator);
1435 }
1436 return ptr;
1437 }
1438
1439 void Heap::SetTargetHeapUtilization(float target) {
1440 DCHECK_GT(target, 0.0f); // asserted in Java code
1441 DCHECK_LT(target, 1.0f);
1442 target_utilization_ = target;
1443 }
1444
1445 size_t Heap::GetObjectsAllocated() const {
1446 size_t total = 0;
1447 for (space::AllocSpace* space : alloc_spaces_) {
1448 total += space->GetObjectsAllocated();
1449 }
1450 return total;
1451 }
1452
1453 uint64_t Heap::GetObjectsAllocatedEver() const {
1454 return GetObjectsFreedEver() + GetObjectsAllocated();
1455 }
1456
1457 uint64_t Heap::GetBytesAllocatedEver() const {
1458 return GetBytesFreedEver() + GetBytesAllocated();
1459 }
1460
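// Heap visitor which counts, for each class in the given list, how many objects are instances of
// it, optionally counting instances of subclasses via IsAssignableFrom.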
1461 class InstanceCounter {
1462 public:
1463 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
1464 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1465 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
1466 }
1467 static void Callback(mirror::Object* obj, void* arg)
1468 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1469 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1470 mirror::Class* instance_class = obj->GetClass();
1471 CHECK(instance_class != nullptr);
1472 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1473 if (instance_counter->use_is_assignable_from_) {
1474 if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
1475 ++instance_counter->counts_[i];
1476 }
1477 } else if (instance_class == instance_counter->classes_[i]) {
1478 ++instance_counter->counts_[i];
1479 }
1480 }
1481 }
1482
1483 private:
1484 const std::vector<mirror::Class*>& classes_;
1485 bool use_is_assignable_from_;
1486 uint64_t* const counts_;
1487 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1488 };
1489
1490 void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
1491 uint64_t* counts) {
1492 // Can't do any GC in this function since this may move classes.
1493 Thread* self = Thread::Current();
1494 auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
1495 InstanceCounter counter(classes, use_is_assignable_from, counts);
1496 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1497 VisitObjects(InstanceCounter::Callback, &counter);
1498 self->EndAssertNoThreadSuspension(old_cause);
1499 }
1500
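// Heap visitor which collects up to max_count instances of a single class (0 means unlimited).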
1501 class InstanceCollector {
1502 public:
1503 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
1504 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1505 : class_(c), max_count_(max_count), instances_(instances) {
1506 }
1507 static void Callback(mirror::Object* obj, void* arg)
1508 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1509 DCHECK(arg != nullptr);
1510 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1511 mirror::Class* instance_class = obj->GetClass();
1512 if (instance_class == instance_collector->class_) {
1513 if (instance_collector->max_count_ == 0 ||
1514 instance_collector->instances_.size() < instance_collector->max_count_) {
1515 instance_collector->instances_.push_back(obj);
1516 }
1517 }
1518 }
1519
1520 private:
1521 mirror::Class* class_;
1522 uint32_t max_count_;
1523 std::vector<mirror::Object*>& instances_;
1524 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1525 };
1526
1527 void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1528 std::vector<mirror::Object*>& instances) {
1529 // Can't do any GC in this function since this may move classes.
1530 Thread* self = Thread::Current();
1531 auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
1532 InstanceCollector collector(c, max_count, instances);
1533 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1534 VisitObjects(&InstanceCollector::Callback, &collector);
1535 self->EndAssertNoThreadSuspension(old_cause);
1536 }
1537
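// Heap visitor which records objects that hold a reference to the given target object, up to
// max_count (0 means unlimited).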
1538 class ReferringObjectsFinder {
1539 public:
1540 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1541 std::vector<mirror::Object*>& referring_objects)
1542 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1543 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1544 }
1545
1546 static void Callback(mirror::Object* obj, void* arg)
1547 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1548 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1549 }
1550
1551 // For bitmap Visit.
1552 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1553 // annotalysis on visitors.
1554 void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1555 o->VisitReferences<true>(*this, VoidFunctor());
1556 }
1557
1558 // For Object::VisitReferences.
1559 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1560 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1561 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1562 if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1563 referring_objects_.push_back(obj);
1564 }
1565 }
1566
1567 private:
1568 mirror::Object* object_;
1569 uint32_t max_count_;
1570 std::vector<mirror::Object*>& referring_objects_;
1571 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1572 };
1573
1574 void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1575 std::vector<mirror::Object*>& referring_objects) {
1576 // Can't do any GC in this function since this may move the object o.
1577 Thread* self = Thread::Current();
1578 auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
1579 ReferringObjectsFinder finder(o, max_count, referring_objects);
1580 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1581 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
1582 self->EndAssertNoThreadSuspension(old_cause);
1583 }
1584
1585 void Heap::CollectGarbage(bool clear_soft_references) {
1586 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1587 // last GC will not have necessarily been cleared.
1588 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
1589 }
1590
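// Compacts the main malloc space into its backup space to reduce fragmentation, typically in
// response to an allocation failure. Requires suspending all threads for the copy.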
1591 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1592 Thread* self = Thread::Current();
1593 // Increment the count of requested homogeneous space compactions.
1594 count_requested_homogeneous_space_compaction_++;
1595 // Store performed homogeneous space compaction at a new request arrival.
1596 ThreadList* tl = Runtime::Current()->GetThreadList();
1597 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1598 Locks::mutator_lock_->AssertNotHeld(self);
1599 {
1600 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1601 MutexLock mu(self, *gc_complete_lock_);
1602 // Ensure there is only one GC at a time.
1603 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
1604 // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable count
1605 // is non zero.
1606 // If the collector type changed to something which doesn't benefit from homogeneous space compaction,
1607 // exit.
1608 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
1609 !main_space_->CanMoveObjects()) {
1610 return HomogeneousSpaceCompactResult::kErrorReject;
1611 }
1612 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1613 }
1614 if (Runtime::Current()->IsShuttingDown(self)) {
1615 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1616 // cause objects to get finalized.
1617 FinishGC(self, collector::kGcTypeNone);
1618 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1619 }
1620 // Suspend all threads.
1621 tl->SuspendAll();
1622 uint64_t start_time = NanoTime();
1623 // Launch compaction.
1624 space::MallocSpace* to_space = main_space_backup_.release();
1625 space::MallocSpace* from_space = main_space_;
1626 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1627 const uint64_t space_size_before_compaction = from_space->Size();
1628 AddSpace(to_space);
1629 // Make sure that we will have enough room to copy.
1630 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
1631 Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
1632 // Leave as prot read so that we can still run ROSAlloc verification on this space.
1633 from_space->GetMemMap()->Protect(PROT_READ);
1634 const uint64_t space_size_after_compaction = to_space->Size();
1635 main_space_ = to_space;
1636 main_space_backup_.reset(from_space);
1637 RemoveSpace(from_space);
1638 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
1639 // Update performed homogeneous space compaction count.
1640 count_performed_homogeneous_space_compaction_++;
1641 // Print the statistics log and resume all threads.
1642 uint64_t duration = NanoTime() - start_time;
1643 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1644 << PrettySize(space_size_before_compaction) << " -> "
1645 << PrettySize(space_size_after_compaction) << " compact-ratio: "
1646 << std::fixed << static_cast<double>(space_size_after_compaction) /
1647 static_cast<double>(space_size_before_compaction);
1648 tl->ResumeAll();
1649 // Finish GC.
1650 reference_processor_.EnqueueClearedReferences(self);
1651 GrowForUtilization(semi_space_collector_);
1652 FinishGC(self, collector::kGcTypeFull);
1653 return HomogeneousSpaceCompactResult::kSuccess;
1654 }
1655
1656
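// Switches the heap to a different collector type. Transitions between moving and non-moving
// collectors require copying the heap contents between the main malloc space and the bump pointer
// spaces while all threads are suspended.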
1657 void Heap::TransitionCollector(CollectorType collector_type) {
1658 if (collector_type == collector_type_) {
1659 return;
1660 }
1661 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1662 << " -> " << static_cast<int>(collector_type);
1663 uint64_t start_time = NanoTime();
1664 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1665 Runtime* const runtime = Runtime::Current();
1666 ThreadList* const tl = runtime->GetThreadList();
1667 Thread* const self = Thread::Current();
1668 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1669 Locks::mutator_lock_->AssertNotHeld(self);
1670 // Busy wait until we can GC (StartGC can fail if we have a non-zero
1671 // compacting_gc_disable_count_; this should rarely occur).
1672 for (;;) {
1673 {
1674 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1675 MutexLock mu(self, *gc_complete_lock_);
1676 // Ensure there is only one GC at a time.
1677 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
1678 // Currently we only need a heap transition if we switch from a moving collector to a
1679 // non-moving one, or vice versa.
1680 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
1681 // If someone else beat us to it and changed the collector before we could, exit.
1682 // This is safe to do before the suspend all since we set the collector_type_running_ before
1683 // we exit the loop. If another thread attempts to do the heap transition before we exit,
1684 // then it would get blocked on WaitForGcToCompleteLocked.
1685 if (collector_type == collector_type_) {
1686 return;
1687 }
1688 // GC can be disabled if someone has called GetPrimitiveArrayCritical but not yet released it.
1689 if (!copying_transition || disable_moving_gc_count_ == 0) {
1690 // TODO: Not hard code in semi-space collector?
1691 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1692 break;
1693 }
1694 }
1695 usleep(1000);
1696 }
1697 if (runtime->IsShuttingDown(self)) {
1698 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1699 // cause objects to get finalized.
1700 FinishGC(self, collector::kGcTypeNone);
1701 return;
1702 }
1703 tl->SuspendAll();
1704 switch (collector_type) {
1705 case kCollectorTypeSS: {
1706 if (!IsMovingGc(collector_type_)) {
1707 // Create the bump pointer space from the backup space.
1708 CHECK(main_space_backup_ != nullptr);
1709 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
1710 // We are transitioning from non moving GC -> moving GC. Since we copied from the bump
1711 // pointer space during the last transition, it will be protected.
1712 CHECK(mem_map != nullptr);
1713 mem_map->Protect(PROT_READ | PROT_WRITE);
1714 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
1715 mem_map.release());
1716 AddSpace(bump_pointer_space_);
1717 Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
1718 // Use the now empty main space mem map for the bump pointer temp space.
1719 mem_map.reset(main_space_->ReleaseMemMap());
1720 // Unset the pointers just in case.
1721 if (dlmalloc_space_ == main_space_) {
1722 dlmalloc_space_ = nullptr;
1723 } else if (rosalloc_space_ == main_space_) {
1724 rosalloc_space_ = nullptr;
1725 }
1726 // Remove the main space so that we don't try to trim it; this doesn't work for debug
1727 // builds since RosAlloc attempts to read the magic number from a protected page.
1728 RemoveSpace(main_space_);
1729 RemoveRememberedSet(main_space_);
1730 delete main_space_; // Delete the space since it has been removed.
1731 main_space_ = nullptr;
1732 RemoveRememberedSet(main_space_backup_.get());
1733 main_space_backup_.reset(nullptr); // Deletes the space.
1734 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
1735 mem_map.release());
1736 AddSpace(temp_space_);
1737 }
1738 break;
1739 }
1740 case kCollectorTypeMS:
1741 // Fall through.
1742 case kCollectorTypeCMS: {
1743 if (IsMovingGc(collector_type_)) {
1744 CHECK(temp_space_ != nullptr);
1745 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
1746 RemoveSpace(temp_space_);
1747 temp_space_ = nullptr;
1748 mem_map->Protect(PROT_READ | PROT_WRITE);
1749 CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize,
1750 std::min(mem_map->Size(), growth_limit_), mem_map->Size());
1751 mem_map.release();
1752 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
1753 AddSpace(main_space_);
1754 Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
1755 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
1756 RemoveSpace(bump_pointer_space_);
1757 bump_pointer_space_ = nullptr;
1758 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
1759 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
1760 if (kIsDebugBuild && kUseRosAlloc) {
1761 mem_map->Protect(PROT_READ | PROT_WRITE);
1762 }
1763 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
1764 mem_map.get(), kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
1765 mem_map->Size(), name, true));
1766 if (kIsDebugBuild && kUseRosAlloc) {
1767 mem_map->Protect(PROT_NONE);
1768 }
1769 mem_map.release();
1770 }
1771 break;
1772 }
1773 default: {
1774 LOG(FATAL) << "Attempted to transition to invalid collector type "
1775 << static_cast<size_t>(collector_type);
1776 break;
1777 }
1778 }
1779 ChangeCollector(collector_type);
1780 tl->ResumeAll();
1781 // Can't call into Java code with all threads suspended.
1782 reference_processor_.EnqueueClearedReferences(self);
1783 uint64_t duration = NanoTime() - start_time;
1784 GrowForUtilization(semi_space_collector_);
1785 FinishGC(self, collector::kGcTypeFull);
1786 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1787 int32_t delta_allocated = before_allocated - after_allocated;
1788 std::string saved_str;
1789 if (delta_allocated >= 0) {
1790 saved_str = " saved at least " + PrettySize(delta_allocated);
1791 } else {
1792 saved_str = " expanded " + PrettySize(-delta_allocated);
1793 }
1794 VLOG(heap) << "Heap transition to " << process_state_ << " took "
1795 << PrettyDuration(duration) << saved_str;
1796 }
1797
1798 void Heap::ChangeCollector(CollectorType collector_type) {
1799 // TODO: Only do this with all mutators suspended to avoid races.
1800 if (collector_type != collector_type_) {
1801 if (collector_type == kCollectorTypeMC) {
1802 // Don't allow mark compact unless support is compiled in.
1803 CHECK(kMarkCompactSupport);
1804 }
1805 collector_type_ = collector_type;
1806 gc_plan_.clear();
1807 switch (collector_type_) {
1808 case kCollectorTypeCC: // Fall-through.
1809 case kCollectorTypeMC: // Fall-through.
1810 case kCollectorTypeSS: // Fall-through.
1811 case kCollectorTypeGSS: {
1812 gc_plan_.push_back(collector::kGcTypeFull);
1813 if (use_tlab_) {
1814 ChangeAllocator(kAllocatorTypeTLAB);
1815 } else {
1816 ChangeAllocator(kAllocatorTypeBumpPointer);
1817 }
1818 break;
1819 }
1820 case kCollectorTypeMS: {
1821 gc_plan_.push_back(collector::kGcTypeSticky);
1822 gc_plan_.push_back(collector::kGcTypePartial);
1823 gc_plan_.push_back(collector::kGcTypeFull);
1824 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1825 break;
1826 }
1827 case kCollectorTypeCMS: {
1828 gc_plan_.push_back(collector::kGcTypeSticky);
1829 gc_plan_.push_back(collector::kGcTypePartial);
1830 gc_plan_.push_back(collector::kGcTypeFull);
1831 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1832 break;
1833 }
1834 default: {
1835 LOG(FATAL) << "Unimplemented";
1836 }
1837 }
1838 if (IsGcConcurrent()) {
1839 concurrent_start_bytes_ =
1840 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1841 } else {
1842 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1843 }
1844 }
1845 }
1846
1847 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
1848 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
1849 public:
1850 explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
1851 bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
1852 }
1853
1854 void BuildBins(space::ContinuousSpace* space) {
1855 bin_live_bitmap_ = space->GetLiveBitmap();
1856 bin_mark_bitmap_ = space->GetMarkBitmap();
1857 BinContext context;
1858 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1859 context.collector_ = this;
1860 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1861 // Note: This requires traversing the space in increasing order of object addresses.
1862 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1863 // Add the last bin which spans after the last object to the end of the space.
1864 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1865 }
1866
1867 private:
1868 struct BinContext {
1869 uintptr_t prev_; // The end of the previous object.
1870 ZygoteCompactingCollector* collector_;
1871 };
1872 // Maps from bin sizes to locations.
1873 std::multimap<size_t, uintptr_t> bins_;
1874 // Live bitmap of the space which contains the bins.
1875 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
1876 // Mark bitmap of the space which contains the bins.
1877 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
1878
1879 static void Callback(mirror::Object* obj, void* arg)
1880 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1881 DCHECK(arg != nullptr);
1882 BinContext* context = reinterpret_cast<BinContext*>(arg);
1883 ZygoteCompactingCollector* collector = context->collector_;
1884 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1885 size_t bin_size = object_addr - context->prev_;
1886 // Add the bin consisting of the end of the previous object to the start of the current object.
1887 collector->AddBin(bin_size, context->prev_);
1888 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1889 }
1890
1891 void AddBin(size_t size, uintptr_t position) {
1892 if (size != 0) {
1893 bins_.insert(std::make_pair(size, position));
1894 }
1895 }
1896
1897 virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
1898 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
1899 // allocator.
1900 return false;
1901 }
1902
1903 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1904 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1905 size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
1906 mirror::Object* forward_address;
1907 // Find the smallest bin which we can move obj in.
1908 auto it = bins_.lower_bound(object_size);
1909 if (it == bins_.end()) {
1910 // No available space in the bins, place it in the target space instead (grows the zygote
1911 // space).
1912 size_t bytes_allocated;
1913 forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
1914 if (to_space_live_bitmap_ != nullptr) {
1915 to_space_live_bitmap_->Set(forward_address);
1916 } else {
1917 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1918 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
1919 }
1920 } else {
1921 size_t size = it->first;
1922 uintptr_t pos = it->second;
1923 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
1924 forward_address = reinterpret_cast<mirror::Object*>(pos);
1925 // Set the live and mark bits so that sweeping system weaks works properly.
1926 bin_live_bitmap_->Set(forward_address);
1927 bin_mark_bitmap_->Set(forward_address);
1928 DCHECK_GE(size, object_size);
1929 AddBin(size - object_size, pos + object_size); // Add a new bin with the remaining space.
1930 }
1931 // Copy the object over to its new location.
1932 memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
1933 if (kUseBakerOrBrooksReadBarrier) {
1934 obj->AssertReadBarrierPointer();
1935 if (kUseBrooksReadBarrier) {
1936 DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
1937 forward_address->SetReadBarrierPointer(forward_address);
1938 }
1939 forward_address->AssertReadBarrierPointer();
1940 }
1941 return forward_address;
1942 }
1943 };
1944
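// Restores the original live/mark bitmaps of any alloc space whose bitmaps were bound together
// for the last GC.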
1945 void Heap::UnBindBitmaps() {
1946 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
1947 for (const auto& space : GetContinuousSpaces()) {
1948 if (space->IsContinuousMemMapAllocSpace()) {
1949 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1950 if (alloc_space->HasBoundBitmaps()) {
1951 alloc_space->UnBindBitmaps();
1952 }
1953 }
1954 }
1955 }
1956
1957 void Heap::PreZygoteFork() {
1958 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1959 Thread* self = Thread::Current();
1960 MutexLock mu(self, zygote_creation_lock_);
1961 // Try to see if we have any Zygote spaces.
1962 if (have_zygote_space_) {
1963 return;
1964 }
1965 Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
1966 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
1967 VLOG(heap) << "Starting PreZygoteFork";
1968 // Trim the pages at the end of the non moving space.
1969 non_moving_space_->Trim();
1970 // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
1971 // there.
1972 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1973 const bool same_space = non_moving_space_ == main_space_;
1974 if (kCompactZygote) {
1975 // Can't compact if the non moving space is the same as the main space.
1976 DCHECK(semi_space_collector_ != nullptr);
1977 // Temporarily disable rosalloc verification because the zygote
1978 // compaction will mess up the rosalloc internal metadata.
1979 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
1980 ZygoteCompactingCollector zygote_collector(this);
1981 zygote_collector.BuildBins(non_moving_space_);
1982 // Create a new bump pointer space which we will compact into.
1983 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1984 non_moving_space_->Limit());
1985 // Compact the bump pointer space to a new zygote bump pointer space.
1986 bool reset_main_space = false;
1987 if (IsMovingGc(collector_type_)) {
1988 zygote_collector.SetFromSpace(bump_pointer_space_);
1989 } else {
1990 CHECK(main_space_ != nullptr);
1991 // Copy from the main space.
1992 zygote_collector.SetFromSpace(main_space_);
1993 reset_main_space = true;
1994 }
1995 zygote_collector.SetToSpace(&target_space);
1996 zygote_collector.SetSwapSemiSpaces(false);
1997 zygote_collector.Run(kGcCauseCollectorTransition, false);
1998 if (reset_main_space) {
1999 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2000 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2001 MemMap* mem_map = main_space_->ReleaseMemMap();
2002 RemoveSpace(main_space_);
2003 space::Space* old_main_space = main_space_;
2004 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2005 mem_map->Size());
2006 delete old_main_space;
2007 AddSpace(main_space_);
2008 } else {
2009 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2010 }
2011 if (temp_space_ != nullptr) {
2012 CHECK(temp_space_->IsEmpty());
2013 }
2014 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2015 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2016 // Update the end and write out image.
2017 non_moving_space_->SetEnd(target_space.End());
2018 non_moving_space_->SetLimit(target_space.Limit());
2019 VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
2020 }
2021 // Change the collector to the post zygote one.
2022 ChangeCollector(foreground_collector_type_);
2023 // Save the old space so that we can remove it after we complete creating the zygote space.
2024 space::MallocSpace* old_alloc_space = non_moving_space_;
2025 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2026 // the remaining available space.
2027 // Remove the old space before creating the zygote space since creating the zygote space sets
2028 // the old alloc space's bitmaps to nullptr.
2029 RemoveSpace(old_alloc_space);
2030 if (collector::SemiSpace::kUseRememberedSet) {
2031 // Sanity bound check.
2032 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2033 // Remove the remembered set for the now zygote space (the old
2034 // non-moving space). Note now that we have compacted objects into
2035 // the zygote space, the data in the remembered set is no longer
2036 // needed. The zygote space will instead have a mod-union table
2037 // from this point on.
2038 RemoveRememberedSet(old_alloc_space);
2039 }
2040 // Remaining space becomes the new non moving space.
2041 space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName,
2042 low_memory_mode_,
2043 &non_moving_space_);
2044 CHECK(!non_moving_space_->CanMoveObjects());
2045 if (same_space) {
2046 main_space_ = non_moving_space_;
2047 SetSpaceAsDefault(main_space_);
2048 }
2049 delete old_alloc_space;
2050 CHECK(zygote_space != nullptr) << "Failed creating zygote space";
2051 AddSpace(zygote_space);
2052 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2053 AddSpace(non_moving_space_);
2054 have_zygote_space_ = true;
2055 // Enable large object space allocations.
2056 large_object_threshold_ = kDefaultLargeObjectThreshold;
2057 // Create the zygote space mod union table.
2058 accounting::ModUnionTable* mod_union_table =
2059 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
2060 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2061 AddModUnionTable(mod_union_table);
2062 if (collector::SemiSpace::kUseRememberedSet) {
2063 // Add a new remembered set for the post-zygote non-moving space.
2064 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2065 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2066 non_moving_space_);
2067 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2068 << "Failed to create post-zygote non-moving space remembered set";
2069 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2070 }
2071 }
2072
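// Marks every object in the allocation stack as live and then resets the stack.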
2073 void Heap::FlushAllocStack() {
2074 MarkAllocStackAsLive(allocation_stack_.get());
2075 allocation_stack_->Reset();
2076 }
2077
2078 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2079 accounting::ContinuousSpaceBitmap* bitmap2,
2080 accounting::LargeObjectBitmap* large_objects,
2081 accounting::ObjectStack* stack) {
2082 DCHECK(bitmap1 != nullptr);
2083 DCHECK(bitmap2 != nullptr);
2084 mirror::Object** limit = stack->End();
2085 for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
2086 const mirror::Object* obj = *it;
2087 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2088 if (bitmap1->HasAddress(obj)) {
2089 bitmap1->Set(obj);
2090 } else if (bitmap2->HasAddress(obj)) {
2091 bitmap2->Set(obj);
2092 } else {
2093 large_objects->Set(obj);
2094 }
2095 }
2096 }
2097 }
2098
2099 void Heap::SwapSemiSpaces() {
2100 CHECK(bump_pointer_space_ != nullptr);
2101 CHECK(temp_space_ != nullptr);
2102 std::swap(bump_pointer_space_, temp_space_);
2103 }
2104
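// Compacts source_space into target_space with the semi-space collector, or compacts a bump
// pointer space in place with the mark-compact collector when the two spaces are the same.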
2105 void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2106 space::ContinuousMemMapAllocSpace* source_space,
2107 GcCause gc_cause) {
2108 CHECK(kMovingCollector);
2109 if (target_space != source_space) {
2110 // Don't swap spaces since this isn't a typical semi space collection.
2111 semi_space_collector_->SetSwapSemiSpaces(false);
2112 semi_space_collector_->SetFromSpace(source_space);
2113 semi_space_collector_->SetToSpace(target_space);
2114 semi_space_collector_->Run(gc_cause, false);
2115 } else {
2116 CHECK(target_space->IsBumpPointerSpace())
2117 << "In-place compaction is only supported for bump pointer spaces";
2118 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2119 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2120 }
2121 }
2122
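// Core GC driver: ensures only one GC runs at a time, selects a collector based on the current
// collector type and allocator, runs it, enqueues cleared references, grows the heap toward the
// target utilization, and logs explicit or slow GCs.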
2123 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
2124 bool clear_soft_references) {
2125 Thread* self = Thread::Current();
2126 Runtime* runtime = Runtime::Current();
2127 // If the heap can't run the GC, silently fail and return that no GC was run.
2128 switch (gc_type) {
2129 case collector::kGcTypePartial: {
2130 if (!have_zygote_space_) {
2131 return collector::kGcTypeNone;
2132 }
2133 break;
2134 }
2135 default: {
2136 // Other GC types don't have any special cases which make them not runnable. The main case
2137 // here is full GC.
2138 }
2139 }
2140 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2141 Locks::mutator_lock_->AssertNotHeld(self);
2142 if (self->IsHandlingStackOverflow()) {
2143 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
2144 }
2145 bool compacting_gc;
2146 {
2147 gc_complete_lock_->AssertNotHeld(self);
2148 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
2149 MutexLock mu(self, *gc_complete_lock_);
2150 // Ensure there is only one GC at a time.
2151 WaitForGcToCompleteLocked(gc_cause, self);
2152 compacting_gc = IsMovingGc(collector_type_);
2153 // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2154 if (compacting_gc && disable_moving_gc_count_ != 0) {
2155 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2156 return collector::kGcTypeNone;
2157 }
2158 collector_type_running_ = collector_type_;
2159 }
2160
2161 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2162 ++runtime->GetStats()->gc_for_alloc_count;
2163 ++self->GetStats()->gc_for_alloc_count;
2164 }
2165 uint64_t gc_start_time_ns = NanoTime();
2166 uint64_t gc_start_size = GetBytesAllocated();
2167 // Approximate allocation rate in bytes / second.
2168 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
2169 // Back to back GCs can cause 0 ms of wait time in between GC invocations.
2170 if (LIKELY(ms_delta != 0)) {
2171 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
2172 ATRACE_INT("Allocation rate KB/s", allocation_rate_ / KB);
2173 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
2174 }
2175
2176 DCHECK_LT(gc_type, collector::kGcTypeMax);
2177 DCHECK_NE(gc_type, collector::kGcTypeNone);
2178
2179 collector::GarbageCollector* collector = nullptr;
2180 // TODO: Clean this up.
2181 if (compacting_gc) {
2182 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2183 current_allocator_ == kAllocatorTypeTLAB);
2184 switch (collector_type_) {
2185 case kCollectorTypeSS:
2186 // Fall-through.
2187 case kCollectorTypeGSS:
2188 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2189 semi_space_collector_->SetToSpace(temp_space_);
2190 semi_space_collector_->SetSwapSemiSpaces(true);
2191 collector = semi_space_collector_;
2192 break;
2193 case kCollectorTypeCC:
2194 collector = concurrent_copying_collector_;
2195 break;
2196 case kCollectorTypeMC:
2197 mark_compact_collector_->SetSpace(bump_pointer_space_);
2198 collector = mark_compact_collector_;
2199 break;
2200 default:
2201 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2202 }
2203 if (collector != mark_compact_collector_) {
2204 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2205 CHECK(temp_space_->IsEmpty());
2206 }
2207 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
2208 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2209 current_allocator_ == kAllocatorTypeDlMalloc) {
2210 collector = FindCollectorByGcType(gc_type);
2211 } else {
2212 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2213 }
2214 if (IsGcConcurrent()) {
2215 // Disable concurrent GC check so that we don't have spammy JNI requests.
2216 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2217 // calculated in the same thread so that there aren't any races that can cause it to become
2218 // permanently disabled. b/17942071
2219 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2220 }
2221 CHECK(collector != nullptr)
2222 << "Could not find garbage collector with collector_type="
2223 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2224 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2225 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2226 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2227 RequestHeapTrim();
2228 // Enqueue cleared references.
2229 reference_processor_.EnqueueClearedReferences(self);
2230 // Grow the heap so that we know when to perform the next GC.
2231 GrowForUtilization(collector);
2232 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2233 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2234 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2235 // (mutator time blocked >= long_pause_log_threshold_).
2236 bool log_gc = gc_cause == kGcCauseExplicit;
2237 if (!log_gc && CareAboutPauseTimes()) {
2238 // GC for alloc pauses the allocating thread, so consider it as a pause.
2239 log_gc = duration > long_gc_log_threshold_ ||
2240 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2241 for (uint64_t pause : pause_times) {
2242 log_gc = log_gc || pause >= long_pause_log_threshold_;
2243 }
2244 }
2245 if (log_gc) {
2246 const size_t percent_free = GetPercentFree();
2247 const size_t current_heap_size = GetBytesAllocated();
2248 const size_t total_memory = GetTotalMemory();
2249 std::ostringstream pause_string;
2250 for (size_t i = 0; i < pause_times.size(); ++i) {
2251 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2252 << ((i != pause_times.size() - 1) ? "," : "");
2253 }
2254 LOG(INFO) << gc_cause << " " << collector->GetName()
2255 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2256 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2257 << current_gc_iteration_.GetFreedLargeObjects() << "("
2258 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2259 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2260 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2261 << " total " << PrettyDuration((duration / 1000) * 1000);
2262 VLOG(heap) << ConstDumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2263 }
2264 FinishGC(self, gc_type);
2265 // Inform DDMS that a GC completed.
2266 Dbg::GcDidFinish();
2267 return gc_type;
2268 }
2269
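// Records the completed GC type, clears the running collector marker, and wakes up any threads
// waiting for the GC to complete.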
2270 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2271 MutexLock mu(self, *gc_complete_lock_);
2272 collector_type_running_ = kCollectorTypeNone;
2273 if (gc_type != collector::kGcTypeNone) {
2274 last_gc_type_ = gc_type;
2275 }
2276 // Wake anyone who may have been waiting for the GC to complete.
2277 gc_complete_cond_->Broadcast(self);
2278 }
2279
2280 static void RootMatchesObjectVisitor(mirror::Object** root, void* arg,
2281 const RootInfo& /*root_info*/) {
2282 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
2283 if (*root == obj) {
2284 LOG(INFO) << "Object " << obj << " is a root";
2285 }
2286 }
2287
2288 class ScanVisitor {
2289 public:
2290 void operator()(const mirror::Object* obj) const {
2291 LOG(ERROR) << "Would have rescanned object " << obj;
2292 }
2293 };
2294
2295 // Verify a reference from an object.
2296 class VerifyReferenceVisitor {
2297 public:
2298 explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2299 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
2300 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2301
2302 size_t GetFailureCount() const {
2303 return fail_count_->LoadSequentiallyConsistent();
2304 }
2305
2306 void operator()(mirror::Class* klass, mirror::Reference* ref) const
2307 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2308 if (verify_referent_) {
2309 VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
2310 }
2311 }
2312
2313 void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
2314 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2315 VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
2316 }
2317
2318 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2319 return heap_->IsLiveObjectLocked(obj, true, false, true);
2320 }
2321
2322 static void VerifyRootCallback(mirror::Object** root, void* arg, const RootInfo& root_info)
2323 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2324 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
2325 if (!visitor->VerifyReference(nullptr, *root, MemberOffset(0))) {
2326 LOG(ERROR) << "Root " << *root << " is dead with type " << PrettyTypeOf(*root)
2327 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2328 }
2329 }
2330
2331 private:
2332 // TODO: Fix the no thread safety analysis.
2333 // Returns false on failure.
2334 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2335 NO_THREAD_SAFETY_ANALYSIS {
2336 if (ref == nullptr || IsLive(ref)) {
2337 // Verify that the reference is live.
2338 return true;
2339 }
2340 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2341 // Only print the message on the first failure to prevent spam.
2342 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2343 }
2344 if (obj != nullptr) {
2345 // Only do this part for non roots.
2346 accounting::CardTable* card_table = heap_->GetCardTable();
2347 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2348 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2349 byte* card_addr = card_table->CardFromAddr(obj);
2350 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2351 << offset << "\n card value = " << static_cast<int>(*card_addr);
2352 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2353 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2354 } else {
2355 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2356 }
2357
2358 // Attempt to find the class inside of the recently freed objects.
2359 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2360 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2361 space::MallocSpace* space = ref_space->AsMallocSpace();
2362 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2363 if (ref_class != nullptr) {
2364 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2365 << PrettyClass(ref_class);
2366 } else {
2367 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2368 }
2369 }
2370
2371 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2372 ref->GetClass()->IsClass()) {
2373 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2374 } else {
2375 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2376 << ") is not a valid heap address";
2377 }
2378
2379 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
2380 void* cover_begin = card_table->AddrFromCard(card_addr);
2381 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2382 accounting::CardTable::kCardSize);
2383 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2384 << "-" << cover_end;
2385 accounting::ContinuousSpaceBitmap* bitmap =
2386 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2387
2388 if (bitmap == nullptr) {
2389 LOG(ERROR) << "Object " << obj << " has no bitmap";
2390 if (!VerifyClassClass(obj->GetClass())) {
2391 LOG(ERROR) << "Object " << obj << " failed class verification!";
2392 }
2393 } else {
2394 // Print out how the object is live.
2395 if (bitmap->Test(obj)) {
2396 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2397 }
2398 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2399 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2400 }
2401 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2402 LOG(ERROR) << "Object " << obj << " found in live stack";
2403 }
2404 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2405 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2406 }
2407 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2408 LOG(ERROR) << "Ref " << ref << " found in live stack";
2409 }
2410 // Attempt to see if the card table missed the reference.
2411 ScanVisitor scan_visitor;
2412 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
2413 card_table->Scan(bitmap, byte_cover_begin,
2414 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2415 }
2416
2417 // Search to see if any of the roots reference our object.
2418 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
2419 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
2420
2421 // Search to see if any of the roots reference our reference.
2422 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
2423 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
2424 }
2425 return false;
2426 }
2427
2428 Heap* const heap_;
2429 Atomic<size_t>* const fail_count_;
2430 const bool verify_referent_;
2431 };
2432
2433 // Verify all references within an object, for use with HeapBitmap::Visit.
2434 class VerifyObjectVisitor {
2435 public:
2436 explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2437 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
2438 }
2439
2440 void operator()(mirror::Object* obj) const
2441 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2442 // Note: we are verifying the references in obj but not obj itself; obj must be live, or else
2443 // how did we find it in the live bitmap?
2444 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2445 // The class doesn't count as a reference but we should verify it anyway.
2446 obj->VisitReferences<true>(visitor, visitor);
2447 }
2448
2449 static void VisitCallback(mirror::Object* obj, void* arg)
2450 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2451 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2452 visitor->operator()(obj);
2453 }
2454
2455 size_t GetFailureCount() const {
2456 return fail_count_->LoadSequentiallyConsistent();
2457 }
2458
2459 private:
2460 Heap* const heap_;
2461 Atomic<size_t>* const fail_count_;
2462 const bool verify_referent_;
2463 };
2464
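// Slow path used when the allocation stack is full: push the object into the stack's reserve
// region and run sticky GCs until a normal push succeeds.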
2465 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2466 // Slow path, the allocation stack push back must have already failed.
2467 DCHECK(!allocation_stack_->AtomicPushBack(*obj));
2468 do {
2469 // TODO: Add handle VerifyObject.
2470 StackHandleScope<1> hs(self);
2471 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2472 // Push our object into the reserve region of the allocation stack. This is only required due
2473 // to heap verification requiring that roots are live (either in the live bitmap or in the
2474 // allocation stack).
2475 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2476 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2477 } while (!allocation_stack_->AtomicPushBack(*obj));
2478 }
2479
2480 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2481 // Slow path, the allocation stack push back must have already failed.
2482 DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
2483 mirror::Object** start_address;
2484 mirror::Object** end_address;
2485 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2486 &end_address)) {
2487 // TODO: Add handle VerifyObject.
2488 StackHandleScope<1> hs(self);
2489 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2490 // Push our object into the reserve region of the allocation stack. This is only required due
2491 // to heap verification requiring that roots are live (either in the live bitmap or in the
2492 // allocation stack).
2493 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2494 // Push into the reserve allocation stack.
2495 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2496 }
2497 self->SetThreadLocalAllocationStack(start_address, end_address);
2498 // Retry on the new thread-local allocation stack.
2499 CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
2500 }
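// Illustrative sketch of the fast path that this slow path backs up (hypothetical member names;
// the real fields live in Thread, this only shows the shape of the mechanism): each thread owns a
// slab of kThreadLocalAllocationStackSize slots bump-allocated out of the shared stack by
// AtomicBumpBack above, so ordinary pushes need no atomics:
//
//   bool PushOnThreadLocalAllocationStack(mirror::Object* obj) {
//     if (tl_stack_top_ < tl_stack_end_) {   // Hypothetical cursor/limit pair.
//       *tl_stack_top_++ = obj;              // Plain store; the slab is thread-private.
//       return true;
//     }
//     return false;  // Slab exhausted; the slow path above acquires a new slab (GC'ing if needed).
//   }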
2501
2502 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
2503 size_t Heap::VerifyHeapReferences(bool verify_referents) {
2504 Thread* self = Thread::Current();
2505 Locks::mutator_lock_->AssertExclusiveHeld(self);
2506 // Let's sort our allocation stacks so that we can efficiently binary search them.
2507 allocation_stack_->Sort();
2508 live_stack_->Sort();
2509 // Since we sorted the allocation stack content, we need to revoke all
2510 // thread-local allocation stacks.
2511 RevokeAllThreadLocalAllocationStacks(self);
2512 Atomic<size_t> fail_count_(0);
2513 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
2514 // Verify objects in the allocation stack since these will be objects which were:
2515 // 1. Allocated prior to the GC (pre GC verification).
2516 // 2. Allocated during the GC (pre sweep GC verification).
2517 // We don't want to verify the objects in the live stack since they themselves may be
2518 // pointing to dead objects if they are not reachable.
2519 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
2520 // Verify the roots:
2521 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRootCallback, &visitor);
2522 if (visitor.GetFailureCount() > 0) {
2523 // Dump mod-union tables.
2524 for (const auto& table_pair : mod_union_tables_) {
2525 accounting::ModUnionTable* mod_union_table = table_pair.second;
2526 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2527 }
2528 // Dump remembered sets.
2529 for (const auto& table_pair : remembered_sets_) {
2530 accounting::RememberedSet* remembered_set = table_pair.second;
2531 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2532 }
2533 DumpSpaces(LOG(ERROR));
2534 }
2535 return visitor.GetFailureCount();
2536 }
2537
2538 class VerifyReferenceCardVisitor {
2539 public:
2540 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
2541 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
2542 Locks::heap_bitmap_lock_)
2543 : heap_(heap), failed_(failed) {
2544 }
2545
2546 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2547 // annotalysis on visitors.
2548 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2549 NO_THREAD_SAFETY_ANALYSIS {
2550 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
2551 // Filter out class references since changing an object's class does not mark the card as dirty.
2552 // Also handles large objects, since the only reference they hold is a class reference.
2553 if (ref != nullptr && !ref->IsClass()) {
2554 accounting::CardTable* card_table = heap_->GetCardTable();
2555 // If the object references something in the live stack (other than a class), its card should
2556 // be dirty; report a missing card mark below if it is not.
2557 if (!card_table->AddrIsInCardTable(obj)) {
2558 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2559 *failed_ = true;
2560 } else if (!card_table->IsDirty(obj)) {
2561 // TODO: Check mod-union tables.
2562 // The card should be either kCardDirty, if it got re-dirtied after we aged it, or
2563 // kCardDirty - 1, if it hasn't been touched since we aged it.
2564 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2565 if (live_stack->ContainsSorted(ref)) {
2566 if (live_stack->ContainsSorted(obj)) {
2567 LOG(ERROR) << "Object " << obj << " found in live stack";
2568 }
2569 if (heap_->GetLiveBitmap()->Test(obj)) {
2570 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2571 }
2572 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2573 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2574
2575 // Print which field of the object holds the offending reference.
2576 if (!obj->IsObjectArray()) {
2577 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
2578 CHECK(klass != nullptr);
2579 mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
2580 : klass->GetIFields();
2581 CHECK(fields != nullptr);
2582 for (int32_t i = 0; i < fields->GetLength(); ++i) {
2583 mirror::ArtField* cur = fields->Get(i);
2584 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
2585 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
2586 << PrettyField(cur);
2587 break;
2588 }
2589 }
2590 } else {
2591 mirror::ObjectArray<mirror::Object>* object_array =
2592 obj->AsObjectArray<mirror::Object>();
2593 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2594 if (object_array->Get(i) == ref) {
2595 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2596 }
2597 }
2598 }
2599
2600 *failed_ = true;
2601 }
2602 }
2603 }
2604 }
2605
2606 private:
2607 Heap* const heap_;
2608 bool* const failed_;
2609 };
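// For reference while reading the visitor above: the card table maps every
// accounting::CardTable::kCardSize bytes of heap to one byte of card state, so conceptually
// (illustrative arithmetic with hypothetical local names, assuming the default 128-byte cards):
//
//   size_t card_index = (reinterpret_cast<uintptr_t>(obj) - heap_begin) / kCardSize;
//   bool dirty = card_begin[card_index] == accounting::CardTable::kCardDirty;
//
// A mutator store to any field of obj dirties that single byte; IsDirty(obj) above simply reads it.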
2610
2611 class VerifyLiveStackReferences {
2612 public:
2613 explicit VerifyLiveStackReferences(Heap* heap)
2614 : heap_(heap),
2615 failed_(false) {}
2616
2617 void operator()(mirror::Object* obj) const
2618 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2619 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
2620 obj->VisitReferences<true>(visitor, VoidFunctor());
2621 }
2622
2623 bool Failed() const {
2624 return failed_;
2625 }
2626
2627 private:
2628 Heap* const heap_;
2629 bool failed_;
2630 };
2631
2632 bool Heap::VerifyMissingCardMarks() {
2633 Thread* self = Thread::Current();
2634 Locks::mutator_lock_->AssertExclusiveHeld(self);
2635 // We need to sort the live stack since we binary search it.
2636 live_stack_->Sort();
2637 // Since we sorted the allocation stack content, we need to revoke all
2638 // thread-local allocation stacks.
2639 RevokeAllThreadLocalAllocationStacks(self);
2640 VerifyLiveStackReferences visitor(this);
2641 GetLiveBitmap()->Visit(visitor);
2642 // We can verify objects in the live stack since none of these should reference dead objects.
2643 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
2644 if (!kUseThreadLocalAllocationStack || *it != nullptr) {
2645 visitor(*it);
2646 }
2647 }
2648 return !visitor.Failed();
2649 }
2650
2651 void Heap::SwapStacks(Thread* self) {
2652 if (kUseThreadLocalAllocationStack) {
2653 live_stack_->AssertAllZero();
2654 }
2655 allocation_stack_.swap(live_stack_);
2656 }
2657
2658 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
2659 // This must be called only during the pause.
2660 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
2661 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2662 MutexLock mu2(self, *Locks::thread_list_lock_);
2663 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2664 for (Thread* t : thread_list) {
2665 t->RevokeThreadLocalAllocationStack();
2666 }
2667 }
2668
2669 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
2670 if (kIsDebugBuild) {
2671 if (bump_pointer_space_ != nullptr) {
2672 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
2673 }
2674 }
2675 }
2676
2677 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2678 auto it = mod_union_tables_.find(space);
2679 if (it == mod_union_tables_.end()) {
2680 return nullptr;
2681 }
2682 return it->second;
2683 }
2684
2685 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
2686 auto it = remembered_sets_.find(space);
2687 if (it == remembered_sets_.end()) {
2688 return nullptr;
2689 }
2690 return it->second;
2691 }
2692
2693 void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
2694 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2695 // Clear cards and keep track of cards cleared in the mod-union table.
2696 for (const auto& space : continuous_spaces_) {
2697 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
2698 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
2699 if (table != nullptr) {
2700 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2701 "ImageModUnionClearCards";
2702 TimingLogger::ScopedTiming t(name, timings);
2703 table->ClearCards();
2704 } else if (use_rem_sets && rem_set != nullptr) {
2705 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
2706 << static_cast<int>(collector_type_);
2707 TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
2708 rem_set->ClearCards();
2709 } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
2710 TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
2711 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
2712 // were dirty before the GC started.
2713 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
2714 // -> clean(cleaning thread).
2715 // The race leaves the card either aged or unaged. Since we checkpoint the roots first and
2716 // scan / update the mod-union tables afterwards, we will scan the card either way: if it
2717 // ends up unaged, we scan it during the pause.
2718 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
2719 VoidFunctor());
2720 }
2721 }
2722 }
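// Illustrative card life cycle under the scheme above (the exact byte values come from
// accounting::CardTable and are assumed here): a card written by a mutator since the last GC is
// kCardDirty; ModifyCardsAtomic ages it to kCardDirty - 1 so the GC can distinguish pre-GC dirt
// from stores made while the GC runs; a clean card stays clean and is skipped when scanning.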
2723
2724 static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
2725 }
2726
2727 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
2728 Thread* const self = Thread::Current();
2729 TimingLogger* const timings = current_gc_iteration_.GetTimings();
2730 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2731 if (verify_pre_gc_heap_) {
2732 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
2733 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2734 size_t failures = VerifyHeapReferences();
2735 if (failures > 0) {
2736 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
2737 << " failures";
2738 }
2739 }
2740 // Check that all objects which reference things in the live stack are on dirty cards.
2741 if (verify_missing_card_marks_) {
2742 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
2743 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2744 SwapStacks(self);
2745 // Sort the live stack so that we can quickly binary search it later.
2746 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
2747 << " missing card mark verification failed\n" << DumpSpaces();
2748 SwapStacks(self);
2749 }
2750 if (verify_mod_union_table_) {
2751 TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
2752 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
2753 for (const auto& table_pair : mod_union_tables_) {
2754 accounting::ModUnionTable* mod_union_table = table_pair.second;
2755 mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
2756 mod_union_table->Verify();
2757 }
2758 }
2759 }
2760
2761 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
2762 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
2763 collector::GarbageCollector::ScopedPause pause(gc);
2764 PreGcVerificationPaused(gc);
2765 }
2766 }
2767
2768 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
2769 // TODO: Add a new runtime option for this?
2770 if (verify_pre_gc_rosalloc_) {
2771 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
2772 }
2773 }
2774
2775 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
2776 Thread* const self = Thread::Current();
2777 TimingLogger* const timings = current_gc_iteration_.GetTimings();
2778 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2779 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
2780 // reachable objects.
2781 if (verify_pre_sweeping_heap_) {
2782 TimingLogger::ScopedTiming t("(Paused)PostSweepingVerifyHeapReferences", timings);
2783 CHECK_NE(self->GetState(), kRunnable);
2784 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2785 // Swapping bound bitmaps does nothing.
2786 gc->SwapBitmaps();
2787 // Pass in false since concurrent reference processing can mean that the reference referents
2788 // may point to dead objects at the point which PreSweepingGcVerification is called.
2789 size_t failures = VerifyHeapReferences(false);
2790 if (failures > 0) {
2791 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
2792 << " failures";
2793 }
2794 gc->SwapBitmaps();
2795 }
2796 if (verify_pre_sweeping_rosalloc_) {
2797 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
2798 }
2799 }
2800
2801 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
2802 // Only pause if we have to do some verification.
2803 Thread* const self = Thread::Current();
2804 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
2805 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2806 if (verify_system_weaks_) {
2807 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
2808 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
2809 mark_sweep->VerifySystemWeaks();
2810 }
2811 if (verify_post_gc_rosalloc_) {
2812 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
2813 }
2814 if (verify_post_gc_heap_) {
2815 TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
2816 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2817 size_t failures = VerifyHeapReferences();
2818 if (failures > 0) {
2819 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
2820 << " failures";
2821 }
2822 }
2823 }
2824
2825 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
2826 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
2827 collector::GarbageCollector::ScopedPause pause(gc);
2828 PostGcVerificationPaused(gc);
2829 }
2830 }
2831
2832 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
2833 TimingLogger::ScopedTiming t(name, timings);
2834 for (const auto& space : continuous_spaces_) {
2835 if (space->IsRosAllocSpace()) {
2836 VLOG(heap) << name << " : " << space->GetName();
2837 space->AsRosAllocSpace()->Verify();
2838 }
2839 }
2840 }
2841
2842 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
2843 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
2844 MutexLock mu(self, *gc_complete_lock_);
2845 return WaitForGcToCompleteLocked(cause, self);
2846 }
2847
2848 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
2849 collector::GcType last_gc_type = collector::kGcTypeNone;
2850 uint64_t wait_start = NanoTime();
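// Loop rather than wait once: gc_complete_cond_ is signalled whenever a collection finishes, but
// another GC may already have started by the time this thread reacquires gc_complete_lock_, so we
// keep waiting until collector_type_running_ is back to kCollectorTypeNone.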
2851 while (collector_type_running_ != kCollectorTypeNone) {
2852 ATRACE_BEGIN("GC: Wait For Completion");
2853 // We must wait, change the thread state, and then sleep on gc_complete_cond_.
2854 gc_complete_cond_->Wait(self);
2855 last_gc_type = last_gc_type_;
2856 ATRACE_END();
2857 }
2858 uint64_t wait_time = NanoTime() - wait_start;
2859 total_wait_time_ += wait_time;
2860 if (wait_time > long_pause_log_threshold_) {
2861 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
2862 << " for cause " << cause;
2863 }
2864 return last_gc_type;
2865 }
2866
2867 void Heap::DumpForSigQuit(std::ostream& os) {
2868 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
2869 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
2870 DumpGcPerformanceInfo(os);
2871 }
2872
2873 size_t Heap::GetPercentFree() {
2874 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
2875 }
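// Worked example for GetPercentFree (illustrative numbers only): with max_allowed_footprint_ at
// 64 MB and GetFreeMemory() returning 16 MB, the method reports 100.0f * 16 / 64 = 25 (% free).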
2876
2877 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
2878 if (max_allowed_footprint > GetMaxMemory()) {
2879 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
2880 << PrettySize(GetMaxMemory());
2881 max_allowed_footprint = GetMaxMemory();
2882 }
2883 max_allowed_footprint_ = max_allowed_footprint;
2884 }
2885
2886 bool Heap::IsMovableObject(const mirror::Object* obj) const {
2887 if (kMovingCollector) {
2888 space::Space* space = FindContinuousSpaceFromObject(obj, true);
2889 if (space != nullptr) {
2890 // TODO: Check large object?
2891 return space->CanMoveObjects();
2892 }
2893 }
2894 return false;
2895 }
2896
2897 void Heap::UpdateMaxNativeFootprint() {
2898 size_t native_size = native_bytes_allocated_.LoadRelaxed();
2899 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
2900 size_t target_size = native_size / GetTargetHeapUtilization();
2901 if (target_size > native_size + max_free_) {
2902 target_size = native_size + max_free_;
2903 } else if (target_size < native_size + min_free_) {
2904 target_size = native_size + min_free_;
2905 }
2906 native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
2907 }
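// Worked example for UpdateMaxNativeFootprint (illustrative numbers only): with 20 MB of native
// allocations, a target utilization of 0.5, min_free_ = 512 KB and max_free_ = 2 MB, the raw
// target of 40 MB is clamped down to native_size + max_free_ = 22 MB, and the resulting watermark
// is further capped at growth_limit_.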
2908
2909 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
2910 for (const auto& collector : garbage_collectors_) {
2911 if (collector->GetCollectorType() == collector_type_ &&
2912 collector->GetGcType() == gc_type) {
2913 return collector;
2914 }
2915 }
2916 return nullptr;
2917 }
2918
2919 double Heap::HeapGrowthMultiplier() const {
2920 // Return 1.0 if we are in the background (we don't care about pause times) or in low-memory mode.
2921 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
2922 return 1.0;
2923 }
2924 return foreground_heap_growth_multiplier_;
2925 }
2926
2927 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
2928 // We know what our utilization is at this moment.
2929 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
2930 const uint64_t bytes_allocated = GetBytesAllocated();
2931 last_gc_size_ = bytes_allocated;
2932 last_gc_time_ns_ = NanoTime();
2933 uint64_t target_size;
2934 collector::GcType gc_type = collector_ran->GetGcType();
2935 if (gc_type != collector::kGcTypeSticky) {
2936 // Grow the heap for non sticky GC.
2937 const float multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
2938 // foreground.
2939 intptr_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
2940 CHECK_GE(delta, 0);
2941 target_size = bytes_allocated + delta * multiplier;
2942 target_size = std::min(target_size,
2943 bytes_allocated + static_cast<uint64_t>(max_free_ * multiplier));
2944 target_size = std::max(target_size,
2945 bytes_allocated + static_cast<uint64_t>(min_free_ * multiplier));
2946 native_need_to_run_finalization_ = true;
2947 next_gc_type_ = collector::kGcTypeSticky;
2948 } else {
2949 collector::GcType non_sticky_gc_type =
2950 have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
2951 // Find what the next non sticky collector will be.
2952 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
2953 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
2954 // do another sticky collection next.
2955 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
2956 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
2957 // if the sticky GC throughput always remained >= the full/partial throughput.
2958 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
2959 non_sticky_collector->GetEstimatedMeanThroughput() &&
2960 non_sticky_collector->NumberOfIterations() > 0 &&
2961 bytes_allocated <= max_allowed_footprint_) {
2962 next_gc_type_ = collector::kGcTypeSticky;
2963 } else {
2964 next_gc_type_ = non_sticky_gc_type;
2965 }
2966 // If we have freed enough memory, shrink the heap back down.
2967 if (bytes_allocated + max_free_ < max_allowed_footprint_) {
2968 target_size = bytes_allocated + max_free_;
2969 } else {
2970 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
2971 }
2972 }
2973 if (!ignore_max_footprint_) {
2974 SetIdealFootprint(target_size);
2975 if (IsGcConcurrent()) {
2976 // Calculate when to perform the next ConcurrentGC.
2977 // Calculate the estimated GC duration.
2978 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
2979 // Estimate how many remaining bytes we will have when we need to start the next GC.
2980 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
2981 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
2982 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
2983 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
2984 // A pathological case that should never happen: the estimated allocation rate implies we
2985 // would exceed the application's entire footprint before the next GC. Schedule another GC
2986 // nearly straight away.
2987 remaining_bytes = kMinConcurrentRemainingBytes;
2988 }
2989 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
2990 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
2991 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
2992 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
2993 // right away.
2994 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
2995 static_cast<size_t>(bytes_allocated));
2996 }
2997 }
2998 }
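// Worked example for the non-sticky branch above (illustrative numbers only): with
// bytes_allocated = 30 MB, a target utilization of 0.75 and a foreground multiplier of 2.0,
// delta = 30 / 0.75 - 30 = 10 MB and target_size = 30 + 10 * 2 = 50 MB, which is then clamped to
// [30 MB + min_free_ * 2, 30 MB + max_free_ * 2]. For a concurrent collector, an allocation rate
// of 20 MB/s and an estimated GC duration of 100 ms give remaining_bytes of roughly 2 MB, clamped
// down to kMaxConcurrentRemainingBytes, so the next concurrent GC is requested once
// bytes_allocated reaches max_allowed_footprint_ - remaining_bytes.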
2999
3000 void Heap::ClearGrowthLimit() {
3001 growth_limit_ = capacity_;
3002 for (const auto& space : continuous_spaces_) {
3003 if (space->IsMallocSpace()) {
3004 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3005 malloc_space->ClearGrowthLimit();
3006 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3007 }
3008 }
3009 // The main space backup isn't added to continuous_spaces_ for performance reasons, so handle it separately.
3010 if (main_space_backup_.get() != nullptr) {
3011 main_space_backup_->ClearGrowthLimit();
3012 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3013 }
3014 }
3015
3016 void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
3017 ScopedObjectAccess soa(self);
3018 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3019 jvalue args[1];
3020 args[0].l = arg.get();
3021 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3022 // Restore object in case it gets moved.
3023 *object = soa.Decode<mirror::Object*>(arg.get());
3024 }
3025
3026 void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
3027 StackHandleScope<1> hs(self);
3028 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3029 RequestConcurrentGC(self);
3030 }
3031
3032 void Heap::RequestConcurrentGC(Thread* self) {
3033 // Make sure that we can do a concurrent GC.
3034 Runtime* runtime = Runtime::Current();
3035 if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
3036 self->IsHandlingStackOverflow()) {
3037 return;
3038 }
3039 JNIEnv* env = self->GetJniEnv();
3040 DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
3041 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
3042 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
3043 WellKnownClasses::java_lang_Daemons_requestGC);
3044 CHECK(!env->ExceptionCheck());
3045 }
3046
3047 void Heap::ConcurrentGC(Thread* self) {
3048 if (Runtime::Current()->IsShuttingDown(self)) {
3049 return;
3050 }
3051 // Wait for any GCs currently running to finish.
3052 if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
3053 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
3054 // instead. E.g. if we can't do a partial GC, do a full GC instead.
3055 if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
3056 collector::kGcTypeNone) {
3057 for (collector::GcType gc_type : gc_plan_) {
3058 // Attempt to run the collector, if we succeed, we are done.
3059 if (gc_type > next_gc_type_ &&
3060 CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
3061 break;
3062 }
3063 }
3064 }
3065 }
3066 }
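// Illustrative escalation (assuming the usual gc_plan_ of sticky -> partial -> full): if
// next_gc_type_ is a sticky GC but CollectGarbageInternal returns kGcTypeNone (for example because
// another collection got in first), the loop above retries with partial and then full until one of
// them actually runs.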
3067
3068 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3069 Thread* self = Thread::Current();
3070 {
3071 MutexLock mu(self, *heap_trim_request_lock_);
3072 if (desired_collector_type_ == desired_collector_type) {
3073 return;
3074 }
3075 heap_transition_or_trim_target_time_ =
3076 std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
3077 desired_collector_type_ = desired_collector_type;
3078 }
3079 SignalHeapTrimDaemon(self);
3080 }
3081
3082 void Heap::RequestHeapTrim() {
3083 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3084 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3085 // a space it will hold its lock and can become a cause of jank.
3086 // Note: the large object space self-trims, and the Zygote space was trimmed at fork time and
3087 // is unchanging since.
3088
3089 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3090 // because that only marks object heads, so a large array looks like lots of empty space. We
3091 // don't just ask dlmalloc to trim all the time, because the cost of an _attempted_ trim is proportional
3092 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3093 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3094 // not how much use we're making of those pages.
3095
3096 Thread* self = Thread::Current();
3097 Runtime* runtime = Runtime::Current();
3098 if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
3099 runtime->IsZygote()) {
3100 // Ignore the request if we are the zygote to prevent app launching lag due to sleep in heap
3101 // trimmer daemon. b/17310019
3102 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
3103 // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
3104 // as we don't hold the lock while requesting the trim).
3105 return;
3106 }
3107 {
3108 MutexLock mu(self, *heap_trim_request_lock_);
3109 if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
3110 // We have done a heap trim within the last kHeapTrimWait nanoseconds; don't request another
3111 // one just yet.
3112 return;
3113 }
3114 heap_trim_request_pending_ = true;
3115 uint64_t current_time = NanoTime();
3116 if (heap_transition_or_trim_target_time_ < current_time) {
3117 heap_transition_or_trim_target_time_ = current_time + kHeapTrimWait;
3118 }
3119 }
3120 // Notify the daemon thread which will actually do the heap trim.
3121 SignalHeapTrimDaemon(self);
3122 }
3123
3124 void Heap::SignalHeapTrimDaemon(Thread* self) {
3125 JNIEnv* env = self->GetJniEnv();
3126 DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
3127 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr);
3128 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
3129 WellKnownClasses::java_lang_Daemons_requestHeapTrim);
3130 CHECK(!env->ExceptionCheck());
3131 }
3132
3133 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3134 if (rosalloc_space_ != nullptr) {
3135 rosalloc_space_->RevokeThreadLocalBuffers(thread);
3136 }
3137 if (bump_pointer_space_ != nullptr) {
3138 bump_pointer_space_->RevokeThreadLocalBuffers(thread);
3139 }
3140 }
3141
3142 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3143 if (rosalloc_space_ != nullptr) {
3144 rosalloc_space_->RevokeThreadLocalBuffers(thread);
3145 }
3146 }
3147
3148 void Heap::RevokeAllThreadLocalBuffers() {
3149 if (rosalloc_space_ != nullptr) {
3150 rosalloc_space_->RevokeAllThreadLocalBuffers();
3151 }
3152 if (bump_pointer_space_ != nullptr) {
3153 bump_pointer_space_->RevokeAllThreadLocalBuffers();
3154 }
3155 }
3156
3157 bool Heap::IsGCRequestPending() const {
3158 return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
3159 }
3160
3161 void Heap::RunFinalization(JNIEnv* env) {
3162 // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
3163 if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
3164 CHECK(WellKnownClasses::java_lang_System != nullptr);
3165 WellKnownClasses::java_lang_System_runFinalization =
3166 CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
3167 CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
3168 }
3169 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
3170 WellKnownClasses::java_lang_System_runFinalization);
3171 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
3172 WellKnownClasses::java_lang_System_runFinalization);
3173 }
3174
3175 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3176 Thread* self = ThreadForEnv(env);
3177 if (native_need_to_run_finalization_) {
3178 RunFinalization(env);
3179 UpdateMaxNativeFootprint();
3180 native_need_to_run_finalization_ = false;
3181 }
3182 // FetchAndAdd returns the old total, so add bytes once more below to get the new total of native bytes allocated.
3183 size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3184 new_native_bytes_allocated += bytes;
3185 if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
3186 collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
3187 collector::kGcTypeFull;
3188
3189 // The second watermark is higher than the gc watermark. If you hit this it means you are
3190 // allocating native objects faster than the GC can keep up with.
3191 if (new_native_bytes_allocated > growth_limit_) {
3192 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
3193 // Just finished a GC, attempt to run finalizers.
3194 RunFinalization(env);
3195 CHECK(!env->ExceptionCheck());
3196 }
3197 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
3198 if (new_native_bytes_allocated > growth_limit_) {
3199 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3200 RunFinalization(env);
3201 native_need_to_run_finalization_ = false;
3202 CHECK(!env->ExceptionCheck());
3203 }
3204 // We have just run finalizers, update the native watermark since it is very likely that
3205 // finalizers released native managed allocations.
3206 UpdateMaxNativeFootprint();
3207 } else if (!IsGCRequestPending()) {
3208 if (IsGcConcurrent()) {
3209 RequestConcurrentGC(self);
3210 } else {
3211 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3212 }
3213 }
3214 }
3215 }
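// Worked example for the watermarks above (illustrative numbers only): with
// native_footprint_gc_watermark_ = 24 MB and growth_limit_ = 64 MB, a registration that brings the
// native total to 30 MB only triggers a GC request (concurrent if supported), while one that pushes
// it past 64 MB waits for any running GC, runs finalizers, and performs a blocking partial/full
// collection before returning.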
3216
3217 void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
3218 size_t expected_size;
3219 do {
3220 expected_size = native_bytes_allocated_.LoadRelaxed();
3221 if (UNLIKELY(bytes > expected_size)) {
3222 ScopedObjectAccess soa(env);
3223 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
3224 StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
3225 "registered as allocated", bytes, expected_size).c_str());
3226 break;
3227 }
3228 } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
3229 expected_size - bytes));
3230 }
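// Note on the loop above: the weak compare-exchange may fail spuriously or because a concurrent
// RegisterNativeAllocation / RegisterNativeFree changed the counter; in either case expected_size
// is simply reloaded and the subtraction retried, so it is applied exactly once unless the
// underflow check breaks out of the loop first.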
3231
3232 size_t Heap::GetTotalMemory() const {
3233 return std::max(max_allowed_footprint_, GetBytesAllocated());
3234 }
3235
3236 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3237 DCHECK(mod_union_table != nullptr);
3238 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3239 }
3240
3241 void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
3242 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3243 (c->IsVariableSize() || c->GetObjectSize() == byte_count));
3244 CHECK_GE(byte_count, sizeof(mirror::Object));
3245 }
3246
3247 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3248 CHECK(remembered_set != nullptr);
3249 space::Space* space = remembered_set->GetSpace();
3250 CHECK(space != nullptr);
3251 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3252 remembered_sets_.Put(space, remembered_set);
3253 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3254 }
3255
3256 void Heap::RemoveRememberedSet(space::Space* space) {
3257 CHECK(space != nullptr);
3258 auto it = remembered_sets_.find(space);
3259 CHECK(it != remembered_sets_.end());
3260 delete it->second;
3261 remembered_sets_.erase(it);
3262 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3263 }
3264
3265 void Heap::ClearMarkedObjects() {
3266 // Clear all of the spaces' mark bitmaps.
3267 for (const auto& space : GetContinuousSpaces()) {
3268 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
3269 if (space->GetLiveBitmap() != mark_bitmap) {
3270 mark_bitmap->Clear();
3271 }
3272 }
3273 // Clear the marked objects in the discontinuous space object sets.
3274 for (const auto& space : GetDiscontinuousSpaces()) {
3275 space->GetMarkBitmap()->Clear();
3276 }
3277 }
3278
3279 } // namespace gc
3280 } // namespace art
3281