1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "heap.h"
18
19 #define ATRACE_TAG ATRACE_TAG_DALVIK
20 #include <cutils/trace.h>
21
22 #include <limits>
23 #include <memory>
24 #include <unwind.h> // For GC verification.
25 #include <vector>
26
27 #include "art_field-inl.h"
28 #include "base/allocator.h"
29 #include "base/dumpable.h"
30 #include "base/histogram-inl.h"
31 #include "base/stl_util.h"
32 #include "base/time_utils.h"
33 #include "common_throws.h"
34 #include "cutils/sched_policy.h"
35 #include "debugger.h"
36 #include "dex_file-inl.h"
37 #include "gc/accounting/atomic_stack.h"
38 #include "gc/accounting/card_table-inl.h"
39 #include "gc/accounting/heap_bitmap-inl.h"
40 #include "gc/accounting/mod_union_table.h"
41 #include "gc/accounting/mod_union_table-inl.h"
42 #include "gc/accounting/remembered_set.h"
43 #include "gc/accounting/space_bitmap-inl.h"
44 #include "gc/collector/concurrent_copying.h"
45 #include "gc/collector/mark_compact.h"
46 #include "gc/collector/mark_sweep-inl.h"
47 #include "gc/collector/partial_mark_sweep.h"
48 #include "gc/collector/semi_space.h"
49 #include "gc/collector/sticky_mark_sweep.h"
50 #include "gc/reference_processor.h"
51 #include "gc/space/bump_pointer_space.h"
52 #include "gc/space/dlmalloc_space-inl.h"
53 #include "gc/space/image_space.h"
54 #include "gc/space/large_object_space.h"
55 #include "gc/space/region_space.h"
56 #include "gc/space/rosalloc_space-inl.h"
57 #include "gc/space/space-inl.h"
58 #include "gc/space/zygote_space.h"
59 #include "gc/task_processor.h"
60 #include "entrypoints/quick/quick_alloc_entrypoints.h"
61 #include "heap-inl.h"
62 #include "image.h"
63 #include "intern_table.h"
64 #include "mirror/class-inl.h"
65 #include "mirror/object.h"
66 #include "mirror/object-inl.h"
67 #include "mirror/object_array-inl.h"
68 #include "mirror/reference-inl.h"
69 #include "os.h"
70 #include "reflection.h"
71 #include "runtime.h"
72 #include "ScopedLocalRef.h"
73 #include "scoped_thread_state_change.h"
74 #include "handle_scope-inl.h"
75 #include "thread_list.h"
76 #include "well_known_classes.h"
77
78 namespace art {
79
80 namespace gc {
81
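// Collector-transition stress testing: how many extra transitions to perform when the process
// state changes (0 disables the stress test) and how long to wait between them.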
82 static constexpr size_t kCollectorTransitionStressIterations = 0;
83 static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
84 // Minimum amount of remaining bytes before a concurrent GC is triggered.
85 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
86 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
87 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
88 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
89 // threads (lower pauses, use less memory bandwidth).
90 static constexpr double kStickyGcThroughputAdjustment = 1.0;
91 // Whether or not we compact the zygote in PreZygoteFork.
92 static constexpr bool kCompactZygote = kMovingCollector;
93 // How many reserve entries are at the end of the allocation stack; these are only needed if the
94 // allocation stack overflows.
95 static constexpr size_t kAllocationStackReserveSize = 1024;
96 // Default mark stack size in bytes.
97 static const size_t kDefaultMarkStackSize = 64 * KB;
98 // Space names.
99 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
100 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
101 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
102 static const char* kNonMovingSpaceName = "non moving space";
103 static const char* kZygoteSpaceName = "zygote space";
104 static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
105 static constexpr bool kGCALotMode = false;
106 // GC alot mode uses a small allocation stack to stress test a lot of GC.
107 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
108 sizeof(mirror::HeapReference<mirror::Object>);
109 // Verify object mode has a small allocation stack size since searching the allocation stack is slow.
110 static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
111 sizeof(mirror::HeapReference<mirror::Object>);
112 static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
113 sizeof(mirror::HeapReference<mirror::Object>);
114 // System.runFinalization can deadlock with native allocations. To deal with this, we have a
115 // timeout on how long we wait for finalizers to run. b/21544853
116 static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
117
118 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
119 double target_utilization, double foreground_heap_growth_multiplier,
120 size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
121 const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
122 CollectorType background_collector_type,
123 space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
124 size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
125 size_t long_pause_log_threshold, size_t long_gc_log_threshold,
126 bool ignore_max_footprint, bool use_tlab,
127 bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
128 bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
129 bool verify_post_gc_rosalloc, bool gc_stress_mode,
130 bool use_homogeneous_space_compaction_for_oom,
131 uint64_t min_interval_homogeneous_space_compaction_by_oom)
132 : non_moving_space_(nullptr),
133 rosalloc_space_(nullptr),
134 dlmalloc_space_(nullptr),
135 main_space_(nullptr),
136 collector_type_(kCollectorTypeNone),
137 foreground_collector_type_(foreground_collector_type),
138 background_collector_type_(background_collector_type),
139 desired_collector_type_(foreground_collector_type_),
140 pending_task_lock_(nullptr),
141 parallel_gc_threads_(parallel_gc_threads),
142 conc_gc_threads_(conc_gc_threads),
143 low_memory_mode_(low_memory_mode),
144 long_pause_log_threshold_(long_pause_log_threshold),
145 long_gc_log_threshold_(long_gc_log_threshold),
146 ignore_max_footprint_(ignore_max_footprint),
147 zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
148 zygote_space_(nullptr),
149 large_object_threshold_(large_object_threshold),
150 collector_type_running_(kCollectorTypeNone),
151 last_gc_type_(collector::kGcTypeNone),
152 next_gc_type_(collector::kGcTypePartial),
153 capacity_(capacity),
154 growth_limit_(growth_limit),
155 max_allowed_footprint_(initial_size),
156 native_footprint_gc_watermark_(initial_size),
157 native_need_to_run_finalization_(false),
158 // Initially assume we perceive jank in case the process state is never updated.
159 process_state_(kProcessStateJankPerceptible),
160 concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
161 total_bytes_freed_ever_(0),
162 total_objects_freed_ever_(0),
163 num_bytes_allocated_(0),
164 native_bytes_allocated_(0),
165 num_bytes_freed_revoke_(0),
166 verify_missing_card_marks_(false),
167 verify_system_weaks_(false),
168 verify_pre_gc_heap_(verify_pre_gc_heap),
169 verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
170 verify_post_gc_heap_(verify_post_gc_heap),
171 verify_mod_union_table_(false),
172 verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
173 verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
174 verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
175 gc_stress_mode_(gc_stress_mode),
176 /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
177 * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
178 * verification is enabled, we limit the size of allocation stacks to speed up their
179 * searching.
180 */
181 max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
182 : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
183 kDefaultAllocationStackSize),
184 current_allocator_(kAllocatorTypeDlMalloc),
185 current_non_moving_allocator_(kAllocatorTypeNonMoving),
186 bump_pointer_space_(nullptr),
187 temp_space_(nullptr),
188 region_space_(nullptr),
189 min_free_(min_free),
190 max_free_(max_free),
191 target_utilization_(target_utilization),
192 foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
193 total_wait_time_(0),
194 total_allocation_time_(0),
195 verify_object_mode_(kVerifyObjectModeDisabled),
196 disable_moving_gc_count_(0),
197 running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
198 use_tlab_(use_tlab),
199 main_space_backup_(nullptr),
200 min_interval_homogeneous_space_compaction_by_oom_(
201 min_interval_homogeneous_space_compaction_by_oom),
202 last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
203 pending_collector_transition_(nullptr),
204 pending_heap_trim_(nullptr),
205 use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
206 running_collection_is_blocking_(false),
207 blocking_gc_count_(0U),
208 blocking_gc_time_(0U),
209 last_update_time_gc_count_rate_histograms_( // Round down by the window duration.
210 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
211 gc_count_last_window_(0U),
212 blocking_gc_count_last_window_(0U),
213 gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
214 blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
215 kGcCountRateMaxBucketCount),
216 backtrace_lock_(nullptr),
217 seen_backtrace_count_(0u),
218 unique_backtrace_count_(0u) {
219 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
220 LOG(INFO) << "Heap() entering";
221 }
222 Runtime* const runtime = Runtime::Current();
223 // If we aren't the zygote, switch to the default non zygote allocator. This may update the
224 // entrypoints.
225 const bool is_zygote = runtime->IsZygote();
226 if (!is_zygote) {
227 // Background compaction is currently not supported for command line runs.
228 if (background_collector_type_ != foreground_collector_type_) {
229 VLOG(heap) << "Disabling background compaction for non zygote";
230 background_collector_type_ = foreground_collector_type_;
231 }
232 }
233 ChangeCollector(desired_collector_type_);
234 live_bitmap_.reset(new accounting::HeapBitmap(this));
235 mark_bitmap_.reset(new accounting::HeapBitmap(this));
236 // Requested begin for the alloc space, to follow the mapped image and oat files
237 uint8_t* requested_alloc_space_begin = nullptr;
238 if (foreground_collector_type_ == kCollectorTypeCC) {
239 // Need to use a low address so that we can allocate a contiguous
240 // 2 * Xmx space when there's no image (dex2oat for target).
241 CHECK_GE(300 * MB, non_moving_space_capacity);
242 requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
243 }
244 if (!image_file_name.empty()) {
245 ATRACE_BEGIN("ImageSpace::Create");
246 std::string error_msg;
247 auto* image_space = space::ImageSpace::Create(image_file_name.c_str(), image_instruction_set,
248 &error_msg);
249 ATRACE_END();
250 if (image_space != nullptr) {
251 AddSpace(image_space);
252 // Oat files referenced by image files immediately follow them in memory; ensure the alloc space
253 // isn't going to end up in the middle of them.
254 uint8_t* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
255 CHECK_GT(oat_file_end_addr, image_space->End());
256 requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
257 } else {
258 LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
259 << "Attempting to fall back to imageless running. Error was: " << error_msg;
260 }
261 }
262 /*
263 requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
264 +- nonmoving space (non_moving_space_capacity)+-
265 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
266 +-????????????????????????????????????????????+-
267 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
268 +-main alloc space / bump space 1 (capacity_) +-
269 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
270 +-????????????????????????????????????????????+-
271 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
272 +-main alloc space2 / bump space 2 (capacity_)+-
273 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
274 */
275 // We don't have hspace compaction enabled with GSS or CC.
276 if (foreground_collector_type_ == kCollectorTypeGSS ||
277 foreground_collector_type_ == kCollectorTypeCC) {
278 use_homogeneous_space_compaction_for_oom_ = false;
279 }
280 bool support_homogeneous_space_compaction =
281 background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
282 use_homogeneous_space_compaction_for_oom_;
283 // We may use the same space as the main space for the non moving space if we don't need to
284 // compact from the main space.
285 // This is not the case if we support homogeneous compaction or have a moving background
286 // collector type.
287 bool separate_non_moving_space = is_zygote ||
288 support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
289 IsMovingGc(background_collector_type_);
290 if (foreground_collector_type == kCollectorTypeGSS) {
291 separate_non_moving_space = false;
292 }
293 std::unique_ptr<MemMap> main_mem_map_1;
294 std::unique_ptr<MemMap> main_mem_map_2;
295 uint8_t* request_begin = requested_alloc_space_begin;
296 if (request_begin != nullptr && separate_non_moving_space) {
297 request_begin += non_moving_space_capacity;
298 }
299 std::string error_str;
300 std::unique_ptr<MemMap> non_moving_space_mem_map;
301 ATRACE_BEGIN("Create heap maps");
302 if (separate_non_moving_space) {
303 // If we are the zygote, the non moving space becomes the zygote space when we run
304 // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
305 // rename the mem map later.
306 const char* space_name = is_zygote ? kZygoteSpaceName: kNonMovingSpaceName;
307 // Reserve the non moving mem map before the other two since it needs to be at a specific
308 // address.
309 non_moving_space_mem_map.reset(
310 MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
311 non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
312 &error_str));
313 CHECK(non_moving_space_mem_map != nullptr) << error_str;
314 // Try to reserve virtual memory at a lower address if we have a separate non moving space.
315 request_begin = reinterpret_cast<uint8_t*>(300 * MB);
316 }
317 // Attempt to create 2 mem maps at or after the requested begin.
318 if (foreground_collector_type_ != kCollectorTypeCC) {
319 if (separate_non_moving_space) {
320 main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin,
321 capacity_, &error_str));
322 } else {
323 // If no separate non-moving space, the main space must come
324 // right after the image space to avoid a gap.
325 main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
326 PROT_READ | PROT_WRITE, true, false,
327 &error_str));
328 }
329 CHECK(main_mem_map_1.get() != nullptr) << error_str;
330 }
331 if (support_homogeneous_space_compaction ||
332 background_collector_type_ == kCollectorTypeSS ||
333 foreground_collector_type_ == kCollectorTypeSS) {
334 main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
335 capacity_, &error_str));
336 CHECK(main_mem_map_2.get() != nullptr) << error_str;
337 }
338 ATRACE_END();
339 ATRACE_BEGIN("Create spaces");
340 // Create the non moving space first so that bitmaps don't take up the address range.
341 if (separate_non_moving_space) {
342 // Non moving space is always dlmalloc since we currently don't have support for multiple
343 // active rosalloc spaces.
344 const size_t size = non_moving_space_mem_map->Size();
345 non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
346 non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
347 initial_size, size, size, false);
348 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
349 CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
350 << requested_alloc_space_begin;
351 AddSpace(non_moving_space_);
352 }
353 // Create other spaces based on whether or not we have a moving GC.
354 if (foreground_collector_type_ == kCollectorTypeCC) {
355 region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
356 AddSpace(region_space_);
357 } else if (IsMovingGc(foreground_collector_type_) &&
358 foreground_collector_type_ != kCollectorTypeGSS) {
359 // Create bump pointer spaces.
360 // We only create the bump pointer spaces if the foreground collector is a compacting GC.
361 // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
362 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
363 main_mem_map_1.release());
364 CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
365 AddSpace(bump_pointer_space_);
366 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
367 main_mem_map_2.release());
368 CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
369 AddSpace(temp_space_);
370 CHECK(separate_non_moving_space);
371 } else {
372 CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
373 CHECK(main_space_ != nullptr);
374 AddSpace(main_space_);
375 if (!separate_non_moving_space) {
376 non_moving_space_ = main_space_;
377 CHECK(!non_moving_space_->CanMoveObjects());
378 }
379 if (foreground_collector_type_ == kCollectorTypeGSS) {
380 CHECK_EQ(foreground_collector_type_, background_collector_type_);
381 // Create bump pointer spaces instead of a backup space.
382 main_mem_map_2.release();
383 bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
384 kGSSBumpPointerSpaceCapacity, nullptr);
385 CHECK(bump_pointer_space_ != nullptr);
386 AddSpace(bump_pointer_space_);
387 temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
388 kGSSBumpPointerSpaceCapacity, nullptr);
389 CHECK(temp_space_ != nullptr);
390 AddSpace(temp_space_);
391 } else if (main_mem_map_2.get() != nullptr) {
392 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
393 main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
394 growth_limit_, capacity_, name, true));
395 CHECK(main_space_backup_.get() != nullptr);
396 // Add the space so it's accounted for in the heap_begin and heap_end.
397 AddSpace(main_space_backup_.get());
398 }
399 }
400 CHECK(non_moving_space_ != nullptr);
401 CHECK(!non_moving_space_->CanMoveObjects());
402 // Allocate the large object space.
403 if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
404 large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
405 capacity_);
406 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
407 } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
408 large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
409 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
410 } else {
411 // Disable the large object space by making the cutoff excessively large.
412 large_object_threshold_ = std::numeric_limits<size_t>::max();
413 large_object_space_ = nullptr;
414 }
415 if (large_object_space_ != nullptr) {
416 AddSpace(large_object_space_);
417 }
418 // Compute heap capacity. Continuous spaces are sorted in order of Begin().
419 CHECK(!continuous_spaces_.empty());
420 // Relies on the spaces being sorted.
421 uint8_t* heap_begin = continuous_spaces_.front()->Begin();
422 uint8_t* heap_end = continuous_spaces_.back()->Limit();
423 size_t heap_capacity = heap_end - heap_begin;
424 // Remove the main backup space since it slows down the GC to have unused extra spaces.
425 // TODO: Avoid needing to do this.
426 if (main_space_backup_.get() != nullptr) {
427 RemoveSpace(main_space_backup_.get());
428 }
429 ATRACE_END();
430 // Allocate the card table.
431 ATRACE_BEGIN("Create card table");
432 card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
433 CHECK(card_table_.get() != nullptr) << "Failed to create card table";
434 ATRACE_END();
435 if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
436 rb_table_.reset(new accounting::ReadBarrierTable());
437 DCHECK(rb_table_->IsAllCleared());
438 }
439 if (GetImageSpace() != nullptr) {
440 // Don't add the image mod union table if we are running without an image; this can crash if
441 // we use the CardCache implementation.
442 accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
443 "Image mod-union table", this, GetImageSpace());
444 CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
445 AddModUnionTable(mod_union_table);
446 }
447 if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
448 accounting::RememberedSet* non_moving_space_rem_set =
449 new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
450 CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
451 AddRememberedSet(non_moving_space_rem_set);
452 }
453 // TODO: Count objects in the image space here?
454 num_bytes_allocated_.StoreRelaxed(0);
455 mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
456 kDefaultMarkStackSize));
457 const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
458 allocation_stack_.reset(accounting::ObjectStack::Create(
459 "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
460 live_stack_.reset(accounting::ObjectStack::Create(
461 "live stack", max_allocation_stack_size_, alloc_stack_capacity));
462 // It's still too early to take a lock because there are no threads yet, but we can create locks
463 // now. We don't create them earlier to make it clear that you can't use locks during heap
464 // initialization.
465 gc_complete_lock_ = new Mutex("GC complete lock");
466 gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
467 *gc_complete_lock_));
468 task_processor_.reset(new TaskProcessor());
469 pending_task_lock_ = new Mutex("Pending task lock");
470 if (ignore_max_footprint_) {
471 SetIdealFootprint(std::numeric_limits<size_t>::max());
472 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
473 }
474 CHECK_NE(max_allowed_footprint_, 0U);
475 // Create our garbage collectors.
476 for (size_t i = 0; i < 2; ++i) {
477 const bool concurrent = i != 0;
478 if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
479 (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
480 garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
481 garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
482 garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
483 }
484 }
485 if (kMovingCollector) {
486 if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
487 MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
488 use_homogeneous_space_compaction_for_oom_) {
489 // TODO: Clean this up.
490 const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
491 semi_space_collector_ = new collector::SemiSpace(this, generational,
492 generational ? "generational" : "");
493 garbage_collectors_.push_back(semi_space_collector_);
494 }
495 if (MayUseCollector(kCollectorTypeCC)) {
496 concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
497 garbage_collectors_.push_back(concurrent_copying_collector_);
498 }
499 if (MayUseCollector(kCollectorTypeMC)) {
500 mark_compact_collector_ = new collector::MarkCompact(this);
501 garbage_collectors_.push_back(mark_compact_collector_);
502 }
503 }
504 if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
505 (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
506 // Check that there's no gap between the image space and the non moving space so that the
507 // immune region won't break (e.g. due to a large object allocated in the gap). This is only
508 // required when we're the zygote or using GSS.
509 bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
510 non_moving_space_->GetMemMap());
511 if (!no_gap) {
512 PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
513 MemMap::DumpMaps(LOG(ERROR), true);
514 LOG(FATAL) << "There's a gap between the image space and the non-moving space";
515 }
516 }
517 instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
518 if (gc_stress_mode_) {
519 backtrace_lock_ = new Mutex("GC complete lock");
520 }
521 if (running_on_valgrind_ || gc_stress_mode_) {
522 instrumentation->InstrumentQuickAllocEntryPoints();
523 }
524 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
525 LOG(INFO) << "Heap() exiting";
526 }
527 }
528
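// Map an anonymous region, preferring the requested address; if that fails, retry once and let
// the kernel pick the address.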
529 MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
530 size_t capacity, std::string* out_error_str) {
531 while (true) {
532 MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
533 PROT_READ | PROT_WRITE, true, false, out_error_str);
534 if (map != nullptr || request_begin == nullptr) {
535 return map;
536 }
537 // Retry a second time with no specified request begin.
538 request_begin = nullptr;
539 }
540 }
541
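// Returns true if the given collector type is in use as either the foreground or background
// collector.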
542 bool Heap::MayUseCollector(CollectorType type) const {
543 return foreground_collector_type_ == type || background_collector_type_ == type;
544 }
545
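// Create a rosalloc or dlmalloc space (depending on kUseRosAlloc) from an existing mem map and,
// if the semi-space collector uses remembered sets, create one for the new space.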
546 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
547 size_t growth_limit, size_t capacity,
548 const char* name, bool can_move_objects) {
549 space::MallocSpace* malloc_space = nullptr;
550 if (kUseRosAlloc) {
551 // Create rosalloc space.
552 malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
553 initial_size, growth_limit, capacity,
554 low_memory_mode_, can_move_objects);
555 } else {
556 malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
557 initial_size, growth_limit, capacity,
558 can_move_objects);
559 }
560 if (collector::SemiSpace::kUseRememberedSet) {
561 accounting::RememberedSet* rem_set =
562 new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
563 CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
564 AddRememberedSet(rem_set);
565 }
566 CHECK(malloc_space != nullptr) << "Failed to create " << name;
567 malloc_space->SetFootprintLimit(malloc_space->Capacity());
568 return malloc_space;
569 }
570
571 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
572 size_t capacity) {
573 // Is background compaction enabled?
574 bool can_move_objects = IsMovingGc(background_collector_type_) !=
575 IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
576 // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
577 // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
578 // from the main space to the zygote space. If background compaction is enabled, always pass in
579 // that we can move objects.
580 if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
581 // After the zygote we want this to be false if we don't have background compaction enabled so
582 // that getting primitive array elements is faster.
583 // We never have homogeneous compaction with GSS and don't need a space with movable objects.
584 can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
585 }
586 if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
587 RemoveRememberedSet(main_space_);
588 }
589 const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
590 main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
591 can_move_objects);
592 SetSpaceAsDefault(main_space_);
593 VLOG(heap) << "Created main space " << main_space_;
594 }
595
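// Switch the allocator used for new allocations and update the quick alloc entrypoints to match.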
596 void Heap::ChangeAllocator(AllocatorType allocator) {
597 if (current_allocator_ != allocator) {
598 // These two allocators are only used internally and don't have any entrypoints.
599 CHECK_NE(allocator, kAllocatorTypeLOS);
600 CHECK_NE(allocator, kAllocatorTypeNonMoving);
601 current_allocator_ = allocator;
602 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
603 SetQuickAllocEntryPointsAllocator(current_allocator_);
604 Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
605 }
606 }
607
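// Permanently switch to non-moving collector types and, if the transition succeeded, make the
// main space double as the non moving space so that moving GC is no longer required.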
608 void Heap::DisableMovingGc() {
609 if (IsMovingGc(foreground_collector_type_)) {
610 foreground_collector_type_ = kCollectorTypeCMS;
611 }
612 if (IsMovingGc(background_collector_type_)) {
613 background_collector_type_ = foreground_collector_type_;
614 }
615 TransitionCollector(foreground_collector_type_);
616 ThreadList* tl = Runtime::Current()->GetThreadList();
617 Thread* self = Thread::Current();
618 ScopedThreadStateChange tsc(self, kSuspended);
619 tl->SuspendAll(__FUNCTION__);
620 // Something may have caused the transition to fail.
621 if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
622 CHECK(main_space_ != nullptr);
623 // The allocation stack may have non movable objects in it. We need to flush it since the GC
624 // can only handle marking allocation stack objects of one non moving space and one main
625 // space.
626 {
627 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
628 FlushAllocStack();
629 }
630 main_space_->DisableMovingObjects();
631 non_moving_space_ = main_space_;
632 CHECK(!non_moving_space_->CanMoveObjects());
633 }
634 tl->ResumeAll();
635 }
636
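// Best-effort class descriptor lookup that validates addresses before dereferencing them, so it
// is safe to call on possibly corrupt objects (used for debug dumps).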
637 std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
638 if (!IsValidContinuousSpaceObjectAddress(klass)) {
639 return StringPrintf("<non heap address klass %p>", klass);
640 }
641 mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
642 if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
643 std::string result("[");
644 result += SafeGetClassDescriptor(component_type);
645 return result;
646 } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
647 return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
648 } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
649 return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
650 } else {
651 mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
652 if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
653 return StringPrintf("<non heap address dex_cache %p>", dex_cache);
654 }
655 const DexFile* dex_file = dex_cache->GetDexFile();
656 uint16_t class_def_idx = klass->GetDexClassDefIndex();
657 if (class_def_idx == DexFile::kDexNoIndex16) {
658 return "<class def not found>";
659 }
660 const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
661 const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
662 return dex_file->GetTypeDescriptor(type_id);
663 }
664 }
665
666 std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
667 if (obj == nullptr) {
668 return "null";
669 }
670 mirror::Class* klass = obj->GetClass<kVerifyNone>();
671 if (klass == nullptr) {
672 return "(class=null)";
673 }
674 std::string result(SafeGetClassDescriptor(klass));
675 if (obj->IsClass()) {
676 result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
677 }
678 return result;
679 }
680
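// Dump debugging information for an object: unprotect the spaces so the object's memory can be
// read, print its space and class, then re-protect the page that faulted.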
681 void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
682 if (obj == nullptr) {
683 stream << "(obj=null)";
684 return;
685 }
686 if (IsAligned<kObjectAlignment>(obj)) {
687 space::Space* space = nullptr;
688 // Don't use find space since it only finds spaces which actually contain objects instead of
689 // spaces which may contain objects (e.g. cleared bump pointer spaces).
690 for (const auto& cur_space : continuous_spaces_) {
691 if (cur_space->HasAddress(obj)) {
692 space = cur_space;
693 break;
694 }
695 }
696 // Unprotect all the spaces.
697 for (const auto& con_space : continuous_spaces_) {
698 mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
699 }
700 stream << "Object " << obj;
701 if (space != nullptr) {
702 stream << " in space " << *space;
703 }
704 mirror::Class* klass = obj->GetClass<kVerifyNone>();
705 stream << "\nclass=" << klass;
706 if (klass != nullptr) {
707 stream << " type= " << SafePrettyTypeOf(obj);
708 }
709 // Re-protect the address we faulted on.
710 mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
711 }
712 }
713
714 bool Heap::IsCompilingBoot() const {
715 if (!Runtime::Current()->IsAotCompiler()) {
716 return false;
717 }
718 for (const auto& space : continuous_spaces_) {
719 if (space->IsImageSpace() || space->IsZygoteSpace()) {
720 return false;
721 }
722 }
723 return true;
724 }
725
726 bool Heap::HasImageSpace() const {
727 for (const auto& space : continuous_spaces_) {
728 if (space->IsImageSpace()) {
729 return true;
730 }
731 }
732 return false;
733 }
734
735 void Heap::IncrementDisableMovingGC(Thread* self) {
736 // Need to do this holding the lock to prevent races where the GC is about to run / running when
737 // we attempt to disable it.
738 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
739 MutexLock mu(self, *gc_complete_lock_);
740 ++disable_moving_gc_count_;
741 if (IsMovingGc(collector_type_running_)) {
742 WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
743 }
744 }
745
746 void Heap::DecrementDisableMovingGC(Thread* self) {
747 MutexLock mu(self, *gc_complete_lock_);
748 CHECK_GT(disable_moving_gc_count_, 0U);
749 --disable_moving_gc_count_;
750 }
751
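// Called when the process moves between foreground (jank perceptible) and background; requests a
// transition to the matching collector type.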
752 void Heap::UpdateProcessState(ProcessState process_state) {
753 if (process_state_ != process_state) {
754 process_state_ = process_state;
755 for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
756 // Start at index 1 to avoid "is always false" warning.
757 // Have iteration 1 always transition the collector.
758 TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
759 ? foreground_collector_type_ : background_collector_type_);
760 usleep(kCollectorTransitionStressWait);
761 }
762 if (process_state_ == kProcessStateJankPerceptible) {
763 // Transition back to foreground right away to prevent jank.
764 RequestCollectorTransition(foreground_collector_type_, 0);
765 } else {
766 // Don't delay for debug builds since we may want to stress test the GC.
767 // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
768 // special handling which does a homogeneous space compaction once but then doesn't transition
769 // the collector.
770 RequestCollectorTransition(background_collector_type_,
771 kIsDebugBuild ? 0 : kCollectorTransitionWait);
772 }
773 }
774 }
775
776 void Heap::CreateThreadPool() {
777 const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
778 if (num_threads != 0) {
779 thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
780 }
781 }
782
783 // Visit objects when threads aren't suspended. If using a concurrent moving
784 // GC, disable moving GC, suspend threads, and then visit objects.
785 void Heap::VisitObjects(ObjectCallback callback, void* arg) {
786 Thread* self = Thread::Current();
787 Locks::mutator_lock_->AssertSharedHeld(self);
788 DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
789 if (IsGcConcurrentAndMoving()) {
790 // Concurrent moving GC. Just suspending threads isn't sufficient
791 // because a collection isn't one big pause and we could suspend
792 // threads in the middle (between phases) of a concurrent moving
793 // collection where it's not easily known which objects are alive
794 // (both the region space and the non-moving space) or which
795 // copies of objects to visit, and the to-space invariant could be
796 // easily broken. Visit objects while GC isn't running by using
797 // IncrementDisableMovingGC() and threads are suspended.
798 IncrementDisableMovingGC(self);
799 self->TransitionFromRunnableToSuspended(kWaitingForVisitObjects);
800 ThreadList* tl = Runtime::Current()->GetThreadList();
801 tl->SuspendAll(__FUNCTION__);
802 VisitObjectsInternalRegionSpace(callback, arg);
803 VisitObjectsInternal(callback, arg);
804 tl->ResumeAll();
805 self->TransitionFromSuspendedToRunnable();
806 DecrementDisableMovingGC(self);
807 } else {
808 // GCs can move objects, so don't allow this.
809 ScopedAssertNoThreadSuspension ants(self, "Visiting objects");
810 DCHECK(region_space_ == nullptr);
811 VisitObjectsInternal(callback, arg);
812 }
813 }
814
815 // Visit objects when threads are already suspended.
816 void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
817 Thread* self = Thread::Current();
818 Locks::mutator_lock_->AssertExclusiveHeld(self);
819 VisitObjectsInternalRegionSpace(callback, arg);
820 VisitObjectsInternal(callback, arg);
821 }
822
823 // Visit objects in the region spaces.
824 void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
825 Thread* self = Thread::Current();
826 Locks::mutator_lock_->AssertExclusiveHeld(self);
827 if (region_space_ != nullptr) {
828 DCHECK(IsGcConcurrentAndMoving());
829 if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
830 // Exclude the pre-zygote fork time where the semi-space collector
831 // calls VerifyHeapReferences() as part of the zygote compaction
832 // which then would call here without the moving GC disabled,
833 // which is fine.
834 DCHECK(IsMovingGCDisabled(self));
835 }
836 region_space_->Walk(callback, arg);
837 }
838 }
839
840 // Visit objects in the other spaces.
841 void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
842 if (bump_pointer_space_ != nullptr) {
843 // Visit objects in bump pointer space.
844 bump_pointer_space_->Walk(callback, arg);
845 }
846 // TODO: Switch to standard begin and end to use a range-based loop.
847 for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
848 mirror::Object* const obj = it->AsMirrorPtr();
849 if (obj != nullptr && obj->GetClass() != nullptr) {
850 // Avoid the race condition caused by the object not yet being written into the allocation
851 // stack or the class not yet being written in the object. Or, if
852 // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
853 callback(obj, arg);
854 }
855 }
856 {
857 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
858 GetLiveBitmap()->Walk(callback, arg);
859 }
860 }
861
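// Mark every object in the given allocation stack as live in the main/non moving space bitmaps
// and, if present, the large object space bitmap.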
862 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
863 space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
864 space::ContinuousSpace* space2 = non_moving_space_;
865 // TODO: Generalize this to n bitmaps?
866 CHECK(space1 != nullptr);
867 CHECK(space2 != nullptr);
868 MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
869 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
870 stack);
871 }
872
873 void Heap::DeleteThreadPool() {
874 thread_pool_.reset(nullptr);
875 }
876
877 void Heap::AddSpace(space::Space* space) {
878 CHECK(space != nullptr);
879 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
880 if (space->IsContinuousSpace()) {
881 DCHECK(!space->IsDiscontinuousSpace());
882 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
883 // Continuous spaces don't necessarily have bitmaps.
884 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
885 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
886 if (live_bitmap != nullptr) {
887 CHECK(mark_bitmap != nullptr);
888 live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
889 mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
890 }
891 continuous_spaces_.push_back(continuous_space);
892 // Ensure that spaces remain sorted in increasing order of start address.
893 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
894 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
895 return a->Begin() < b->Begin();
896 });
897 } else {
898 CHECK(space->IsDiscontinuousSpace());
899 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
900 live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
901 mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
902 discontinuous_spaces_.push_back(discontinuous_space);
903 }
904 if (space->IsAllocSpace()) {
905 alloc_spaces_.push_back(space->AsAllocSpace());
906 }
907 }
908
909 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
910 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
911 if (continuous_space->IsDlMallocSpace()) {
912 dlmalloc_space_ = continuous_space->AsDlMallocSpace();
913 } else if (continuous_space->IsRosAllocSpace()) {
914 rosalloc_space_ = continuous_space->AsRosAllocSpace();
915 }
916 }
917
918 void Heap::RemoveSpace(space::Space* space) {
919 DCHECK(space != nullptr);
920 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
921 if (space->IsContinuousSpace()) {
922 DCHECK(!space->IsDiscontinuousSpace());
923 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
924 // Continuous spaces don't necessarily have bitmaps.
925 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
926 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
927 if (live_bitmap != nullptr) {
928 DCHECK(mark_bitmap != nullptr);
929 live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
930 mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
931 }
932 auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
933 DCHECK(it != continuous_spaces_.end());
934 continuous_spaces_.erase(it);
935 } else {
936 DCHECK(space->IsDiscontinuousSpace());
937 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
938 live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
939 mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
940 auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
941 discontinuous_space);
942 DCHECK(it != discontinuous_spaces_.end());
943 discontinuous_spaces_.erase(it);
944 }
945 if (space->IsAllocSpace()) {
946 auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
947 DCHECK(it != alloc_spaces_.end());
948 alloc_spaces_.erase(it);
949 }
950 }
951
952 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
953 // Dump cumulative timings.
954 os << "Dumping cumulative Gc timings\n";
955 uint64_t total_duration = 0;
956 // Dump cumulative loggers for each GC type.
957 uint64_t total_paused_time = 0;
958 for (auto& collector : garbage_collectors_) {
959 total_duration += collector->GetCumulativeTimings().GetTotalNs();
960 total_paused_time += collector->GetTotalPausedTimeNs();
961 collector->DumpPerformanceInfo(os);
962 }
963 uint64_t allocation_time =
964 static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
965 if (total_duration != 0) {
966 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
967 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
968 os << "Mean GC size throughput: "
969 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
970 os << "Mean GC object throughput: "
971 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
972 }
973 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
974 os << "Total number of allocations " << total_objects_allocated << "\n";
975 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
976 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
977 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
978 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
979 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
980 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
981 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
982 if (kMeasureAllocationTime) {
983 os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
984 os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
985 << "\n";
986 }
987 if (HasZygoteSpace()) {
988 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
989 }
990 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
991 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
992 os << "Total GC count: " << GetGcCount() << "\n";
993 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
994 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
995 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
996
997 {
998 MutexLock mu(Thread::Current(), *gc_complete_lock_);
999 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1000 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1001 gc_count_rate_histogram_.DumpBins(os);
1002 os << "\n";
1003 }
1004 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1005 os << "Histogram of blocking GC count per "
1006 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1007 blocking_gc_count_rate_histogram_.DumpBins(os);
1008 os << "\n";
1009 }
1010 }
1011
1012 BaseMutex::DumpAll(os);
1013 }
1014
1015 void Heap::ResetGcPerformanceInfo() {
1016 for (auto& collector : garbage_collectors_) {
1017 collector->ResetMeasurements();
1018 }
1019 total_allocation_time_.StoreRelaxed(0);
1020 total_bytes_freed_ever_ = 0;
1021 total_objects_freed_ever_ = 0;
1022 total_wait_time_ = 0;
1023 blocking_gc_count_ = 0;
1024 blocking_gc_time_ = 0;
1025 gc_count_last_window_ = 0;
1026 blocking_gc_count_last_window_ = 0;
1027 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1028 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1029 {
1030 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1031 gc_count_rate_histogram_.Reset();
1032 blocking_gc_count_rate_histogram_.Reset();
1033 }
1034 }
1035
1036 uint64_t Heap::GetGcCount() const {
1037 uint64_t gc_count = 0U;
1038 for (auto& collector : garbage_collectors_) {
1039 gc_count += collector->GetCumulativeTimings().GetIterations();
1040 }
1041 return gc_count;
1042 }
1043
1044 uint64_t Heap::GetGcTime() const {
1045 uint64_t gc_time = 0U;
1046 for (auto& collector : garbage_collectors_) {
1047 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1048 }
1049 return gc_time;
1050 }
1051
1052 uint64_t Heap::GetBlockingGcCount() const {
1053 return blocking_gc_count_;
1054 }
1055
1056 uint64_t Heap::GetBlockingGcTime() const {
1057 return blocking_gc_time_;
1058 }
1059
1060 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1061 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1062 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1063 gc_count_rate_histogram_.DumpBins(os);
1064 }
1065 }
1066
1067 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1068 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1069 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1070 blocking_gc_count_rate_histogram_.DumpBins(os);
1071 }
1072 }
1073
1074 Heap::~Heap() {
1075 VLOG(heap) << "Starting ~Heap()";
1076 STLDeleteElements(&garbage_collectors_);
1077 // If we don't reset then the mark stack complains in its destructor.
1078 allocation_stack_->Reset();
1079 live_stack_->Reset();
1080 STLDeleteValues(&mod_union_tables_);
1081 STLDeleteValues(&remembered_sets_);
1082 STLDeleteElements(&continuous_spaces_);
1083 STLDeleteElements(&discontinuous_spaces_);
1084 delete gc_complete_lock_;
1085 delete pending_task_lock_;
1086 delete backtrace_lock_;
1087 if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1088 LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1089 << " total=" << seen_backtrace_count_.LoadRelaxed() +
1090 unique_backtrace_count_.LoadRelaxed();
1091 }
1092 VLOG(heap) << "Finished ~Heap()";
1093 }
1094
1095 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
1096 bool fail_ok) const {
1097 for (const auto& space : continuous_spaces_) {
1098 if (space->Contains(obj)) {
1099 return space;
1100 }
1101 }
1102 if (!fail_ok) {
1103 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1104 }
1105 return nullptr;
1106 }
1107
1108 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
1109 bool fail_ok) const {
1110 for (const auto& space : discontinuous_spaces_) {
1111 if (space->Contains(obj)) {
1112 return space;
1113 }
1114 }
1115 if (!fail_ok) {
1116 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1117 }
1118 return nullptr;
1119 }
1120
1121 space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
1122 space::Space* result = FindContinuousSpaceFromObject(obj, true);
1123 if (result != nullptr) {
1124 return result;
1125 }
1126 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1127 }
1128
1129 space::ImageSpace* Heap::GetImageSpace() const {
1130 for (const auto& space : continuous_spaces_) {
1131 if (space->IsImageSpace()) {
1132 return space->AsImageSpace();
1133 }
1134 }
1135 return nullptr;
1136 }
1137
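// Throw an OutOfMemoryError describing the failed allocation; if there was enough total free
// memory, the failure was due to fragmentation, so also log the fragmentation details of the
// space that backs the allocator.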
1138 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1139 std::ostringstream oss;
1140 size_t total_bytes_free = GetFreeMemory();
1141 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1142 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
1143 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1144 if (total_bytes_free >= byte_count) {
1145 space::AllocSpace* space = nullptr;
1146 if (allocator_type == kAllocatorTypeNonMoving) {
1147 space = non_moving_space_;
1148 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1149 allocator_type == kAllocatorTypeDlMalloc) {
1150 space = main_space_;
1151 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1152 allocator_type == kAllocatorTypeTLAB) {
1153 space = bump_pointer_space_;
1154 } else if (allocator_type == kAllocatorTypeRegion ||
1155 allocator_type == kAllocatorTypeRegionTLAB) {
1156 space = region_space_;
1157 }
1158 if (space != nullptr) {
1159 space->LogFragmentationAllocFailure(oss, byte_count);
1160 }
1161 }
1162 self->ThrowOutOfMemoryError(oss.str().c_str());
1163 }
1164
1165 void Heap::DoPendingCollectorTransition() {
1166 CollectorType desired_collector_type = desired_collector_type_;
1167 // Launch homogeneous space compaction if it is desired.
1168 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1169 if (!CareAboutPauseTimes()) {
1170 PerformHomogeneousSpaceCompact();
1171 } else {
1172 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1173 }
1174 } else {
1175 TransitionCollector(desired_collector_type);
1176 }
1177 }
1178
1179 void Heap::Trim(Thread* self) {
1180 if (!CareAboutPauseTimes()) {
1181 ATRACE_BEGIN("Deflating monitors");
1182 // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
1183 // about pauses.
1184 Runtime* runtime = Runtime::Current();
1185 runtime->GetThreadList()->SuspendAll(__FUNCTION__);
1186 uint64_t start_time = NanoTime();
1187 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1188 VLOG(heap) << "Deflating " << count << " monitors took "
1189 << PrettyDuration(NanoTime() - start_time);
1190 runtime->GetThreadList()->ResumeAll();
1191 ATRACE_END();
1192 }
1193 TrimIndirectReferenceTables(self);
1194 TrimSpaces(self);
1195 }
1196
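// Checkpoint closure that trims each thread's local indirect reference table. Runnable threads
// pass the barrier on behalf of the thread that requested the trim.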
1197 class TrimIndirectReferenceTableClosure : public Closure {
1198 public:
1199 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1200 }
1201 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1202 ATRACE_BEGIN("Trimming reference table");
1203 thread->GetJniEnv()->locals.Trim();
1204 ATRACE_END();
1205 // If thread is a running mutator, then act on behalf of the trim thread.
1206 // See the code in ThreadList::RunCheckpoint.
1207 if (thread->GetState() == kRunnable) {
1208 barrier_->Pass(Thread::Current());
1209 }
1210 }
1211
1212 private:
1213 Barrier* const barrier_;
1214 };
1215
1216 void Heap::TrimIndirectReferenceTables(Thread* self) {
1217 ScopedObjectAccess soa(self);
1218 ATRACE_BEGIN(__FUNCTION__);
1219 JavaVMExt* vm = soa.Vm();
1220 // Trim globals indirect reference table.
1221 vm->TrimGlobals();
1222 // Trim locals indirect reference tables.
1223 Barrier barrier(0);
1224 TrimIndirectReferenceTableClosure closure(&barrier);
1225 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1226 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1227 if (barrier_count != 0) {
1228 barrier.Increment(self, barrier_count);
1229 }
1230 ATRACE_END();
1231 }
1232
1233 void Heap::TrimSpaces(Thread* self) {
1234 {
1235 // Need to do this before acquiring the locks since we don't want to get suspended while
1236 // holding any locks.
1237 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1238 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1239 // trimming.
1240 MutexLock mu(self, *gc_complete_lock_);
1241 // Ensure there is only one GC at a time.
1242 WaitForGcToCompleteLocked(kGcCauseTrim, self);
1243 collector_type_running_ = kCollectorTypeHeapTrim;
1244 }
1245 ATRACE_BEGIN(__FUNCTION__);
1246 const uint64_t start_ns = NanoTime();
1247 // Trim the managed spaces.
1248 uint64_t total_alloc_space_allocated = 0;
1249 uint64_t total_alloc_space_size = 0;
1250 uint64_t managed_reclaimed = 0;
1251 for (const auto& space : continuous_spaces_) {
1252 if (space->IsMallocSpace()) {
1253 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1254 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1255 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1256 // for a long period of time.
1257 managed_reclaimed += malloc_space->Trim();
1258 }
1259 total_alloc_space_size += malloc_space->Size();
1260 }
1261 }
1262 total_alloc_space_allocated = GetBytesAllocated();
1263 if (large_object_space_ != nullptr) {
1264 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1265 }
1266 if (bump_pointer_space_ != nullptr) {
1267 total_alloc_space_allocated -= bump_pointer_space_->Size();
1268 }
1269 if (region_space_ != nullptr) {
1270 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1271 }
1272 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1273 static_cast<float>(total_alloc_space_size);
1274 uint64_t gc_heap_end_ns = NanoTime();
1275 // We never move things in the native heap, so we can finish the GC at this point.
1276 FinishGC(self, collector::kGcTypeNone);
1277 size_t native_reclaimed = 0;
1278
1279 #ifdef HAVE_ANDROID_OS
1280 // Only trim the native heap if we don't care about pauses.
1281 if (!CareAboutPauseTimes()) {
1282 #if defined(USE_DLMALLOC)
1283 // Trim the native heap.
1284 dlmalloc_trim(0);
1285 dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
1286 #elif defined(USE_JEMALLOC)
1287 // Jemalloc does its own internal trimming.
1288 #else
1289 UNIMPLEMENTED(WARNING) << "Add trimming support";
1290 #endif
1291 }
1292 #endif // HAVE_ANDROID_OS
1293 uint64_t end_ns = NanoTime();
1294 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1295 << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
1296 << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
1297 << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
1298 << "%.";
1299 ATRACE_END();
1300 }
1301
1302 bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1303 // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1304 // taking the lock.
1305 if (obj == nullptr) {
1306 return true;
1307 }
1308 return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
1309 }
1310
1311 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1312 return FindContinuousSpaceFromObject(obj, true) != nullptr;
1313 }
1314
1315 bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1316 if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1317 return false;
1318 }
1319 for (const auto& space : continuous_spaces_) {
1320 if (space->HasAddress(obj)) {
1321 return true;
1322 }
1323 }
1324 return false;
1325 }
1326
1327 bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
1328 bool search_live_stack, bool sorted) {
1329 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1330 return false;
1331 }
1332 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
1333 mirror::Class* klass = obj->GetClass<kVerifyNone>();
1334 if (obj == klass) {
1335 // This case happens for java.lang.Class.
1336 return true;
1337 }
1338 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1339 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
1340 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1341 // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1342 return temp_space_->Contains(obj);
1343 }
1344 if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
1345 return true;
1346 }
1347 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1348 space::DiscontinuousSpace* d_space = nullptr;
1349 if (c_space != nullptr) {
1350 if (c_space->GetLiveBitmap()->Test(obj)) {
1351 return true;
1352 }
1353 } else {
1354 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1355 if (d_space != nullptr) {
1356 if (d_space->GetLiveBitmap()->Test(obj)) {
1357 return true;
1358 }
1359 }
1360 }
1361 // This is covering the allocation/live stack swapping that is done without mutators suspended.
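    // When the stacks are not sorted we retry a few times with short sleeps, since a concurrent
    // swap of the allocation and live stacks can transiently hide a live object from both.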
1362 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1363 if (i > 0) {
1364 NanoSleep(MsToNs(10));
1365 }
1366 if (search_allocation_stack) {
1367 if (sorted) {
1368 if (allocation_stack_->ContainsSorted(obj)) {
1369 return true;
1370 }
1371 } else if (allocation_stack_->Contains(obj)) {
1372 return true;
1373 }
1374 }
1375
1376 if (search_live_stack) {
1377 if (sorted) {
1378 if (live_stack_->ContainsSorted(obj)) {
1379 return true;
1380 }
1381 } else if (live_stack_->Contains(obj)) {
1382 return true;
1383 }
1384 }
1385 }
1386 // We need to check the bitmaps again since there is a race where we mark something as live and
1387 // then clear the stack containing it.
1388 if (c_space != nullptr) {
1389 if (c_space->GetLiveBitmap()->Test(obj)) {
1390 return true;
1391 }
1392 } else {
1393 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1394 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
1395 return true;
1396 }
1397 }
1398 return false;
1399 }
1400
1401 std::string Heap::DumpSpaces() const {
1402 std::ostringstream oss;
1403 DumpSpaces(oss);
1404 return oss.str();
1405 }
1406
1407 void Heap::DumpSpaces(std::ostream& stream) const {
1408 for (const auto& space : continuous_spaces_) {
1409 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1410 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1411 stream << space << " " << *space << "\n";
1412 if (live_bitmap != nullptr) {
1413 stream << live_bitmap << " " << *live_bitmap << "\n";
1414 }
1415 if (mark_bitmap != nullptr) {
1416 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1417 }
1418 }
1419 for (const auto& space : discontinuous_spaces_) {
1420 stream << space << " " << *space << "\n";
1421 }
1422 }
1423
1424 void Heap::VerifyObjectBody(mirror::Object* obj) {
1425 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1426 return;
1427 }
1428
1429 // Ignore early dawn of the universe verifications.
1430 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1431 return;
1432 }
1433 CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
1434 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1435 CHECK(c != nullptr) << "Null class in object " << obj;
1436 CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
1437 CHECK(VerifyClassClass(c));
1438
1439 if (verify_object_mode_ > kVerifyObjectModeFast) {
1440 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1441 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1442 }
1443 }
1444
1445 void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
1446 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
1447 }
1448
1449 void Heap::VerifyHeap() {
1450 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1451 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
1452 }
1453
1454 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1455 // Use signed comparison since freed bytes can be negative when a background compaction to
1456 // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
1457 // free list backed space, which typically increases the memory footprint due to padding and binning.
1458 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1459 // Note: This relies on two's complement for handling negative freed_bytes.
1460 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1461 if (Runtime::Current()->HasStatsEnabled()) {
1462 RuntimeStats* thread_stats = Thread::Current()->GetStats();
1463 thread_stats->freed_objects += freed_objects;
1464 thread_stats->freed_bytes += freed_bytes;
1465 // TODO: Do this concurrently.
1466 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1467 global_stats->freed_objects += freed_objects;
1468 global_stats->freed_bytes += freed_bytes;
1469 }
1470 }
1471
1472 void Heap::RecordFreeRevoke() {
1473 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1474 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1475 // If there is a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_
1476 // all the way to zero, since the remainder will be subtracted at the next GC.
1477 size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1478 CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1479 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1480 CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1481 bytes_freed) << "num_bytes_allocated_ underflow";
1482 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1483 }
1484
1485 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1486 for (const auto& space : continuous_spaces_) {
1487 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1488 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1489 return space->AsContinuousSpace()->AsRosAllocSpace();
1490 }
1491 }
1492 }
1493 return nullptr;
1494 }
1495
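// Slow path taken when TryToAllocate fails. The strategy escalates: wait for any in-progress GC
// and retry, run the next planned GC type and retry, run every remaining GC type in gc_plan_ and
// retry, retry once more with heap growth allowed, and finally run a full GC that clears
// SoftReferences before either attempting homogeneous space compaction / disabling the moving GC
// (depending on the allocator) or throwing OutOfMemoryError.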
1496 mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
1497 size_t alloc_size, size_t* bytes_allocated,
1498 size_t* usable_size,
1499 size_t* bytes_tl_bulk_allocated,
1500 mirror::Class** klass) {
1501 bool was_default_allocator = allocator == GetCurrentAllocator();
1502 // Make sure there is no pending exception since we may need to throw an OOME.
1503 self->AssertNoPendingException();
1504 DCHECK(klass != nullptr);
1505 StackHandleScope<1> hs(self);
1506 HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1507 klass = nullptr; // Invalidate for safety.
1508 // The allocation failed. If the GC is running, block until it completes, and then retry the
1509 // allocation.
1510 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1511 if (last_gc != collector::kGcTypeNone) {
1512 // If we were the default allocator but the allocator changed while we were suspended,
1513 // abort the allocation.
1514 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1515 return nullptr;
1516 }
1517 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1518 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1519 usable_size, bytes_tl_bulk_allocated);
1520 if (ptr != nullptr) {
1521 return ptr;
1522 }
1523 }
1524
1525 collector::GcType tried_type = next_gc_type_;
1526 const bool gc_ran =
1527 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1528 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1529 return nullptr;
1530 }
1531 if (gc_ran) {
1532 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1533 usable_size, bytes_tl_bulk_allocated);
1534 if (ptr != nullptr) {
1535 return ptr;
1536 }
1537 }
1538
1539 // Loop through our different GC types and try to GC until we get enough free memory.
1540 for (collector::GcType gc_type : gc_plan_) {
1541 if (gc_type == tried_type) {
1542 continue;
1543 }
1544 // Attempt to run the collector, if we succeed, re-try the allocation.
1545 const bool plan_gc_ran =
1546 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1547 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1548 return nullptr;
1549 }
1550 if (plan_gc_ran) {
1551 // Did we free sufficient memory for the allocation to succeed?
1552 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1553 usable_size, bytes_tl_bulk_allocated);
1554 if (ptr != nullptr) {
1555 return ptr;
1556 }
1557 }
1558 }
1559 // Allocations have failed after GCs; this is an exceptional state.
1560 // Try harder, growing the heap if necessary.
1561 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1562 usable_size, bytes_tl_bulk_allocated);
1563 if (ptr != nullptr) {
1564 return ptr;
1565 }
1566 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1567 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1568 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1569 // OOME.
1570 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1571 << " allocation";
1572 // TODO: Run finalization, but this may cause more allocations to occur.
1573 // We don't need a WaitForGcToComplete here either.
1574 DCHECK(!gc_plan_.empty());
1575 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1576 if (was_default_allocator && allocator != GetCurrentAllocator()) {
1577 return nullptr;
1578 }
1579 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1580 bytes_tl_bulk_allocated);
1581 if (ptr == nullptr) {
1582 const uint64_t current_time = NanoTime();
1583 switch (allocator) {
1584 case kAllocatorTypeRosAlloc:
1585 // Fall-through.
1586 case kAllocatorTypeDlMalloc: {
1587 if (use_homogeneous_space_compaction_for_oom_ &&
1588 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1589 min_interval_homogeneous_space_compaction_by_oom_) {
1590 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1591 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1592 switch (result) {
1593 case HomogeneousSpaceCompactResult::kSuccess:
1594 // If the allocation succeeded, we delayed an OOM.
1595 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1596 usable_size, bytes_tl_bulk_allocated);
1597 if (ptr != nullptr) {
1598 count_delayed_oom_++;
1599 }
1600 break;
1601 case HomogeneousSpaceCompactResult::kErrorReject:
1602 // Reject due to disabled moving GC.
1603 break;
1604 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1605 // Throw OOM by default.
1606 break;
1607 default: {
1608 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1609 << static_cast<size_t>(result);
1610 UNREACHABLE();
1611 }
1612 }
1613 // Always print that we ran homogeneous space compaction since this can cause jank.
1614 VLOG(heap) << "Ran heap homogeneous space compaction, "
1615 << " requested defragmentation "
1616 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1617 << " performed defragmentation "
1618 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1619 << " ignored homogeneous space compaction "
1620 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1621 << " delayed count = "
1622 << count_delayed_oom_.LoadSequentiallyConsistent();
1623 }
1624 break;
1625 }
1626 case kAllocatorTypeNonMoving: {
1627 // Try to transition the heap if the allocation failure was due to the space being full.
1628 if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
1629 // If we aren't out of memory then the OOM was probably from the non moving space being
1630 // full. Attempt to disable compaction and turn the main space into a non moving space.
1631 DisableMovingGc();
1632 // If we are still a moving GC then something must have caused the transition to fail.
1633 if (IsMovingGc(collector_type_)) {
1634 MutexLock mu(self, *gc_complete_lock_);
1635 // If we couldn't disable moving GC, just throw OOME and return null.
1636 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1637 << disable_moving_gc_count_;
1638 } else {
1639 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1640 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1641 usable_size, bytes_tl_bulk_allocated);
1642 }
1643 }
1644 break;
1645 }
1646 default: {
1647 // Do nothing for other allocators.
1648 }
1649 }
1650 }
1651 // If the allocation hasn't succeeded by this point, throw an OOM error.
1652 if (ptr == nullptr) {
1653 ThrowOutOfMemoryError(self, alloc_size, allocator);
1654 }
1655 return ptr;
1656 }
1657
1658 void Heap::SetTargetHeapUtilization(float target) {
1659 DCHECK_GT(target, 0.0f); // asserted in Java code
1660 DCHECK_LT(target, 1.0f);
1661 target_utilization_ = target;
1662 }
1663
1664 size_t Heap::GetObjectsAllocated() const {
1665 Thread* self = Thread::Current();
1666 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1667 auto* tl = Runtime::Current()->GetThreadList();
1668 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1669 tl->SuspendAll(__FUNCTION__);
1670 size_t total = 0;
1671 {
1672 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1673 for (space::AllocSpace* space : alloc_spaces_) {
1674 total += space->GetObjectsAllocated();
1675 }
1676 }
1677 tl->ResumeAll();
1678 return total;
1679 }
1680
1681 uint64_t Heap::GetObjectsAllocatedEver() const {
1682 uint64_t total = GetObjectsFreedEver();
1683 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1684 if (Thread::Current() != nullptr) {
1685 total += GetObjectsAllocated();
1686 }
1687 return total;
1688 }
1689
1690 uint64_t Heap::GetBytesAllocatedEver() const {
1691 return GetBytesFreedEver() + GetBytesAllocated();
1692 }
1693
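// Visitor that walks every object on the heap and counts, for each requested class, how many
// instances exist, optionally counting subclasses via IsAssignableFrom. Presumably used for
// debugger/DDMS instance-count queries.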
1694 class InstanceCounter {
1695 public:
1696 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
1697 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1698 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
1699 }
1700 static void Callback(mirror::Object* obj, void* arg)
1701 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1702 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1703 mirror::Class* instance_class = obj->GetClass();
1704 CHECK(instance_class != nullptr);
1705 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1706 if (instance_counter->use_is_assignable_from_) {
1707 if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
1708 ++instance_counter->counts_[i];
1709 }
1710 } else if (instance_class == instance_counter->classes_[i]) {
1711 ++instance_counter->counts_[i];
1712 }
1713 }
1714 }
1715
1716 private:
1717 const std::vector<mirror::Class*>& classes_;
1718 bool use_is_assignable_from_;
1719 uint64_t* const counts_;
1720 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1721 };
1722
1723 void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
1724 uint64_t* counts) {
1725 InstanceCounter counter(classes, use_is_assignable_from, counts);
1726 VisitObjects(InstanceCounter::Callback, &counter);
1727 }
1728
1729 class InstanceCollector {
1730 public:
1731 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
1732 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1733 : class_(c), max_count_(max_count), instances_(instances) {
1734 }
1735 static void Callback(mirror::Object* obj, void* arg)
1736 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1737 DCHECK(arg != nullptr);
1738 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1739 if (obj->GetClass() == instance_collector->class_) {
1740 if (instance_collector->max_count_ == 0 ||
1741 instance_collector->instances_.size() < instance_collector->max_count_) {
1742 instance_collector->instances_.push_back(obj);
1743 }
1744 }
1745 }
1746
1747 private:
1748 const mirror::Class* const class_;
1749 const uint32_t max_count_;
1750 std::vector<mirror::Object*>& instances_;
1751 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1752 };
1753
1754 void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1755 std::vector<mirror::Object*>& instances) {
1756 InstanceCollector collector(c, max_count, instances);
1757 VisitObjects(&InstanceCollector::Callback, &collector);
1758 }
1759
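// Visitor that records, up to max_count, every object with a reference field pointing at the
// target object. Like InstanceCollector, it is driven by VisitObjects over the whole heap.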
1760 class ReferringObjectsFinder {
1761 public:
1762 ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1763 std::vector<mirror::Object*>& referring_objects)
1764 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1765 : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1766 }
1767
1768 static void Callback(mirror::Object* obj, void* arg)
1769 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1770 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1771 }
1772
1773 // For bitmap Visit.
1774 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1775 // annotalysis on visitors.
1776 void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1777 o->VisitReferences<true>(*this, VoidFunctor());
1778 }
1779
1780 // For Object::VisitReferences.
1781 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1782 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1783 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1784 if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1785 referring_objects_.push_back(obj);
1786 }
1787 }
1788
1789 private:
1790 const mirror::Object* const object_;
1791 const uint32_t max_count_;
1792 std::vector<mirror::Object*>& referring_objects_;
1793 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1794 };
1795
1796 void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1797 std::vector<mirror::Object*>& referring_objects) {
1798 ReferringObjectsFinder finder(o, max_count, referring_objects);
1799 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
1800 }
1801
1802 void Heap::CollectGarbage(bool clear_soft_references) {
1803 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1804 // last GC will not necessarily have been cleared.
1805 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
1806 }
1807
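// Defragments the main malloc space by copying all of its live objects into the backup space and
// then swapping the two. This is a stop-the-world copy (all threads are suspended) and is used,
// for example, when an allocation would otherwise OOM and the process is not jank-perceptible.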
1808 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1809 Thread* self = Thread::Current();
1810 // Increment the count of requested homogeneous space compactions.
1811 count_requested_homogeneous_space_compaction_++;
1812 // Store performed homogeneous space compaction at a new request arrival.
1813 ThreadList* tl = Runtime::Current()->GetThreadList();
1814 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1815 Locks::mutator_lock_->AssertNotHeld(self);
1816 {
1817 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
1818 MutexLock mu(self, *gc_complete_lock_);
1819 // Ensure there is only one GC at a time.
1820 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
1821 // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
1822 // disable count is non-zero.
1823 // If the collector type changed to something which doesn't benefit from homogeneous space
1824 // compaction, exit.
1825 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
1826 !main_space_->CanMoveObjects()) {
1827 return HomogeneousSpaceCompactResult::kErrorReject;
1828 }
1829 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1830 }
1831 if (Runtime::Current()->IsShuttingDown(self)) {
1832 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1833 // cause objects to get finalized.
1834 FinishGC(self, collector::kGcTypeNone);
1835 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1836 }
1837 // Suspend all threads.
1838 tl->SuspendAll(__FUNCTION__);
1839 uint64_t start_time = NanoTime();
1840 // Launch compaction.
1841 space::MallocSpace* to_space = main_space_backup_.release();
1842 space::MallocSpace* from_space = main_space_;
1843 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1844 const uint64_t space_size_before_compaction = from_space->Size();
1845 AddSpace(to_space);
1846 // Make sure that we will have enough room to copy.
1847 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
1848 collector::GarbageCollector* collector = Compact(to_space, from_space,
1849 kGcCauseHomogeneousSpaceCompact);
1850 const uint64_t space_size_after_compaction = to_space->Size();
1851 main_space_ = to_space;
1852 main_space_backup_.reset(from_space);
1853 RemoveSpace(from_space);
1854 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
1855 // Update performed homogeneous space compaction count.
1856 count_performed_homogeneous_space_compaction_++;
1857 // Print the statistics log and resume all threads.
1858 uint64_t duration = NanoTime() - start_time;
1859 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1860 << PrettySize(space_size_before_compaction) << " -> "
1861 << PrettySize(space_size_after_compaction) << " compact-ratio: "
1862 << std::fixed << static_cast<double>(space_size_after_compaction) /
1863 static_cast<double>(space_size_before_compaction);
1864 tl->ResumeAll();
1865 // Finish GC.
1866 reference_processor_.EnqueueClearedReferences(self);
1867 GrowForUtilization(semi_space_collector_);
1868 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
1869 FinishGC(self, collector::kGcTypeFull);
1870 return HomogeneousSpaceCompactResult::kSuccess;
1871 }
1872
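// Switches the heap between collector types at runtime (e.g. on foreground/background process
// state changes). A moving <-> non-moving transition requires copying the main space contents,
// so all threads are suspended while the spaces are rebuilt around the copied data.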
1873 void Heap::TransitionCollector(CollectorType collector_type) {
1874 if (collector_type == collector_type_) {
1875 return;
1876 }
1877 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1878 << " -> " << static_cast<int>(collector_type);
1879 uint64_t start_time = NanoTime();
1880 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1881 Runtime* const runtime = Runtime::Current();
1882 ThreadList* const tl = runtime->GetThreadList();
1883 Thread* const self = Thread::Current();
1884 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1885 Locks::mutator_lock_->AssertNotHeld(self);
1886 // Busy wait until we can GC (StartGC can fail if we have a non-zero
1887 // compacting_gc_disable_count_; this should rarely occur).
1888 for (;;) {
1889 {
1890 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
1891 MutexLock mu(self, *gc_complete_lock_);
1892 // Ensure there is only one GC at a time.
1893 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
1894 // Currently we only need a heap transition if we switch from a moving collector to a
1895 // non-moving one, or vice versa.
1896 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
1897 // If someone else beat us to it and changed the collector before we could, exit.
1898 // This is safe to do before the suspend all since we set the collector_type_running_ before
1899 // we exit the loop. If another thread attempts to do the heap transition before we exit,
1900 // then it would get blocked on WaitForGcToCompleteLocked.
1901 if (collector_type == collector_type_) {
1902 return;
1903 }
1904 // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet released it.
1905 if (!copying_transition || disable_moving_gc_count_ == 0) {
1906 // TODO: Not hard code in semi-space collector?
1907 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1908 break;
1909 }
1910 }
1911 usleep(1000);
1912 }
1913 if (runtime->IsShuttingDown(self)) {
1914 // Don't allow heap transitions to happen if the runtime is shutting down since these can
1915 // cause objects to get finalized.
1916 FinishGC(self, collector::kGcTypeNone);
1917 return;
1918 }
1919 collector::GarbageCollector* collector = nullptr;
1920 tl->SuspendAll(__FUNCTION__);
1921 switch (collector_type) {
1922 case kCollectorTypeSS: {
1923 if (!IsMovingGc(collector_type_)) {
1924 // Create the bump pointer space from the backup space.
1925 CHECK(main_space_backup_ != nullptr);
1926 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
1927 // We are transitioning from non moving GC -> moving GC, since we copied from the bump
1928 // pointer space last transition it will be protected.
1929 CHECK(mem_map != nullptr);
1930 mem_map->Protect(PROT_READ | PROT_WRITE);
1931 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
1932 mem_map.release());
1933 AddSpace(bump_pointer_space_);
1934 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
1935 // Use the now empty main space mem map for the bump pointer temp space.
1936 mem_map.reset(main_space_->ReleaseMemMap());
1937 // Unset the pointers just in case.
1938 if (dlmalloc_space_ == main_space_) {
1939 dlmalloc_space_ = nullptr;
1940 } else if (rosalloc_space_ == main_space_) {
1941 rosalloc_space_ = nullptr;
1942 }
1943 // Remove the main space so that we don't try to trim it; this doesn't work for debug
1944 // builds since RosAlloc attempts to read the magic number from a protected page.
1945 RemoveSpace(main_space_);
1946 RemoveRememberedSet(main_space_);
1947 delete main_space_; // Delete the space since it has been removed.
1948 main_space_ = nullptr;
1949 RemoveRememberedSet(main_space_backup_.get());
1950 main_space_backup_.reset(nullptr); // Deletes the space.
1951 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
1952 mem_map.release());
1953 AddSpace(temp_space_);
1954 }
1955 break;
1956 }
1957 case kCollectorTypeMS:
1958 // Fall through.
1959 case kCollectorTypeCMS: {
1960 if (IsMovingGc(collector_type_)) {
1961 CHECK(temp_space_ != nullptr);
1962 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
1963 RemoveSpace(temp_space_);
1964 temp_space_ = nullptr;
1965 mem_map->Protect(PROT_READ | PROT_WRITE);
1966 CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize,
1967 std::min(mem_map->Size(), growth_limit_), mem_map->Size());
1968 mem_map.release();
1969 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
1970 AddSpace(main_space_);
1971 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
1972 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
1973 RemoveSpace(bump_pointer_space_);
1974 bump_pointer_space_ = nullptr;
1975 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
1976 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
1977 if (kIsDebugBuild && kUseRosAlloc) {
1978 mem_map->Protect(PROT_READ | PROT_WRITE);
1979 }
1980 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
1981 mem_map.get(), kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
1982 mem_map->Size(), name, true));
1983 if (kIsDebugBuild && kUseRosAlloc) {
1984 mem_map->Protect(PROT_NONE);
1985 }
1986 mem_map.release();
1987 }
1988 break;
1989 }
1990 default: {
1991 LOG(FATAL) << "Attempted to transition to invalid collector type "
1992 << static_cast<size_t>(collector_type);
1993 break;
1994 }
1995 }
1996 ChangeCollector(collector_type);
1997 tl->ResumeAll();
1998 // Can't call into java code with all threads suspended.
1999 reference_processor_.EnqueueClearedReferences(self);
2000 uint64_t duration = NanoTime() - start_time;
2001 GrowForUtilization(semi_space_collector_);
2002 DCHECK(collector != nullptr);
2003 LogGC(kGcCauseCollectorTransition, collector);
2004 FinishGC(self, collector::kGcTypeFull);
2005 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2006 int32_t delta_allocated = before_allocated - after_allocated;
2007 std::string saved_str;
2008 if (delta_allocated >= 0) {
2009 saved_str = " saved at least " + PrettySize(delta_allocated);
2010 } else {
2011 saved_str = " expanded " + PrettySize(-delta_allocated);
2012 }
2013 VLOG(heap) << "Heap transition to " << process_state_ << " took "
2014 << PrettyDuration(duration) << saved_str;
2015 }
2016
2017 void Heap::ChangeCollector(CollectorType collector_type) {
2018 // TODO: Only do this with all mutators suspended to avoid races.
2019 if (collector_type != collector_type_) {
2020 if (collector_type == kCollectorTypeMC) {
2021 // Don't allow mark compact unless support is compiled in.
2022 CHECK(kMarkCompactSupport);
2023 }
2024 collector_type_ = collector_type;
2025 gc_plan_.clear();
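    // The GC plan is ordered from cheapest to most thorough; collection escalates through it when
    // earlier types do not free enough memory (see AllocateInternalWithGc).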
2026 switch (collector_type_) {
2027 case kCollectorTypeCC: {
2028 gc_plan_.push_back(collector::kGcTypeFull);
2029 if (use_tlab_) {
2030 ChangeAllocator(kAllocatorTypeRegionTLAB);
2031 } else {
2032 ChangeAllocator(kAllocatorTypeRegion);
2033 }
2034 break;
2035 }
2036 case kCollectorTypeMC: // Fall-through.
2037 case kCollectorTypeSS: // Fall-through.
2038 case kCollectorTypeGSS: {
2039 gc_plan_.push_back(collector::kGcTypeFull);
2040 if (use_tlab_) {
2041 ChangeAllocator(kAllocatorTypeTLAB);
2042 } else {
2043 ChangeAllocator(kAllocatorTypeBumpPointer);
2044 }
2045 break;
2046 }
2047 case kCollectorTypeMS: {
2048 gc_plan_.push_back(collector::kGcTypeSticky);
2049 gc_plan_.push_back(collector::kGcTypePartial);
2050 gc_plan_.push_back(collector::kGcTypeFull);
2051 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2052 break;
2053 }
2054 case kCollectorTypeCMS: {
2055 gc_plan_.push_back(collector::kGcTypeSticky);
2056 gc_plan_.push_back(collector::kGcTypePartial);
2057 gc_plan_.push_back(collector::kGcTypeFull);
2058 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2059 break;
2060 }
2061 default: {
2062 UNIMPLEMENTED(FATAL);
2063 UNREACHABLE();
2064 }
2065 }
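    // For concurrent collectors, start a background GC once allocations come within
    // kMinConcurrentRemainingBytes of the current footprint limit; otherwise disable the trigger.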
2066 if (IsGcConcurrent()) {
2067 concurrent_start_bytes_ =
2068 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2069 } else {
2070 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2071 }
2072 }
2073 }
2074
2075 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2076 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
2077 public:
2078 explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
2079 bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
2080 }
2081
2082 void BuildBins(space::ContinuousSpace* space) {
2083 bin_live_bitmap_ = space->GetLiveBitmap();
2084 bin_mark_bitmap_ = space->GetMarkBitmap();
2085 BinContext context;
2086 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
2087 context.collector_ = this;
2088 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2089 // Note: This requires traversing the space in increasing order of object addresses.
2090 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
2091 // Add the last bin which spans after the last object to the end of the space.
2092 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
2093 }
2094
2095 private:
2096 struct BinContext {
2097 uintptr_t prev_; // The end of the previous object.
2098 ZygoteCompactingCollector* collector_;
2099 };
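  // Free gaps between live objects in the target space are recorded as "bins" keyed by size;
  // MarkNonForwardedObject later best-fit allocates forwarded objects into the smallest bin that
  // fits, which keeps the resulting zygote space compact.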
2100 // Maps from bin sizes to locations.
2101 std::multimap<size_t, uintptr_t> bins_;
2102 // Live bitmap of the space which contains the bins.
2103 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2104 // Mark bitmap of the space which contains the bins.
2105 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2106
2107 static void Callback(mirror::Object* obj, void* arg)
2108 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2109 DCHECK(arg != nullptr);
2110 BinContext* context = reinterpret_cast<BinContext*>(arg);
2111 ZygoteCompactingCollector* collector = context->collector_;
2112 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2113 size_t bin_size = object_addr - context->prev_;
2114 // Add the bin consisting of the end of the previous object to the start of the current object.
2115 collector->AddBin(bin_size, context->prev_);
2116 context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
2117 }
2118
2119 void AddBin(size_t size, uintptr_t position) {
2120 if (size != 0) {
2121 bins_.insert(std::make_pair(size, position));
2122 }
2123 }
2124
2125 virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
2126 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2127 // allocator.
2128 UNUSED(space);
2129 return false;
2130 }
2131
2132 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
2133 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2134 size_t obj_size = obj->SizeOf();
2135 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2136 mirror::Object* forward_address;
2137 // Find the smallest bin which we can move obj in.
2138 auto it = bins_.lower_bound(alloc_size);
2139 if (it == bins_.end()) {
2140 // No available space in the bins, place it in the target space instead (grows the zygote
2141 // space).
2142 size_t bytes_allocated, dummy;
2143 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2144 if (to_space_live_bitmap_ != nullptr) {
2145 to_space_live_bitmap_->Set(forward_address);
2146 } else {
2147 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2148 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2149 }
2150 } else {
2151 size_t size = it->first;
2152 uintptr_t pos = it->second;
2153 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2154 forward_address = reinterpret_cast<mirror::Object*>(pos);
2155 // Set the live and mark bits so that sweeping system weaks works properly.
2156 bin_live_bitmap_->Set(forward_address);
2157 bin_mark_bitmap_->Set(forward_address);
2158 DCHECK_GE(size, alloc_size);
2159 // Add a new bin with the remaining space.
2160 AddBin(size - alloc_size, pos + alloc_size);
2161 }
2162 // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
2163 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2164 if (kUseBakerOrBrooksReadBarrier) {
2165 obj->AssertReadBarrierPointer();
2166 if (kUseBrooksReadBarrier) {
2167 DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
2168 forward_address->SetReadBarrierPointer(forward_address);
2169 }
2170 forward_address->AssertReadBarrierPointer();
2171 }
2172 return forward_address;
2173 }
2174 };
2175
2176 void Heap::UnBindBitmaps() {
2177 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2178 for (const auto& space : GetContinuousSpaces()) {
2179 if (space->IsContinuousMemMapAllocSpace()) {
2180 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2181 if (alloc_space->HasBoundBitmaps()) {
2182 alloc_space->UnBindBitmaps();
2183 }
2184 }
2185 }
2186 }
2187
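// Called in the zygote before forking the first application. Compacts the pre-fork heap into the
// non-moving space, converts it into a read-mostly zygote space shared copy-on-write with forked
// children, and sets up the accounting (mod-union table, remembered set) needed to GC the
// remaining spaces without rescanning the zygote space.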
2188 void Heap::PreZygoteFork() {
2189 if (!HasZygoteSpace()) {
2190 // We still want to GC in case there are some unreachable non-moving objects that could cause a
2191 // suboptimal bin packing when we compact the zygote space.
2192 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2193 }
2194 Thread* self = Thread::Current();
2195 MutexLock mu(self, zygote_creation_lock_);
2196 // Try to see if we have any Zygote spaces.
2197 if (HasZygoteSpace()) {
2198 return;
2199 }
2200 Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
2201 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2202 VLOG(heap) << "Starting PreZygoteFork";
2203 // Trim the pages at the end of the non moving space.
2204 non_moving_space_->Trim();
2205 // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
2206 // there.
2207 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2208 const bool same_space = non_moving_space_ == main_space_;
2209 if (kCompactZygote) {
2210 // Temporarily disable rosalloc verification because the zygote
2211 // compaction will mess up the rosalloc internal metadata.
2212 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2213 ZygoteCompactingCollector zygote_collector(this);
2214 zygote_collector.BuildBins(non_moving_space_);
2215 // Create a new bump pointer space which we will compact into.
2216 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2217 non_moving_space_->Limit());
2218 // Compact the bump pointer space to a new zygote bump pointer space.
2219 bool reset_main_space = false;
2220 if (IsMovingGc(collector_type_)) {
2221 if (collector_type_ == kCollectorTypeCC) {
2222 zygote_collector.SetFromSpace(region_space_);
2223 } else {
2224 zygote_collector.SetFromSpace(bump_pointer_space_);
2225 }
2226 } else {
2227 CHECK(main_space_ != nullptr);
2228 CHECK_NE(main_space_, non_moving_space_)
2229 << "Does not make sense to compact within the same space";
2230 // Copy from the main space.
2231 zygote_collector.SetFromSpace(main_space_);
2232 reset_main_space = true;
2233 }
2234 zygote_collector.SetToSpace(&target_space);
2235 zygote_collector.SetSwapSemiSpaces(false);
2236 zygote_collector.Run(kGcCauseCollectorTransition, false);
2237 if (reset_main_space) {
2238 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2239 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2240 MemMap* mem_map = main_space_->ReleaseMemMap();
2241 RemoveSpace(main_space_);
2242 space::Space* old_main_space = main_space_;
2243 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2244 mem_map->Size());
2245 delete old_main_space;
2246 AddSpace(main_space_);
2247 } else {
2248 if (collector_type_ == kCollectorTypeCC) {
2249 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2250 } else {
2251 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2252 }
2253 }
2254 if (temp_space_ != nullptr) {
2255 CHECK(temp_space_->IsEmpty());
2256 }
2257 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2258 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2259 // Update the end and write out image.
2260 non_moving_space_->SetEnd(target_space.End());
2261 non_moving_space_->SetLimit(target_space.Limit());
2262 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2263 }
2264 // Change the collector to the post zygote one.
2265 ChangeCollector(foreground_collector_type_);
2266 // Save the old space so that we can remove it after we complete creating the zygote space.
2267 space::MallocSpace* old_alloc_space = non_moving_space_;
2268 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2269 // the remaining available space.
2270 // Remove the old space before creating the zygote space since creating the zygote space sets
2271 // the old alloc space's bitmaps to null.
2272 RemoveSpace(old_alloc_space);
2273 if (collector::SemiSpace::kUseRememberedSet) {
2274 // Sanity bound check.
2275 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2276 // Remove the remembered set for the now zygote space (the old
2277 // non-moving space). Note now that we have compacted objects into
2278 // the zygote space, the data in the remembered set is no longer
2279 // needed. The zygote space will instead have a mod-union table
2280 // from this point on.
2281 RemoveRememberedSet(old_alloc_space);
2282 }
2283 // Remaining space becomes the new non moving space.
2284 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2285 &non_moving_space_);
2286 CHECK(!non_moving_space_->CanMoveObjects());
2287 if (same_space) {
2288 main_space_ = non_moving_space_;
2289 SetSpaceAsDefault(main_space_);
2290 }
2291 delete old_alloc_space;
2292 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2293 AddSpace(zygote_space_);
2294 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2295 AddSpace(non_moving_space_);
2296 // Create the zygote space mod union table.
2297 accounting::ModUnionTable* mod_union_table =
2298 new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
2299 zygote_space_);
2300 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2301 // Set all the cards in the mod-union table since we don't know which objects contain references
2302 // to large objects.
2303 mod_union_table->SetCards();
2304 AddModUnionTable(mod_union_table);
2305 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
2306 if (collector::SemiSpace::kUseRememberedSet) {
2307 // Add a new remembered set for the post-zygote non-moving space.
2308 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2309 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2310 non_moving_space_);
2311 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2312 << "Failed to create post-zygote non-moving space remembered set";
2313 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2314 }
2315 }
2316
2317 void Heap::FlushAllocStack() {
2318 MarkAllocStackAsLive(allocation_stack_.get());
2319 allocation_stack_->Reset();
2320 }
2321
2322 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2323 accounting::ContinuousSpaceBitmap* bitmap2,
2324 accounting::LargeObjectBitmap* large_objects,
2325 accounting::ObjectStack* stack) {
2326 DCHECK(bitmap1 != nullptr);
2327 DCHECK(bitmap2 != nullptr);
2328 const auto* limit = stack->End();
2329 for (auto* it = stack->Begin(); it != limit; ++it) {
2330 const mirror::Object* obj = it->AsMirrorPtr();
2331 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2332 if (bitmap1->HasAddress(obj)) {
2333 bitmap1->Set(obj);
2334 } else if (bitmap2->HasAddress(obj)) {
2335 bitmap2->Set(obj);
2336 } else {
2337 DCHECK(large_objects != nullptr);
2338 large_objects->Set(obj);
2339 }
2340 }
2341 }
2342 }
2343
2344 void Heap::SwapSemiSpaces() {
2345 CHECK(bump_pointer_space_ != nullptr);
2346 CHECK(temp_space_ != nullptr);
2347 std::swap(bump_pointer_space_, temp_space_);
2348 }
2349
2350 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2351 space::ContinuousMemMapAllocSpace* source_space,
2352 GcCause gc_cause) {
2353 CHECK(kMovingCollector);
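  // Copying between two different spaces uses the semi-space collector; compacting a bump pointer
  // space onto itself falls back to the mark-compact collector, which rewrites objects in place.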
2354 if (target_space != source_space) {
2355 // Don't swap spaces since this isn't a typical semi space collection.
2356 semi_space_collector_->SetSwapSemiSpaces(false);
2357 semi_space_collector_->SetFromSpace(source_space);
2358 semi_space_collector_->SetToSpace(target_space);
2359 semi_space_collector_->Run(gc_cause, false);
2360 return semi_space_collector_;
2361 } else {
2362 CHECK(target_space->IsBumpPointerSpace())
2363 << "In-place compaction is only supported for bump pointer spaces";
2364 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2365 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2366 return mark_compact_collector_;
2367 }
2368 }
2369
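// Central entry point for running a GC. Bails out (returning kGcTypeNone) if the requested type
// cannot run (e.g. partial GC without a zygote space, stack overflow in progress, or moving GC
// temporarily disabled); otherwise selects the collector matching the current collector/allocator
// configuration, runs it, and updates heap sizing and statistics afterwards.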
2370 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
2371 bool clear_soft_references) {
2372 Thread* self = Thread::Current();
2373 Runtime* runtime = Runtime::Current();
2374 // If the heap can't run the GC, silently fail and return that no GC was run.
2375 switch (gc_type) {
2376 case collector::kGcTypePartial: {
2377 if (!HasZygoteSpace()) {
2378 return collector::kGcTypeNone;
2379 }
2380 break;
2381 }
2382 default: {
2383 // Other GC types don't have any special cases that would make them not runnable. The main
2384 // case here is full GC.
2385 }
2386 }
2387 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2388 Locks::mutator_lock_->AssertNotHeld(self);
2389 if (self->IsHandlingStackOverflow()) {
2390 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2391 // space to run the GC.
2392 return collector::kGcTypeNone;
2393 }
2394 bool compacting_gc;
2395 {
2396 gc_complete_lock_->AssertNotHeld(self);
2397 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2398 MutexLock mu(self, *gc_complete_lock_);
2399 // Ensure there is only one GC at a time.
2400 WaitForGcToCompleteLocked(gc_cause, self);
2401 compacting_gc = IsMovingGc(collector_type_);
2402 // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2403 if (compacting_gc && disable_moving_gc_count_ != 0) {
2404 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2405 return collector::kGcTypeNone;
2406 }
2407 collector_type_running_ = collector_type_;
2408 }
2409 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2410 ++runtime->GetStats()->gc_for_alloc_count;
2411 ++self->GetStats()->gc_for_alloc_count;
2412 }
2413 const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2414 // Approximate heap size.
2415 ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
2416
2417 DCHECK_LT(gc_type, collector::kGcTypeMax);
2418 DCHECK_NE(gc_type, collector::kGcTypeNone);
2419
2420 collector::GarbageCollector* collector = nullptr;
2421 // TODO: Clean this up.
2422 if (compacting_gc) {
2423 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2424 current_allocator_ == kAllocatorTypeTLAB ||
2425 current_allocator_ == kAllocatorTypeRegion ||
2426 current_allocator_ == kAllocatorTypeRegionTLAB);
2427 switch (collector_type_) {
2428 case kCollectorTypeSS:
2429 // Fall-through.
2430 case kCollectorTypeGSS:
2431 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2432 semi_space_collector_->SetToSpace(temp_space_);
2433 semi_space_collector_->SetSwapSemiSpaces(true);
2434 collector = semi_space_collector_;
2435 break;
2436 case kCollectorTypeCC:
2437 concurrent_copying_collector_->SetRegionSpace(region_space_);
2438 collector = concurrent_copying_collector_;
2439 break;
2440 case kCollectorTypeMC:
2441 mark_compact_collector_->SetSpace(bump_pointer_space_);
2442 collector = mark_compact_collector_;
2443 break;
2444 default:
2445 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2446 }
2447 if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
2448 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2449 CHECK(temp_space_->IsEmpty());
2450 }
2451 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
2452 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2453 current_allocator_ == kAllocatorTypeDlMalloc) {
2454 collector = FindCollectorByGcType(gc_type);
2455 } else {
2456 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2457 }
2458 if (IsGcConcurrent()) {
2459 // Disable concurrent GC check so that we don't have spammy JNI requests.
2460 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2461 // calculated in the same thread so that there aren't any races that can cause it to become
2462 // permanently disabled. b/17942071
2463 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2464 }
2465 CHECK(collector != nullptr)
2466 << "Could not find garbage collector with collector_type="
2467 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2468 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2469 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2470 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2471 RequestTrim(self);
2472 // Enqueue cleared references.
2473 reference_processor_.EnqueueClearedReferences(self);
2474 // Grow the heap so that we know when to perform the next GC.
2475 GrowForUtilization(collector, bytes_allocated_before_gc);
2476 LogGC(gc_cause, collector);
2477 FinishGC(self, gc_type);
2478 // Inform DDMS that a GC completed.
2479 Dbg::GcDidFinish();
2480 return gc_type;
2481 }
2482
2483 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2484 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2485 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2486 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2487 // (mutator time blocked >= long_pause_log_threshold_).
2488 bool log_gc = gc_cause == kGcCauseExplicit;
2489 if (!log_gc && CareAboutPauseTimes()) {
2490 // GC for alloc pauses the allocating thread, so consider it as a pause.
2491 log_gc = duration > long_gc_log_threshold_ ||
2492 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2493 for (uint64_t pause : pause_times) {
2494 log_gc = log_gc || pause >= long_pause_log_threshold_;
2495 }
2496 }
2497 if (log_gc) {
2498 const size_t percent_free = GetPercentFree();
2499 const size_t current_heap_size = GetBytesAllocated();
2500 const size_t total_memory = GetTotalMemory();
2501 std::ostringstream pause_string;
2502 for (size_t i = 0; i < pause_times.size(); ++i) {
2503 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2504 << ((i != pause_times.size() - 1) ? "," : "");
2505 }
2506 LOG(INFO) << gc_cause << " " << collector->GetName()
2507 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2508 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2509 << current_gc_iteration_.GetFreedLargeObjects() << "("
2510 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2511 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2512 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2513 << " total " << PrettyDuration((duration / 1000) * 1000);
2514 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2515 }
2516 }
2517
2518 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2519 MutexLock mu(self, *gc_complete_lock_);
2520 collector_type_running_ = kCollectorTypeNone;
2521 if (gc_type != collector::kGcTypeNone) {
2522 last_gc_type_ = gc_type;
2523
2524 // Update stats.
2525 ++gc_count_last_window_;
2526 if (running_collection_is_blocking_) {
2527 // If the currently running collection was a blocking one,
2528 // increment the counters and reset the flag.
2529 ++blocking_gc_count_;
2530 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2531 ++blocking_gc_count_last_window_;
2532 }
2533 // Update the gc count rate histograms if due.
2534 UpdateGcCountRateHistograms();
2535 }
2536 // Reset.
2537 running_collection_is_blocking_ = false;
2538 // Wake anyone who may have been waiting for the GC to complete.
2539 gc_complete_cond_->Broadcast(self);
2540 }
2541
2542 void Heap::UpdateGcCountRateHistograms() {
2543 // Invariant: if the time since the last update includes more than
2544 one window, all the GC runs (if > 0) must have happened in the first
2545 // window because otherwise the update must have already taken place
2546 // at an earlier GC run. So, we report the non-first windows with
2547 // zero counts to the histograms.
2548 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2549 uint64_t now = NanoTime();
2550 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2551 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2552 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2553 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2554 // Record the first window.
2555 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2556 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2557 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2558 // Record the other windows (with zero counts).
2559 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2560 gc_count_rate_histogram_.AddValue(0);
2561 blocking_gc_count_rate_histogram_.AddValue(0);
2562 }
2563 // Update the last update time and reset the counters.
2564 last_update_time_gc_count_rate_histograms_ =
2565 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2566 gc_count_last_window_ = 1; // Include the current run.
2567 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2568 }
2569 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2570 }
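// Worked example of the window accounting above (hypothetical numbers, for illustration only):
// with a window duration of 10s, suppose the histograms were last updated at t=0, three GCs
// finished before t=10, and the current GC finishes at t=35. On entry gc_count_last_window_ == 4
// (three earlier runs plus the current one), time_since_last_update == 35s, num_of_windows == 3.
// The first window is recorded with value 3 (the current run is excluded), the two fully elapsed
// empty windows are recorded as 0, the update time is rounded down to t=30, and
// gc_count_last_window_ restarts at 1 so the current run is counted in the new window.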
2571
2572 class RootMatchesObjectVisitor : public SingleRootVisitor {
2573 public:
2574 explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2575
2576 void VisitRoot(mirror::Object* root, const RootInfo& info)
2577 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2578 if (root == obj_) {
2579 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2580 }
2581 }
2582
2583 private:
2584 const mirror::Object* const obj_;
2585 };
2586
2587
2588 class ScanVisitor {
2589 public:
2590 void operator()(const mirror::Object* obj) const {
2591 LOG(ERROR) << "Would have rescanned object " << obj;
2592 }
2593 };
2594
2595 // Verify a reference from an object.
2596 class VerifyReferenceVisitor : public SingleRootVisitor {
2597 public:
2598 explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2599 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
2600 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2601
2602 size_t GetFailureCount() const {
2603 return fail_count_->LoadSequentiallyConsistent();
2604 }
2605
2606 void operator()(mirror::Class* klass, mirror::Reference* ref) const
2607 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2608 UNUSED(klass);
2609 if (verify_referent_) {
2610 VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
2611 }
2612 }
2613
2614 void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
2615 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2616 VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
2617 }
2618
2619 bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2620 return heap_->IsLiveObjectLocked(obj, true, false, true);
2621 }
2622
2623 void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
2624 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2625 if (root == nullptr) {
2626 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2627 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2628 LOG(ERROR) << "Root " << root << " is dead with type " << PrettyTypeOf(root)
2629 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2630 }
2631 }
2632
2633 private:
2634 // TODO: Fix the no thread safety analysis.
2635 // Returns false on failure.
2636 bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2637 NO_THREAD_SAFETY_ANALYSIS {
2638 if (ref == nullptr || IsLive(ref)) {
2639 // Verify that the reference is live.
2640 return true;
2641 }
2642 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2643 // Print the message only on the first failure to prevent spam.
2644 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2645 }
2646 if (obj != nullptr) {
2647 // Only do this part for non roots.
2648 accounting::CardTable* card_table = heap_->GetCardTable();
2649 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2650 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2651 uint8_t* card_addr = card_table->CardFromAddr(obj);
2652 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2653 << offset << "\n card value = " << static_cast<int>(*card_addr);
2654 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2655 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2656 } else {
2657 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2658 }
2659
2660 // Attempt to find the class inside of the recently freed objects.
2661 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2662 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2663 space::MallocSpace* space = ref_space->AsMallocSpace();
2664 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2665 if (ref_class != nullptr) {
2666 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2667 << PrettyClass(ref_class);
2668 } else {
2669 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2670 }
2671 }
2672
2673 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2674 ref->GetClass()->IsClass()) {
2675 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2676 } else {
2677 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2678 << ") is not a valid heap address";
2679 }
2680
2681 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2682 void* cover_begin = card_table->AddrFromCard(card_addr);
2683 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2684 accounting::CardTable::kCardSize);
2685 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2686 << "-" << cover_end;
2687 accounting::ContinuousSpaceBitmap* bitmap =
2688 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2689
2690 if (bitmap == nullptr) {
2691 LOG(ERROR) << "Object " << obj << " has no bitmap";
2692 if (!VerifyClassClass(obj->GetClass())) {
2693 LOG(ERROR) << "Object " << obj << " failed class verification!";
2694 }
2695 } else {
2696 // Print out how the object is live.
2697 if (bitmap->Test(obj)) {
2698 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2699 }
2700 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2701 LOG(ERROR) << "Object " << obj << " found in allocation stack";
2702 }
2703 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2704 LOG(ERROR) << "Object " << obj << " found in live stack";
2705 }
2706 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2707 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2708 }
2709 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2710 LOG(ERROR) << "Ref " << ref << " found in live stack";
2711 }
2712 // Attempt to see if the card table missed the reference.
2713 ScanVisitor scan_visitor;
2714 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
2715 card_table->Scan<false>(bitmap, byte_cover_begin,
2716 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2717 }
2718
2719 // Search to see if any of the roots reference our object.
2720 RootMatchesObjectVisitor visitor1(obj);
2721 Runtime::Current()->VisitRoots(&visitor1);
2722 // Search to see if any of the roots reference our reference.
2723 RootMatchesObjectVisitor visitor2(ref);
2724 Runtime::Current()->VisitRoots(&visitor2);
2725 }
2726 return false;
2727 }
2728
2729 Heap* const heap_;
2730 Atomic<size_t>* const fail_count_;
2731 const bool verify_referent_;
2732 };
2733
2734 // Verify all references within an object, for use with HeapBitmap::Visit.
2735 class VerifyObjectVisitor {
2736 public:
2737 explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2738 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
2739 }
2740
2741 void operator()(mirror::Object* obj) const
2742 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2743 // Note: we are verifying the references in obj but not obj itself; obj must
2744 // be live, or else we would not have found it in the live bitmap.
2745 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2746 // The class doesn't count as a reference, but we should verify it anyway.
2747 obj->VisitReferences<true>(visitor, visitor);
2748 }
2749
2750 static void VisitCallback(mirror::Object* obj, void* arg)
2751 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2752 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2753 visitor->operator()(obj);
2754 }
2755
2756 void VerifyRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2757 LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) {
2758 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2759 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2760 Runtime::Current()->VisitRoots(&visitor);
2761 }
2762
2763 size_t GetFailureCount() const {
2764 return fail_count_->LoadSequentiallyConsistent();
2765 }
2766
2767 private:
2768 Heap* const heap_;
2769 Atomic<size_t>* const fail_count_;
2770 const bool verify_referent_;
2771 };
2772
2773 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2774 // Slow path, the allocation stack push back must have already failed.
2775 DCHECK(!allocation_stack_->AtomicPushBack(*obj));
2776 do {
2777 // TODO: Add handle VerifyObject.
2778 StackHandleScope<1> hs(self);
2779 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2780 // Push our object into the reserve region of the allocation stack. This is only required due
2781 // to heap verification requiring that roots are live (either in the live bitmap or in the
2782 // allocation stack).
2783 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2784 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2785 } while (!allocation_stack_->AtomicPushBack(*obj));
2786 }
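// Informal sketch of the retry protocol used by the two slow paths here (this describes the code
// above and below, it adds no behavior): the normal push failed because the allocation stack is
// full, so the object is first parked in the reserve region (AtomicPushBackIgnoreGrowthLimit) so
// heap verification still sees it as a live root, a sticky GC is run to swap and drain the
// stacks, and the normal push is then retried. The HandleWrapper keeps *obj valid across the GC
// in case the object is moved.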
2787
2788 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2789 // Slow path, the allocation stack push back must have already failed.
2790 DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
2791 StackReference<mirror::Object>* start_address;
2792 StackReference<mirror::Object>* end_address;
2793 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2794 &end_address)) {
2795 // TODO: Add handle VerifyObject.
2796 StackHandleScope<1> hs(self);
2797 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2798 // Push our object into the reserve region of the allocation stack. This is only required due
2799 // to heap verification requiring that roots are live (either in the live bitmap or in the
2800 // allocation stack).
2801 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2802 // Push into the reserve allocation stack.
2803 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2804 }
2805 self->SetThreadLocalAllocationStack(start_address, end_address);
2806 // Retry on the new thread-local allocation stack.
2807 CHECK(self->PushOnThreadLocalAllocationStack(*obj)); // Must succeed.
2808 }
2809
2810 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
2811 size_t Heap::VerifyHeapReferences(bool verify_referents) {
2812 Thread* self = Thread::Current();
2813 Locks::mutator_lock_->AssertExclusiveHeld(self);
2814 // Let's sort our allocation stacks so that we can efficiently binary search them.
2815 allocation_stack_->Sort();
2816 live_stack_->Sort();
2817 // Since we sorted the allocation stack content, need to revoke all
2818 // thread-local allocation stacks.
2819 RevokeAllThreadLocalAllocationStacks(self);
2820 Atomic<size_t> fail_count_(0);
2821 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
2822 // Verify objects in the allocation stack since these will be objects which were:
2823 // 1. Allocated prior to the GC (pre GC verification).
2824 // 2. Allocated during the GC (pre sweep GC verification).
2825 // We don't want to verify the objects in the live stack since they themselves may be
2826 // pointing to dead objects if they are not reachable.
2827 VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
2828 // Verify the roots:
2829 visitor.VerifyRoots();
2830 if (visitor.GetFailureCount() > 0) {
2831 // Dump mod-union tables.
2832 for (const auto& table_pair : mod_union_tables_) {
2833 accounting::ModUnionTable* mod_union_table = table_pair.second;
2834 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2835 }
2836 // Dump remembered sets.
2837 for (const auto& table_pair : remembered_sets_) {
2838 accounting::RememberedSet* remembered_set = table_pair.second;
2839 remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2840 }
2841 DumpSpaces(LOG(ERROR));
2842 }
2843 return visitor.GetFailureCount();
2844 }
2845
2846 class VerifyReferenceCardVisitor {
2847 public:
2848 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
2849 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
2850 Locks::heap_bitmap_lock_)
2851 : heap_(heap), failed_(failed) {
2852 }
2853
2854 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2855 // annotalysis on visitors.
2856 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2857 NO_THREAD_SAFETY_ANALYSIS {
2858 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
2859 // Filter out class references since changing an object's class does not mark the card as dirty.
2860 // Also handles large objects, since the only reference they hold is a class reference.
2861 if (ref != nullptr && !ref->IsClass()) {
2862 accounting::CardTable* card_table = heap_->GetCardTable();
2863 // If the object's card is not dirty but it references something in the live stack (other
2864 // than its class), that is an error: such an object must be on a dirty card.
2865 if (!card_table->AddrIsInCardTable(obj)) {
2866 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2867 *failed_ = true;
2868 } else if (!card_table->IsDirty(obj)) {
2869 // TODO: Check mod-union tables.
2870 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
2871 // kCardDirty - 1 if it didn't get touched since we aged it.
2872 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2873 if (live_stack->ContainsSorted(ref)) {
2874 if (live_stack->ContainsSorted(obj)) {
2875 LOG(ERROR) << "Object " << obj << " found in live stack";
2876 }
2877 if (heap_->GetLiveBitmap()->Test(obj)) {
2878 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2879 }
2880 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2881 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2882
2883 // Print which field of the object is dead.
2884 if (!obj->IsObjectArray()) {
2885 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
2886 CHECK(klass != nullptr);
2887 auto* fields = is_static ? klass->GetSFields() : klass->GetIFields();
2888 auto num_fields = is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
2889 CHECK_EQ(fields == nullptr, num_fields == 0u);
2890 for (size_t i = 0; i < num_fields; ++i) {
2891 ArtField* cur = &fields[i];
2892 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
2893 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
2894 << PrettyField(cur);
2895 break;
2896 }
2897 }
2898 } else {
2899 mirror::ObjectArray<mirror::Object>* object_array =
2900 obj->AsObjectArray<mirror::Object>();
2901 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2902 if (object_array->Get(i) == ref) {
2903 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2904 }
2905 }
2906 }
2907
2908 *failed_ = true;
2909 }
2910 }
2911 }
2912 }
2913
2914 private:
2915 Heap* const heap_;
2916 bool* const failed_;
2917 };
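// Roughly, the invariant checked above is the write-barrier contract: if object A holds a
// reference to object B and B is only reachable via the live stack (i.e. B was allocated since
// the last collection), then the card covering A must have been dirtied when the reference field
// was written. A clean card here would mean a sticky GC could skip A and miss B. Class
// references are exempt because installing an object's class does not dirty a card.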
2918
2919 class VerifyLiveStackReferences {
2920 public:
2921 explicit VerifyLiveStackReferences(Heap* heap)
2922 : heap_(heap),
2923 failed_(false) {}
2924
2925 void operator()(mirror::Object* obj) const
2926 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2927 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
2928 obj->VisitReferences<true>(visitor, VoidFunctor());
2929 }
2930
2931 bool Failed() const {
2932 return failed_;
2933 }
2934
2935 private:
2936 Heap* const heap_;
2937 bool failed_;
2938 };
2939
2940 bool Heap::VerifyMissingCardMarks() {
2941 Thread* self = Thread::Current();
2942 Locks::mutator_lock_->AssertExclusiveHeld(self);
2943 // We need to sort the live stack since we binary search it.
2944 live_stack_->Sort();
2945 // Since we sorted the allocation stack content, need to revoke all
2946 // thread-local allocation stacks.
2947 RevokeAllThreadLocalAllocationStacks(self);
2948 VerifyLiveStackReferences visitor(this);
2949 GetLiveBitmap()->Visit(visitor);
2950 // We can verify objects in the live stack since none of these should reference dead objects.
2951 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
2952 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
2953 visitor(it->AsMirrorPtr());
2954 }
2955 }
2956 return !visitor.Failed();
2957 }
2958
2959 void Heap::SwapStacks(Thread* self) {
2960 UNUSED(self);
2961 if (kUseThreadLocalAllocationStack) {
2962 live_stack_->AssertAllZero();
2963 }
2964 allocation_stack_.swap(live_stack_);
2965 }
2966
2967 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
2968 // This must be called only during the pause.
2969 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
2970 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2971 MutexLock mu2(self, *Locks::thread_list_lock_);
2972 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2973 for (Thread* t : thread_list) {
2974 t->RevokeThreadLocalAllocationStack();
2975 }
2976 }
2977
2978 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
2979 if (kIsDebugBuild) {
2980 if (rosalloc_space_ != nullptr) {
2981 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
2982 }
2983 if (bump_pointer_space_ != nullptr) {
2984 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
2985 }
2986 }
2987 }
2988
2989 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
2990 if (kIsDebugBuild) {
2991 if (bump_pointer_space_ != nullptr) {
2992 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
2993 }
2994 }
2995 }
2996
2997 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2998 auto it = mod_union_tables_.find(space);
2999 if (it == mod_union_tables_.end()) {
3000 return nullptr;
3001 }
3002 return it->second;
3003 }
3004
3005 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3006 auto it = remembered_sets_.find(space);
3007 if (it == remembered_sets_.end()) {
3008 return nullptr;
3009 }
3010 return it->second;
3011 }
3012
3013 void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets, bool process_alloc_space_cards,
3014 bool clear_alloc_space_cards) {
3015 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3016 // Clear cards and keep track of cards cleared in the mod-union table.
3017 for (const auto& space : continuous_spaces_) {
3018 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3019 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3020 if (table != nullptr) {
3021 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3022 "ImageModUnionClearCards";
3023 TimingLogger::ScopedTiming t2(name, timings);
3024 table->ClearCards();
3025 } else if (use_rem_sets && rem_set != nullptr) {
3026 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3027 << static_cast<int>(collector_type_);
3028 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3029 rem_set->ClearCards();
3030 } else if (process_alloc_space_cards) {
3031 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3032 if (clear_alloc_space_cards) {
3033 card_table_->ClearCardRange(space->Begin(), space->End());
3034 } else {
3035 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3036 // cards were dirty before the GC started.
3037 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3038 // -> clean(cleaning thread).
3039 // The race leaves us with either an aged card or an unaged card. Since we checkpoint the
3040 // roots and then scan / update the mod union tables afterwards, we will always
3041 // scan either card. If we end up with the non-aged card, we scan it in the pause.
3042 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3043 VoidFunctor());
3044 }
3045 }
3046 }
3047 }
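// Card lifecycle handled above, roughly: a reference store dirties the card covering the storing
// object (kCardDirty). When a space has no mod union table or remembered set, ProcessCards either
// clears the whole card range (clear_alloc_space_cards) or "ages" dirty cards with AgeCardVisitor
// so the collector can still tell that they were dirty before the pause; aged (kCardDirty - 1)
// and re-dirtied cards both get rescanned, while cards that were never dirtied stay clean and are
// skipped.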
3048
3049 static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
3050 }
3051
3052 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3053 Thread* const self = Thread::Current();
3054 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3055 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3056 if (verify_pre_gc_heap_) {
3057 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3058 size_t failures = VerifyHeapReferences();
3059 if (failures > 0) {
3060 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3061 << " failures";
3062 }
3063 }
3064 // Check that all objects which reference things in the live stack are on dirty cards.
3065 if (verify_missing_card_marks_) {
3066 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3067 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3068 SwapStacks(self);
3069 // Sort the live stack so that we can quickly binary search it later.
3070 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3071 << " missing card mark verification failed\n" << DumpSpaces();
3072 SwapStacks(self);
3073 }
3074 if (verify_mod_union_table_) {
3075 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3076 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3077 for (const auto& table_pair : mod_union_tables_) {
3078 accounting::ModUnionTable* mod_union_table = table_pair.second;
3079 mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
3080 mod_union_table->Verify();
3081 }
3082 }
3083 }
3084
3085 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3086 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3087 collector::GarbageCollector::ScopedPause pause(gc);
3088 PreGcVerificationPaused(gc);
3089 }
3090 }
3091
3092 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
3093 UNUSED(gc);
3094 // TODO: Add a new runtime option for this?
3095 if (verify_pre_gc_rosalloc_) {
3096 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3097 }
3098 }
3099
3100 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3101 Thread* const self = Thread::Current();
3102 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3103 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3104 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3105 // reachable objects.
3106 if (verify_pre_sweeping_heap_) {
3107 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3108 CHECK_NE(self->GetState(), kRunnable);
3109 {
3110 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3111 // Swapping bound bitmaps does nothing.
3112 gc->SwapBitmaps();
3113 }
3114 // Pass in false since concurrent reference processing can mean that the reference referents
3115 // may point to dead objects at the point which PreSweepingGcVerification is called.
3116 size_t failures = VerifyHeapReferences(false);
3117 if (failures > 0) {
3118 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3119 << " failures";
3120 }
3121 {
3122 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3123 gc->SwapBitmaps();
3124 }
3125 }
3126 if (verify_pre_sweeping_rosalloc_) {
3127 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3128 }
3129 }
3130
3131 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3132 // Only pause if we have to do some verification.
3133 Thread* const self = Thread::Current();
3134 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3135 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3136 if (verify_system_weaks_) {
3137 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3138 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3139 mark_sweep->VerifySystemWeaks();
3140 }
3141 if (verify_post_gc_rosalloc_) {
3142 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3143 }
3144 if (verify_post_gc_heap_) {
3145 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3146 size_t failures = VerifyHeapReferences();
3147 if (failures > 0) {
3148 LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3149 << " failures";
3150 }
3151 }
3152 }
3153
3154 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3155 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3156 collector::GarbageCollector::ScopedPause pause(gc);
3157 PostGcVerificationPaused(gc);
3158 }
3159 }
3160
3161 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3162 TimingLogger::ScopedTiming t(name, timings);
3163 for (const auto& space : continuous_spaces_) {
3164 if (space->IsRosAllocSpace()) {
3165 VLOG(heap) << name << " : " << space->GetName();
3166 space->AsRosAllocSpace()->Verify();
3167 }
3168 }
3169 }
3170
3171 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3172 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3173 MutexLock mu(self, *gc_complete_lock_);
3174 return WaitForGcToCompleteLocked(cause, self);
3175 }
3176
3177 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3178 collector::GcType last_gc_type = collector::kGcTypeNone;
3179 uint64_t wait_start = NanoTime();
3180 while (collector_type_running_ != kCollectorTypeNone) {
3181 if (self != task_processor_->GetRunningThread()) {
3182 // The current thread is about to wait for a currently running
3183 // collection to finish. If the waiting thread is not the heap
3184 // task daemon thread, the currently running collection is
3185 // considered as a blocking GC.
3186 running_collection_is_blocking_ = true;
3187 VLOG(gc) << "Waiting for a blocking GC " << cause;
3188 }
3189 ATRACE_BEGIN("GC: Wait For Completion");
3190 // We must wait, change thread state then sleep on gc_complete_cond_;
3191 gc_complete_cond_->Wait(self);
3192 last_gc_type = last_gc_type_;
3193 ATRACE_END();
3194 }
3195 uint64_t wait_time = NanoTime() - wait_start;
3196 total_wait_time_ += wait_time;
3197 if (wait_time > long_pause_log_threshold_) {
3198 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
3199 << " for cause " << cause;
3200 }
3201 if (self != task_processor_->GetRunningThread()) {
3202 // The current thread is about to run a collection. If the thread
3203 // is not the heap task daemon thread, it's considered as a
3204 // blocking GC (i.e., blocking itself).
3205 running_collection_is_blocking_ = true;
3206 VLOG(gc) << "Starting a blocking GC " << cause;
3207 }
3208 return last_gc_type;
3209 }
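// Note on the blocking-GC bookkeeping above: running_collection_is_blocking_ is set in two places
// on purpose. The first covers a mutator that has to wait for someone else's collection to
// finish; the second covers the case where this thread itself is about to run the collection.
// Either way, if the thread is not the heap task daemon the collection is counted as blocking
// when FinishGC updates the statistics.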
3210
3211 void Heap::DumpForSigQuit(std::ostream& os) {
3212 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3213 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3214 DumpGcPerformanceInfo(os);
3215 }
3216
3217 size_t Heap::GetPercentFree() {
3218 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
3219 }
3220
3221 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
3222 if (max_allowed_footprint > GetMaxMemory()) {
3223 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
3224 << PrettySize(GetMaxMemory());
3225 max_allowed_footprint = GetMaxMemory();
3226 }
3227 max_allowed_footprint_ = max_allowed_footprint;
3228 }
3229
3230 bool Heap::IsMovableObject(const mirror::Object* obj) const {
3231 if (kMovingCollector) {
3232 space::Space* space = FindContinuousSpaceFromObject(obj, true);
3233 if (space != nullptr) {
3234 // TODO: Check large object?
3235 return space->CanMoveObjects();
3236 }
3237 }
3238 return false;
3239 }
3240
3241 void Heap::UpdateMaxNativeFootprint() {
3242 size_t native_size = native_bytes_allocated_.LoadRelaxed();
3243 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
3244 size_t target_size = native_size / GetTargetHeapUtilization();
3245 if (target_size > native_size + max_free_) {
3246 target_size = native_size + max_free_;
3247 } else if (target_size < native_size + min_free_) {
3248 target_size = native_size + min_free_;
3249 }
3250 native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
3251 }
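// Worked example for the native watermark above (hypothetical values, for illustration): with
// 20 MB of registered native allocations, a target utilization of 0.5, min_free_ = 512 KB and
// max_free_ = 2 MB, the raw target is 20 MB / 0.5 = 40 MB, which exceeds native_size + max_free_
// (22 MB) and is therefore clamped down to 22 MB; the GC watermark is then the smaller of that
// and growth_limit_.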
3252
3253 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3254 for (const auto& collector : garbage_collectors_) {
3255 if (collector->GetCollectorType() == collector_type_ &&
3256 collector->GetGcType() == gc_type) {
3257 return collector;
3258 }
3259 }
3260 return nullptr;
3261 }
3262
3263 double Heap::HeapGrowthMultiplier() const {
3264 // If we don't care about pause times we are background, so return 1.0.
3265 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
3266 return 1.0;
3267 }
3268 return foreground_heap_growth_multiplier_;
3269 }
3270
3271 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3272 uint64_t bytes_allocated_before_gc) {
3273 // We know what our utilization is at this moment.
3274 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3275 const uint64_t bytes_allocated = GetBytesAllocated();
3276 uint64_t target_size;
3277 collector::GcType gc_type = collector_ran->GetGcType();
3278 const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
3279 // foreground.
3280 const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3281 const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
3282 if (gc_type != collector::kGcTypeSticky) {
3283 // Grow the heap for non sticky GC.
3284 ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
3285 CHECK_GE(delta, 0);
3286 target_size = bytes_allocated + delta * multiplier;
3287 target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3288 target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
3289 native_need_to_run_finalization_ = true;
3290 next_gc_type_ = collector::kGcTypeSticky;
3291 } else {
3292 collector::GcType non_sticky_gc_type =
3293 HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3294 // Find what the next non sticky collector will be.
3295 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3296 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3297 // do another sticky collection next.
3298 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3299 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3300 // if the sticky GC throughput always remained >= the full/partial throughput.
3301 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
3302 non_sticky_collector->GetEstimatedMeanThroughput() &&
3303 non_sticky_collector->NumberOfIterations() > 0 &&
3304 bytes_allocated <= max_allowed_footprint_) {
3305 next_gc_type_ = collector::kGcTypeSticky;
3306 } else {
3307 next_gc_type_ = non_sticky_gc_type;
3308 }
3309 // If we have freed enough memory, shrink the heap back down.
3310 if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3311 target_size = bytes_allocated + adjusted_max_free;
3312 } else {
3313 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
3314 }
3315 }
3316 if (!ignore_max_footprint_) {
3317 SetIdealFootprint(target_size);
3318 if (IsGcConcurrent()) {
3319 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3320 current_gc_iteration_.GetFreedLargeObjectBytes() +
3321 current_gc_iteration_.GetFreedRevokeBytes();
3322 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3323 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3324 CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3325 const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3326 bytes_allocated_before_gc;
3327 // Calculate when to perform the next ConcurrentGC.
3328 // Calculate the estimated GC duration.
3329 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
3330 // Estimate how many remaining bytes we will have when we need to start the next GC.
3331 size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
3332 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3333 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3334 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3335 // This should never happen: the estimated allocation rate implies that we would exceed
3336 // the application's entire footprint. Schedule
3337 // another GC nearly straight away.
3338 remaining_bytes = kMinConcurrentRemainingBytes;
3339 }
3340 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
3341 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
3342 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3343 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3344 // right away.
3345 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3346 static_cast<size_t>(bytes_allocated));
3347 }
3348 }
3349 }
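// Worked example of the sizing above (hypothetical numbers, for illustration): after a full GC
// with 40 MB live, a target utilization of 0.75 and a foreground multiplier of 2.0,
// delta = 40 / 0.75 - 40 ~= 13.3 MB and target_size ~= 40 + 2 * 13.3 ~= 66.7 MB, clamped into
// [40 MB + 2 * min_free_, 40 MB + 2 * max_free_]. For the concurrent start point, if the GC took
// 0.1 s and 4 MB were allocated while it ran, remaining_bytes = 4 MB * 0.1 ~= 0.4 MB, clamped
// into [kMinConcurrentRemainingBytes, kMaxConcurrentRemainingBytes] = [128 KB, 512 KB], so the
// next concurrent GC is requested roughly 0.4 MB before the footprint limit would be reached.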
3350
3351 void Heap::ClampGrowthLimit() {
3352 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3353 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3354 capacity_ = growth_limit_;
3355 for (const auto& space : continuous_spaces_) {
3356 if (space->IsMallocSpace()) {
3357 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3358 malloc_space->ClampGrowthLimit();
3359 }
3360 }
3361 // This space isn't added for performance reasons.
3362 if (main_space_backup_.get() != nullptr) {
3363 main_space_backup_->ClampGrowthLimit();
3364 }
3365 }
3366
3367 void Heap::ClearGrowthLimit() {
3368 growth_limit_ = capacity_;
3369 for (const auto& space : continuous_spaces_) {
3370 if (space->IsMallocSpace()) {
3371 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3372 malloc_space->ClearGrowthLimit();
3373 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3374 }
3375 }
3376 // This space isn't added for performance reasons.
3377 if (main_space_backup_.get() != nullptr) {
3378 main_space_backup_->ClearGrowthLimit();
3379 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3380 }
3381 }
3382
3383 void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
3384 ScopedObjectAccess soa(self);
3385 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3386 jvalue args[1];
3387 args[0].l = arg.get();
3388 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3389 // Restore object in case it gets moved.
3390 *object = soa.Decode<mirror::Object*>(arg.get());
3391 }
3392
3393 void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
3394 StackHandleScope<1> hs(self);
3395 HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3396 RequestConcurrentGC(self, force_full);
3397 }
3398
3399 class Heap::ConcurrentGCTask : public HeapTask {
3400 public:
3401 explicit ConcurrentGCTask(uint64_t target_time, bool force_full)
3402 : HeapTask(target_time), force_full_(force_full) { }
3403 virtual void Run(Thread* self) OVERRIDE {
3404 gc::Heap* heap = Runtime::Current()->GetHeap();
3405 heap->ConcurrentGC(self, force_full_);
3406 heap->ClearConcurrentGCRequest();
3407 }
3408
3409 private:
3410 const bool force_full_; // If true, force full (or partial) collection.
3411 };
3412
3413 static bool CanAddHeapTask(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_) {
3414 Runtime* runtime = Runtime::Current();
3415 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3416 !self->IsHandlingStackOverflow();
3417 }
3418
3419 void Heap::ClearConcurrentGCRequest() {
3420 concurrent_gc_pending_.StoreRelaxed(false);
3421 }
3422
3423 void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
3424 if (CanAddHeapTask(self) &&
3425 concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
3426 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
3427 force_full));
3428 }
3429 }
3430
3431 void Heap::ConcurrentGC(Thread* self, bool force_full) {
3432 if (!Runtime::Current()->IsShuttingDown(self)) {
3433 // Wait for any GCs currently running to finish.
3434 if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
3435 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
3436 // instead. E.g. can't do partial, so do full instead.
3437 collector::GcType next_gc_type = next_gc_type_;
3438 // If forcing full and next gc type is sticky, override with a non-sticky type.
3439 if (force_full && next_gc_type == collector::kGcTypeSticky) {
3440 next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3441 }
3442 if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
3443 collector::kGcTypeNone) {
3444 for (collector::GcType gc_type : gc_plan_) {
3445 // Attempt to run the collector, if we succeed, we are done.
3446 if (gc_type > next_gc_type &&
3447 CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
3448 collector::kGcTypeNone) {
3449 break;
3450 }
3451 }
3452 }
3453 }
3454 }
3455 }
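// Escalation example for the fallback loop above: if next_gc_type_ is kGcTypeSticky but the
// sticky collection returns kGcTypeNone (e.g. another collection already satisfied the request),
// the loop walks gc_plan_ and tries each heavier type that is greater than the requested one,
// typically kGcTypePartial and then kGcTypeFull, stopping at the first one that actually runs.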
3456
3457 class Heap::CollectorTransitionTask : public HeapTask {
3458 public:
3459 explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) { }
3460 virtual void Run(Thread* self) OVERRIDE {
3461 gc::Heap* heap = Runtime::Current()->GetHeap();
3462 heap->DoPendingCollectorTransition();
3463 heap->ClearPendingCollectorTransition(self);
3464 }
3465 };
3466
3467 void Heap::ClearPendingCollectorTransition(Thread* self) {
3468 MutexLock mu(self, *pending_task_lock_);
3469 pending_collector_transition_ = nullptr;
3470 }
3471
3472 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3473 Thread* self = Thread::Current();
3474 desired_collector_type_ = desired_collector_type;
3475 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3476 return;
3477 }
3478 CollectorTransitionTask* added_task = nullptr;
3479 const uint64_t target_time = NanoTime() + delta_time;
3480 {
3481 MutexLock mu(self, *pending_task_lock_);
3482 // If we have an existing collector transition, update the target time to be the new target.
3483 if (pending_collector_transition_ != nullptr) {
3484 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3485 return;
3486 }
3487 added_task = new CollectorTransitionTask(target_time);
3488 pending_collector_transition_ = added_task;
3489 }
3490 task_processor_->AddTask(self, added_task);
3491 }
3492
3493 class Heap::HeapTrimTask : public HeapTask {
3494 public:
3495 explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3496 virtual void Run(Thread* self) OVERRIDE {
3497 gc::Heap* heap = Runtime::Current()->GetHeap();
3498 heap->Trim(self);
3499 heap->ClearPendingTrim(self);
3500 }
3501 };
3502
3503 void Heap::ClearPendingTrim(Thread* self) {
3504 MutexLock mu(self, *pending_task_lock_);
3505 pending_heap_trim_ = nullptr;
3506 }
3507
3508 void Heap::RequestTrim(Thread* self) {
3509 if (!CanAddHeapTask(self)) {
3510 return;
3511 }
3512 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3513 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3514 // a space it will hold its lock and can become a cause of jank.
3515 // Note: the large object space trims itself, and the Zygote space has been trimmed and is unchanging since
3516 // forking.
3517
3518 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3519 // because that only marks object heads, so a large array looks like lots of empty space. We
3520 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3521 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3522 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3523 // not how much use we're making of those pages.
3524 HeapTrimTask* added_task = nullptr;
3525 {
3526 MutexLock mu(self, *pending_task_lock_);
3527 if (pending_heap_trim_ != nullptr) {
3528 // Already have a heap trim request in task processor, ignore this request.
3529 return;
3530 }
3531 added_task = new HeapTrimTask(kHeapTrimWait);
3532 pending_heap_trim_ = added_task;
3533 }
3534 task_processor_->AddTask(self, added_task);
3535 }
3536
3537 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3538 if (rosalloc_space_ != nullptr) {
3539 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3540 if (freed_bytes_revoke > 0U) {
3541 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3542 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3543 }
3544 }
3545 if (bump_pointer_space_ != nullptr) {
3546 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3547 }
3548 if (region_space_ != nullptr) {
3549 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3550 }
3551 }
3552
3553 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3554 if (rosalloc_space_ != nullptr) {
3555 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3556 if (freed_bytes_revoke > 0U) {
3557 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3558 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3559 }
3560 }
3561 }
3562
3563 void Heap::RevokeAllThreadLocalBuffers() {
3564 if (rosalloc_space_ != nullptr) {
3565 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3566 if (freed_bytes_revoke > 0U) {
3567 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3568 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3569 }
3570 }
3571 if (bump_pointer_space_ != nullptr) {
3572 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3573 }
3574 if (region_space_ != nullptr) {
3575 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3576 }
3577 }
3578
3579 bool Heap::IsGCRequestPending() const {
3580 return concurrent_gc_pending_.LoadRelaxed();
3581 }
3582
3583 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3584 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3585 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3586 static_cast<jlong>(timeout));
3587 }
3588
3589 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3590 Thread* self = ThreadForEnv(env);
3591 if (native_need_to_run_finalization_) {
3592 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3593 UpdateMaxNativeFootprint();
3594 native_need_to_run_finalization_ = false;
3595 }
3596 // Total number of native bytes allocated.
3597 size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3598 new_native_bytes_allocated += bytes;
3599 if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
3600 collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
3601 collector::kGcTypeFull;
3602
3603 // The second watermark is higher than the gc watermark. If you hit this it means you are
3604 // allocating native objects faster than the GC can keep up with.
3605 if (new_native_bytes_allocated > growth_limit_) {
3606 if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
3607 // Just finished a GC, attempt to run finalizers.
3608 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3609 CHECK(!env->ExceptionCheck());
3610 // Native bytes allocated may be updated by finalization, refresh it.
3611 new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
3612 }
3613 // If we still are over the watermark, attempt a GC for alloc and run finalizers.
3614 if (new_native_bytes_allocated > growth_limit_) {
3615 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3616 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3617 native_need_to_run_finalization_ = false;
3618 CHECK(!env->ExceptionCheck());
3619 }
3620 // We have just run finalizers, update the native watermark since it is very likely that
3621 // finalizers released native managed allocations.
3622 UpdateMaxNativeFootprint();
3623 } else if (!IsGCRequestPending()) {
3624 if (IsGcConcurrent()) {
3625 RequestConcurrentGC(self, true); // Request non-sticky type.
3626 } else {
3627 CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3628 }
3629 }
3630 }
3631 }
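// Summary of the two native watermarks above (illustrative sizes, not taken from the source):
// with a GC watermark of 24 MB and a growth_limit_ of 64 MB, crossing 24 MB merely requests a
// background (or, for non-concurrent collectors, synchronous) collection, while crossing 64 MB
// means native allocations are outpacing the GC, so the caller is throttled: wait for any running
// GC, run finalizers, and if still over the limit perform a blocking GC-for-native-alloc before
// returning.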
3632
3633 void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
3634 size_t expected_size;
3635 do {
3636 expected_size = native_bytes_allocated_.LoadRelaxed();
3637 if (UNLIKELY(bytes > expected_size)) {
3638 ScopedObjectAccess soa(env);
3639 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
3640 StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
3641 "registered as allocated", bytes, expected_size).c_str());
3642 break;
3643 }
3644 } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
3645 expected_size - bytes));
3646 }
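// The loop above is a standard compare-and-swap retry: reload the current counter, throw a
// RuntimeException if the caller tries to free more native bytes than were ever registered, and
// otherwise attempt to publish (expected_size - bytes); a weak CAS failure just means another
// thread updated the counter concurrently, so the subtraction is recomputed and retried.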
3647
3648 size_t Heap::GetTotalMemory() const {
3649 return std::max(max_allowed_footprint_, GetBytesAllocated());
3650 }
3651
3652 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3653 DCHECK(mod_union_table != nullptr);
3654 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3655 }
3656
3657 void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
3658 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3659 (c->IsVariableSize() || c->GetObjectSize() == byte_count));
3660 CHECK_GE(byte_count, sizeof(mirror::Object));
3661 }
3662
3663 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3664 CHECK(remembered_set != nullptr);
3665 space::Space* space = remembered_set->GetSpace();
3666 CHECK(space != nullptr);
3667 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3668 remembered_sets_.Put(space, remembered_set);
3669 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3670 }
3671
3672 void Heap::RemoveRememberedSet(space::Space* space) {
3673 CHECK(space != nullptr);
3674 auto it = remembered_sets_.find(space);
3675 CHECK(it != remembered_sets_.end());
3676 delete it->second;
3677 remembered_sets_.erase(it);
3678 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3679 }
3680
3681 void Heap::ClearMarkedObjects() {
3682 // Clear all of the spaces' mark bitmaps.
3683 for (const auto& space : GetContinuousSpaces()) {
3684 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
3685 if (space->GetLiveBitmap() != mark_bitmap) {
3686 mark_bitmap->Clear();
3687 }
3688 }
3689 // Clear the marked objects in the discontinuous space object sets.
3690 for (const auto& space : GetDiscontinuousSpaces()) {
3691 space->GetMarkBitmap()->Clear();
3692 }
3693 }
3694
3695 // Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
3696 class StackCrawlState {
3697 public:
3698 StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
3699 : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
3700 }
3701 size_t GetFrameCount() const {
3702 return frame_count_;
3703 }
3704 static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
3705 auto* const state = reinterpret_cast<StackCrawlState*>(arg);
3706 const uintptr_t ip = _Unwind_GetIP(context);
3707 // The first stack frame is get_backtrace itself. Skip it.
3708 if (ip != 0 && state->skip_count_ > 0) {
3709 --state->skip_count_;
3710 return _URC_NO_REASON;
3711 }
3712 // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
3713 state->frames_[state->frame_count_] = ip;
3714 state->frame_count_++;
3715 return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
3716 }
3717
3718 private:
3719 uintptr_t* const frames_;
3720 size_t frame_count_;
3721 const size_t max_depth_;
3722 size_t skip_count_;
3723 };
3724
3725 static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
3726 StackCrawlState state(frames, max_depth, 0u);
3727 _Unwind_Backtrace(&StackCrawlState::Callback, &state);
3728 return state.GetFrameCount();
3729 }
3730
3731 void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
3732 auto* const runtime = Runtime::Current();
3733 if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
3734 !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
3735 // Check if we should GC.
3736 bool new_backtrace = false;
3737 {
3738 static constexpr size_t kMaxFrames = 16u;
3739 uintptr_t backtrace[kMaxFrames];
3740 const size_t frames = get_backtrace(backtrace, kMaxFrames);
3741 uint64_t hash = 0;
3742 for (size_t i = 0; i < frames; ++i) {
3743 hash = hash * 2654435761 + backtrace[i];
3744 hash += (hash >> 13) ^ (hash << 6);
3745 }
3746 MutexLock mu(self, *backtrace_lock_);
3747 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
3748 if (new_backtrace) {
3749 seen_backtraces_.insert(hash);
3750 }
3751 }
3752 if (new_backtrace) {
3753 StackHandleScope<1> hs(self);
3754 auto h = hs.NewHandleWrapper(obj);
3755 CollectGarbage(false);
3756 unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
3757 } else {
3758 seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
3759 }
3760 }
3761 }
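// GC-stress sketch: the backtrace of each allocation site is folded into a single 64-bit hash (a
// Knuth-style multiplicative hash of the return addresses with extra bit mixing), and a GC is
// forced only the first time a given hash is seen. A hypothetical allocation path such as
// Foo::Bar -> Heap::AllocObject therefore triggers exactly one forced collection no matter how
// often it allocates afterwards, which keeps stress mode tolerable while still covering every
// distinct allocation site once.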
3762
3763 } // namespace gc
3764 } // namespace art
3765