1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "heap.h"
18
19 #include <limits>
20 #include <memory>
21 #include <vector>
22
23 #include "android-base/stringprintf.h"
24
25 #include "allocation_listener.h"
26 #include "art_field-inl.h"
27 #include "backtrace_helper.h"
28 #include "base/allocator.h"
29 #include "base/arena_allocator.h"
30 #include "base/dumpable.h"
31 #include "base/histogram-inl.h"
32 #include "base/memory_tool.h"
33 #include "base/stl_util.h"
34 #include "base/systrace.h"
35 #include "base/time_utils.h"
36 #include "common_throws.h"
37 #include "cutils/sched_policy.h"
38 #include "debugger.h"
39 #include "dex_file-inl.h"
40 #include "gc/accounting/atomic_stack.h"
41 #include "gc/accounting/card_table-inl.h"
42 #include "gc/accounting/heap_bitmap-inl.h"
43 #include "gc/accounting/mod_union_table-inl.h"
44 #include "gc/accounting/remembered_set.h"
45 #include "gc/accounting/space_bitmap-inl.h"
46 #include "gc/collector/concurrent_copying.h"
47 #include "gc/collector/mark_compact.h"
48 #include "gc/collector/mark_sweep.h"
49 #include "gc/collector/partial_mark_sweep.h"
50 #include "gc/collector/semi_space.h"
51 #include "gc/collector/sticky_mark_sweep.h"
52 #include "gc/reference_processor.h"
53 #include "gc/scoped_gc_critical_section.h"
54 #include "gc/space/bump_pointer_space.h"
55 #include "gc/space/dlmalloc_space-inl.h"
56 #include "gc/space/image_space.h"
57 #include "gc/space/large_object_space.h"
58 #include "gc/space/region_space.h"
59 #include "gc/space/rosalloc_space-inl.h"
60 #include "gc/space/space-inl.h"
61 #include "gc/space/zygote_space.h"
62 #include "gc/task_processor.h"
63 #include "gc/verification.h"
64 #include "entrypoints/quick/quick_alloc_entrypoints.h"
65 #include "gc_pause_listener.h"
66 #include "heap-inl.h"
67 #include "image.h"
68 #include "intern_table.h"
69 #include "java_vm_ext.h"
70 #include "jit/jit.h"
71 #include "jit/jit_code_cache.h"
72 #include "obj_ptr-inl.h"
73 #include "mirror/class-inl.h"
74 #include "mirror/object-inl.h"
75 #include "mirror/object-refvisitor-inl.h"
76 #include "mirror/object_array-inl.h"
77 #include "mirror/reference-inl.h"
78 #include "os.h"
79 #include "reflection.h"
80 #include "runtime.h"
81 #include "ScopedLocalRef.h"
82 #include "scoped_thread_state_change-inl.h"
83 #include "handle_scope-inl.h"
84 #include "thread_list.h"
85 #include "verify_object-inl.h"
86 #include "well_known_classes.h"
87
88 namespace art {
89
90 namespace gc {
91
92 using android::base::StringPrintf;
93
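// Number of extra collector transitions to force on each process state change; used only for
// stress testing (0 disables it).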
94 static constexpr size_t kCollectorTransitionStressIterations = 0;
95 static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
96 // Minimum amount of remaining bytes before a concurrent GC is triggered.
97 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
98 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
99 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
100 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
101 // threads (lower pauses, use less memory bandwidth).
102 static constexpr double kStickyGcThroughputAdjustment = 1.0;
103 // Whether or not we compact the zygote in PreZygoteFork.
104 static constexpr bool kCompactZygote = kMovingCollector;
105 // How many reserve entries are at the end of the allocation stack; these are only needed if the
106 // allocation stack overflows.
107 static constexpr size_t kAllocationStackReserveSize = 1024;
108 // Default mark stack size in bytes.
109 static const size_t kDefaultMarkStackSize = 64 * KB;
110 // Define space name.
111 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
112 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
113 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
114 static const char* kNonMovingSpaceName = "non moving space";
115 static const char* kZygoteSpaceName = "zygote space";
116 static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
117 static constexpr bool kGCALotMode = false;
118 // GC alot mode uses a small allocation stack to stress test a lot of GC.
119 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
120 sizeof(mirror::HeapReference<mirror::Object>);
121 // Verify object mode uses a small allocation stack size since searching the allocation stack is slow.
122 static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
123 sizeof(mirror::HeapReference<mirror::Object>);
124 static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
125 sizeof(mirror::HeapReference<mirror::Object>);
126 // System.runFinalization can deadlock with native allocations; to deal with this, we have a
127 // timeout on how long we wait for finalizers to run. b/21544853
128 static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
129
130 // For deterministic compilation, we need the heap to be at a well-known address.
131 static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
132 // Dump the rosalloc stats on SIGQUIT.
133 static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
134
135 // Extra amount added to the heap growth multiplier. Used to adjust the GC ergonomics for the
136 // read barrier config.
137 static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
138
139 static const char* kRegionSpaceName = "main space (region space)";
140
141 // If true, we log all GCs in both the foreground and background. Used for debugging.
142 static constexpr bool kLogAllGCs = false;
143
144 // How much we grow the TLAB if we can do it.
145 static constexpr size_t kPartialTlabSize = 16 * KB;
146 static constexpr bool kUsePartialTlabs = true;
147
148 #if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
149 // 300 MB (0x12c00000) - (default non-moving space capacity).
150 static uint8_t* const kPreferredAllocSpaceBegin =
151 reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
152 #else
153 // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
154 static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
155 #endif
156
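// Returns true when the process is in a jank perceptible state (e.g. user visible), in which
// case GC pause times matter more than raw throughput.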
157 static inline bool CareAboutPauseTimes() {
158 return Runtime::Current()->InJankPerceptibleProcessState();
159 }
160
161 Heap::Heap(size_t initial_size,
162 size_t growth_limit,
163 size_t min_free,
164 size_t max_free,
165 double target_utilization,
166 double foreground_heap_growth_multiplier,
167 size_t capacity,
168 size_t non_moving_space_capacity,
169 const std::string& image_file_name,
170 const InstructionSet image_instruction_set,
171 CollectorType foreground_collector_type,
172 CollectorType background_collector_type,
173 space::LargeObjectSpaceType large_object_space_type,
174 size_t large_object_threshold,
175 size_t parallel_gc_threads,
176 size_t conc_gc_threads,
177 bool low_memory_mode,
178 size_t long_pause_log_threshold,
179 size_t long_gc_log_threshold,
180 bool ignore_max_footprint,
181 bool use_tlab,
182 bool verify_pre_gc_heap,
183 bool verify_pre_sweeping_heap,
184 bool verify_post_gc_heap,
185 bool verify_pre_gc_rosalloc,
186 bool verify_pre_sweeping_rosalloc,
187 bool verify_post_gc_rosalloc,
188 bool gc_stress_mode,
189 bool measure_gc_performance,
190 bool use_homogeneous_space_compaction_for_oom,
191 uint64_t min_interval_homogeneous_space_compaction_by_oom)
192 : non_moving_space_(nullptr),
193 rosalloc_space_(nullptr),
194 dlmalloc_space_(nullptr),
195 main_space_(nullptr),
196 collector_type_(kCollectorTypeNone),
197 foreground_collector_type_(foreground_collector_type),
198 background_collector_type_(background_collector_type),
199 desired_collector_type_(foreground_collector_type_),
200 pending_task_lock_(nullptr),
201 parallel_gc_threads_(parallel_gc_threads),
202 conc_gc_threads_(conc_gc_threads),
203 low_memory_mode_(low_memory_mode),
204 long_pause_log_threshold_(long_pause_log_threshold),
205 long_gc_log_threshold_(long_gc_log_threshold),
206 ignore_max_footprint_(ignore_max_footprint),
207 zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
208 zygote_space_(nullptr),
209 large_object_threshold_(large_object_threshold),
210 disable_thread_flip_count_(0),
211 thread_flip_running_(false),
212 collector_type_running_(kCollectorTypeNone),
213 thread_running_gc_(nullptr),
214 last_gc_type_(collector::kGcTypeNone),
215 next_gc_type_(collector::kGcTypePartial),
216 capacity_(capacity),
217 growth_limit_(growth_limit),
218 max_allowed_footprint_(initial_size),
219 concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
220 total_bytes_freed_ever_(0),
221 total_objects_freed_ever_(0),
222 num_bytes_allocated_(0),
223 new_native_bytes_allocated_(0),
224 old_native_bytes_allocated_(0),
225 num_bytes_freed_revoke_(0),
226 verify_missing_card_marks_(false),
227 verify_system_weaks_(false),
228 verify_pre_gc_heap_(verify_pre_gc_heap),
229 verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
230 verify_post_gc_heap_(verify_post_gc_heap),
231 verify_mod_union_table_(false),
232 verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
233 verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
234 verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
235 gc_stress_mode_(gc_stress_mode),
236 /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
237 * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
238 * verification is enabled, we limit the size of allocation stacks to speed up their
239 * searching.
240 */
241 max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
242 : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
243 kDefaultAllocationStackSize),
244 current_allocator_(kAllocatorTypeDlMalloc),
245 current_non_moving_allocator_(kAllocatorTypeNonMoving),
246 bump_pointer_space_(nullptr),
247 temp_space_(nullptr),
248 region_space_(nullptr),
249 min_free_(min_free),
250 max_free_(max_free),
251 target_utilization_(target_utilization),
252 foreground_heap_growth_multiplier_(
253 foreground_heap_growth_multiplier + kExtraHeapGrowthMultiplier),
254 total_wait_time_(0),
255 verify_object_mode_(kVerifyObjectModeDisabled),
256 disable_moving_gc_count_(0),
257 semi_space_collector_(nullptr),
258 mark_compact_collector_(nullptr),
259 concurrent_copying_collector_(nullptr),
260 is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
261 use_tlab_(use_tlab),
262 main_space_backup_(nullptr),
263 min_interval_homogeneous_space_compaction_by_oom_(
264 min_interval_homogeneous_space_compaction_by_oom),
265 last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
266 pending_collector_transition_(nullptr),
267 pending_heap_trim_(nullptr),
268 use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
269 running_collection_is_blocking_(false),
270 blocking_gc_count_(0U),
271 blocking_gc_time_(0U),
272 last_update_time_gc_count_rate_histograms_( // Round down by the window duration.
273 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
274 gc_count_last_window_(0U),
275 blocking_gc_count_last_window_(0U),
276 gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
277 blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
278 kGcCountRateMaxBucketCount),
279 alloc_tracking_enabled_(false),
280 backtrace_lock_(nullptr),
281 seen_backtrace_count_(0u),
282 unique_backtrace_count_(0u),
283 gc_disabled_for_shutdown_(false) {
284 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
285 LOG(INFO) << "Heap() entering";
286 }
287 if (kUseReadBarrier) {
288 CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
289 CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
290 }
291 verification_.reset(new Verification(this));
292 CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
293 ScopedTrace trace(__FUNCTION__);
294 Runtime* const runtime = Runtime::Current();
295 // If we aren't the zygote, switch to the default non zygote allocator. This may update the
296 // entrypoints.
297 const bool is_zygote = runtime->IsZygote();
298 if (!is_zygote) {
299 // Background compaction is currently not supported for command line runs.
300 if (background_collector_type_ != foreground_collector_type_) {
301 VLOG(heap) << "Disabling background compaction for non zygote";
302 background_collector_type_ = foreground_collector_type_;
303 }
304 }
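// ChangeCollector() also switches the current allocator to the one matching the chosen
// collector type.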
305 ChangeCollector(desired_collector_type_);
306 live_bitmap_.reset(new accounting::HeapBitmap(this));
307 mark_bitmap_.reset(new accounting::HeapBitmap(this));
308 // Requested begin for the alloc space, to follow the mapped image and oat files
309 uint8_t* requested_alloc_space_begin = nullptr;
310 if (foreground_collector_type_ == kCollectorTypeCC) {
311 // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
312 // image (dex2oat for target).
313 requested_alloc_space_begin = kPreferredAllocSpaceBegin;
314 }
315
316 // Load image space(s).
317 if (space::ImageSpace::LoadBootImage(image_file_name,
318 image_instruction_set,
319 &boot_image_spaces_,
320 &requested_alloc_space_begin)) {
321 for (auto space : boot_image_spaces_) {
322 AddSpace(space);
323 }
324 }
325
326 /*
327 requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
328 +- nonmoving space (non_moving_space_capacity)+-
329 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
330 +-????????????????????????????????????????????+-
331 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
332 +-main alloc space / bump space 1 (capacity_) +-
333 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
334 +-????????????????????????????????????????????+-
335 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
336 +-main alloc space2 / bump space 2 (capacity_)+-
337 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
338 */
339 // We don't have hspace compaction enabled with GSS or CC.
340 if (foreground_collector_type_ == kCollectorTypeGSS ||
341 foreground_collector_type_ == kCollectorTypeCC) {
342 use_homogeneous_space_compaction_for_oom_ = false;
343 }
344 bool support_homogeneous_space_compaction =
345 background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
346 use_homogeneous_space_compaction_for_oom_;
347 // We may use the same space as the main space for the non moving space if we don't need to
348 // compact from the main space.
349 // This is not the case if we support homogeneous compaction or have a moving background
350 // collector type.
351 bool separate_non_moving_space = is_zygote ||
352 support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
353 IsMovingGc(background_collector_type_);
354 if (foreground_collector_type_ == kCollectorTypeGSS) {
355 separate_non_moving_space = false;
356 }
357 std::unique_ptr<MemMap> main_mem_map_1;
358 std::unique_ptr<MemMap> main_mem_map_2;
359
360 // Gross hack to make dex2oat deterministic.
361 if (foreground_collector_type_ == kCollectorTypeMS &&
362 requested_alloc_space_begin == nullptr &&
363 Runtime::Current()->IsAotCompiler()) {
364 // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
365 // b/26849108
366 requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
367 }
368 uint8_t* request_begin = requested_alloc_space_begin;
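// When a separate non moving space is used, it is mapped at requested_alloc_space_begin and the
// main space(s) are requested directly above it.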
369 if (request_begin != nullptr && separate_non_moving_space) {
370 request_begin += non_moving_space_capacity;
371 }
372 std::string error_str;
373 std::unique_ptr<MemMap> non_moving_space_mem_map;
374 if (separate_non_moving_space) {
375 ScopedTrace trace2("Create separate non moving space");
376 // If we are the zygote, the non moving space becomes the zygote space when we run
377 // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
378 // rename the mem map later.
379 const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
380 // Reserve the non moving mem map before the other two since it needs to be at a specific
381 // address.
382 non_moving_space_mem_map.reset(
383 MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
384 non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
385 &error_str));
386 CHECK(non_moving_space_mem_map != nullptr) << error_str;
387 // Try to reserve virtual memory at a lower address if we have a separate non moving space.
388 request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
389 }
390 // Attempt to create 2 mem maps at or after the requested begin.
391 if (foreground_collector_type_ != kCollectorTypeCC) {
392 ScopedTrace trace2("Create main mem map");
393 if (separate_non_moving_space || !is_zygote) {
394 main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
395 request_begin,
396 capacity_,
397 &error_str));
398 } else {
399 // If no separate non-moving space and we are the zygote, the main space must come right
400 // after the image space to avoid a gap. This is required since we want the zygote space to
401 // be adjacent to the image space.
402 main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
403 PROT_READ | PROT_WRITE, true, false,
404 &error_str));
405 }
406 CHECK(main_mem_map_1.get() != nullptr) << error_str;
407 }
408 if (support_homogeneous_space_compaction ||
409 background_collector_type_ == kCollectorTypeSS ||
410 foreground_collector_type_ == kCollectorTypeSS) {
411 ScopedTrace trace2("Create main mem map 2");
412 main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
413 capacity_, &error_str));
414 CHECK(main_mem_map_2.get() != nullptr) << error_str;
415 }
416
417 // Create the non moving space first so that bitmaps don't take up the address range.
418 if (separate_non_moving_space) {
419 ScopedTrace trace2("Add non moving space");
420 // Non moving space is always dlmalloc since we currently don't have support for multiple
421 // active rosalloc spaces.
422 const size_t size = non_moving_space_mem_map->Size();
423 non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
424 non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
425 initial_size, size, size, false);
426 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
427 CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
428 << requested_alloc_space_begin;
429 AddSpace(non_moving_space_);
430 }
431 // Create other spaces based on whether or not we have a moving GC.
432 if (foreground_collector_type_ == kCollectorTypeCC) {
433 CHECK(separate_non_moving_space);
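// The region space mem map is twice the heap capacity, presumably so the concurrent copying
// collector always has room to evacuate live regions (see the 2 * Xmx note above).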
434 MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
435 capacity_ * 2,
436 request_begin);
437 CHECK(region_space_mem_map != nullptr) << "No region space mem map";
438 region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
439 AddSpace(region_space_);
440 } else if (IsMovingGc(foreground_collector_type_) &&
441 foreground_collector_type_ != kCollectorTypeGSS) {
442 // Create bump pointer spaces.
443 // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
444 // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
445 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
446 main_mem_map_1.release());
447 CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
448 AddSpace(bump_pointer_space_);
449 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
450 main_mem_map_2.release());
451 CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
452 AddSpace(temp_space_);
453 CHECK(separate_non_moving_space);
454 } else {
455 CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
456 CHECK(main_space_ != nullptr);
457 AddSpace(main_space_);
458 if (!separate_non_moving_space) {
459 non_moving_space_ = main_space_;
460 CHECK(!non_moving_space_->CanMoveObjects());
461 }
462 if (foreground_collector_type_ == kCollectorTypeGSS) {
463 CHECK_EQ(foreground_collector_type_, background_collector_type_);
464 // Create bump pointer spaces instead of a backup space.
465 main_mem_map_2.release();
466 bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
467 kGSSBumpPointerSpaceCapacity, nullptr);
468 CHECK(bump_pointer_space_ != nullptr);
469 AddSpace(bump_pointer_space_);
470 temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
471 kGSSBumpPointerSpaceCapacity, nullptr);
472 CHECK(temp_space_ != nullptr);
473 AddSpace(temp_space_);
474 } else if (main_mem_map_2.get() != nullptr) {
475 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
476 main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
477 growth_limit_, capacity_, name, true));
478 CHECK(main_space_backup_.get() != nullptr);
479 // Add the space so it's accounted for in the heap_begin and heap_end.
480 AddSpace(main_space_backup_.get());
481 }
482 }
483 CHECK(non_moving_space_ != nullptr);
484 CHECK(!non_moving_space_->CanMoveObjects());
485 // Allocate the large object space.
486 if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
487 large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
488 capacity_);
489 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
490 } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
491 large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
492 CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
493 } else {
494 // Disable the large object space by making the cutoff excessively large.
495 large_object_threshold_ = std::numeric_limits<size_t>::max();
496 large_object_space_ = nullptr;
497 }
498 if (large_object_space_ != nullptr) {
499 AddSpace(large_object_space_);
500 }
501 // Compute heap capacity. Continuous spaces are sorted in order of Begin().
502 CHECK(!continuous_spaces_.empty());
503 // Relies on the spaces being sorted.
504 uint8_t* heap_begin = continuous_spaces_.front()->Begin();
505 uint8_t* heap_end = continuous_spaces_.back()->Limit();
506 size_t heap_capacity = heap_end - heap_begin;
507 // Remove the main backup space since it slows down the GC to have unused extra spaces.
508 // TODO: Avoid needing to do this.
509 if (main_space_backup_.get() != nullptr) {
510 RemoveSpace(main_space_backup_.get());
511 }
512 // Allocate the card table.
513 // We currently don't support dynamically resizing the card table.
514 // Since we don't know where in the low_4gb the app image will be located, make the card table
515 // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
516 UNUSED(heap_capacity);
517 // Start at 4 KB (kMinHeapAddress); we can be sure there are no spaces mapped this low since the
518 // address range is reserved by the kernel.
519 static constexpr size_t kMinHeapAddress = 4 * KB;
520 card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
521 4 * GB - kMinHeapAddress));
522 CHECK(card_table_.get() != nullptr) << "Failed to create card table";
523 if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
524 rb_table_.reset(new accounting::ReadBarrierTable());
525 DCHECK(rb_table_->IsAllCleared());
526 }
527 if (HasBootImageSpace()) {
528 // Don't add the image mod union table if we are running without an image; this can crash if
529 // we use the CardCache implementation.
530 for (space::ImageSpace* image_space : GetBootImageSpaces()) {
531 accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
532 "Image mod-union table", this, image_space);
533 CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
534 AddModUnionTable(mod_union_table);
535 }
536 }
537 if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
538 accounting::RememberedSet* non_moving_space_rem_set =
539 new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
540 CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
541 AddRememberedSet(non_moving_space_rem_set);
542 }
543 // TODO: Count objects in the image space here?
544 num_bytes_allocated_.StoreRelaxed(0);
545 mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
546 kDefaultMarkStackSize));
547 const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
548 allocation_stack_.reset(accounting::ObjectStack::Create(
549 "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
550 live_stack_.reset(accounting::ObjectStack::Create(
551 "live stack", max_allocation_stack_size_, alloc_stack_capacity));
552 // It's still too early to take a lock because there are no threads yet, but we can create locks
553 // now. We don't create them earlier to make it clear that you can't use locks during heap
554 // initialization.
555 gc_complete_lock_ = new Mutex("GC complete lock");
556 gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
557 *gc_complete_lock_));
558 native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
559 native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
560 *native_blocking_gc_lock_));
561 native_blocking_gc_in_progress_ = false;
562 native_blocking_gcs_finished_ = 0;
563
564 thread_flip_lock_ = new Mutex("GC thread flip lock");
565 thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
566 *thread_flip_lock_));
567 task_processor_.reset(new TaskProcessor());
568 reference_processor_.reset(new ReferenceProcessor());
569 pending_task_lock_ = new Mutex("Pending task lock");
570 if (ignore_max_footprint_) {
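// With the max footprint ignored, both limits become effectively unlimited, so allocation never
// triggers a concurrent GC through concurrent_start_bytes_.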
571 SetIdealFootprint(std::numeric_limits<size_t>::max());
572 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
573 }
574 CHECK_NE(max_allowed_footprint_, 0U);
575 // Create our garbage collectors.
576 for (size_t i = 0; i < 2; ++i) {
577 const bool concurrent = i != 0;
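// Iteration 0 creates the non-concurrent mark sweep collectors (used by kCollectorTypeMS),
// iteration 1 the concurrent ones (used by kCollectorTypeCMS).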
578 if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
579 (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
580 garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
581 garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
582 garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
583 }
584 }
585 if (kMovingCollector) {
586 if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
587 MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
588 use_homogeneous_space_compaction_for_oom_) {
589 // TODO: Clean this up.
590 const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
591 semi_space_collector_ = new collector::SemiSpace(this, generational,
592 generational ? "generational" : "");
593 garbage_collectors_.push_back(semi_space_collector_);
594 }
595 if (MayUseCollector(kCollectorTypeCC)) {
596 concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
597 "",
598 measure_gc_performance);
599 DCHECK(region_space_ != nullptr);
600 concurrent_copying_collector_->SetRegionSpace(region_space_);
601 garbage_collectors_.push_back(concurrent_copying_collector_);
602 }
603 if (MayUseCollector(kCollectorTypeMC)) {
604 mark_compact_collector_ = new collector::MarkCompact(this);
605 garbage_collectors_.push_back(mark_compact_collector_);
606 }
607 }
608 if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
609 (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
610 // Check that there's no gap between the image space and the non moving space so that the
611 // immune region won't break (e.g. due to a large object allocated in the gap). This is only
612 // required when we're the zygote or using GSS.
613 // Space with smallest Begin().
614 space::ImageSpace* first_space = nullptr;
615 for (space::ImageSpace* space : boot_image_spaces_) {
616 if (first_space == nullptr || space->Begin() < first_space->Begin()) {
617 first_space = space;
618 }
619 }
620 bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
621 if (!no_gap) {
622 PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
623 MemMap::DumpMaps(LOG_STREAM(ERROR), true);
624 LOG(FATAL) << "There's a gap between the image space and the non-moving space";
625 }
626 }
627 instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
628 if (gc_stress_mode_) {
629 backtrace_lock_ = new Mutex("backtrace lock");
630 }
631 if (is_running_on_memory_tool_ || gc_stress_mode_) {
632 instrumentation->InstrumentQuickAllocEntryPoints();
633 }
634 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
635 LOG(INFO) << "Heap() exiting";
636 }
637 }
638
639 MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
640 uint8_t* request_begin,
641 size_t capacity,
642 std::string* out_error_str) {
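// Try to map at the preferred address first; if that fails, retry and let the kernel pick the
// address.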
643 while (true) {
644 MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
645 PROT_READ | PROT_WRITE, true, false, out_error_str);
646 if (map != nullptr || request_begin == nullptr) {
647 return map;
648 }
649 // Retry a second time with no specified request begin.
650 request_begin = nullptr;
651 }
652 }
653
654 bool Heap::MayUseCollector(CollectorType type) const {
655 return foreground_collector_type_ == type || background_collector_type_ == type;
656 }
657
658 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
659 size_t initial_size,
660 size_t growth_limit,
661 size_t capacity,
662 const char* name,
663 bool can_move_objects) {
664 space::MallocSpace* malloc_space = nullptr;
665 if (kUseRosAlloc) {
666 // Create rosalloc space.
667 malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
668 initial_size, growth_limit, capacity,
669 low_memory_mode_, can_move_objects);
670 } else {
671 malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
672 initial_size, growth_limit, capacity,
673 can_move_objects);
674 }
675 if (collector::SemiSpace::kUseRememberedSet) {
676 accounting::RememberedSet* rem_set =
677 new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
678 CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
679 AddRememberedSet(rem_set);
680 }
681 CHECK(malloc_space != nullptr) << "Failed to create " << name;
682 malloc_space->SetFootprintLimit(malloc_space->Capacity());
683 return malloc_space;
684 }
685
686 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
687 size_t capacity) {
688 // Is background compaction enabled?
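// The main space must allow moving objects whenever exactly one of the foreground/background
// collectors is a moving GC (a collector transition copies the main space) or homogeneous space
// compaction may run.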
689 bool can_move_objects = IsMovingGc(background_collector_type_) !=
690 IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
691 // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
692 // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
693 // from the main space to the zygote space. If background compaction is enabled, always pass in
694 // that we can move objects.
695 if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
696 // After the zygote we want this to be false if we don't have background compaction enabled so
697 // that getting primitive array elements is faster.
698 // We never have homogeneous compaction with GSS and don't need a space with movable objects.
699 can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
700 }
701 if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
702 RemoveRememberedSet(main_space_);
703 }
704 const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
705 main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
706 can_move_objects);
707 SetSpaceAsDefault(main_space_);
708 VLOG(heap) << "Created main space " << main_space_;
709 }
710
711 void Heap::ChangeAllocator(AllocatorType allocator) {
712 if (current_allocator_ != allocator) {
713 // These two allocators are only used internally and don't have any entrypoints.
714 CHECK_NE(allocator, kAllocatorTypeLOS);
715 CHECK_NE(allocator, kAllocatorTypeNonMoving);
716 current_allocator_ = allocator;
717 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
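// Repoint the quick allocation entrypoints so that compiled code allocates with the new
// allocator from now on.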
718 SetQuickAllocEntryPointsAllocator(current_allocator_);
719 Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
720 }
721 }
722
723 void Heap::DisableMovingGc() {
724 CHECK(!kUseReadBarrier);
725 if (IsMovingGc(foreground_collector_type_)) {
726 foreground_collector_type_ = kCollectorTypeCMS;
727 }
728 if (IsMovingGc(background_collector_type_)) {
729 background_collector_type_ = foreground_collector_type_;
730 }
731 TransitionCollector(foreground_collector_type_);
732 Thread* const self = Thread::Current();
733 ScopedThreadStateChange tsc(self, kSuspended);
734 ScopedSuspendAll ssa(__FUNCTION__);
735 // Something may have caused the transition to fail.
736 if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
737 CHECK(main_space_ != nullptr);
738 // The allocation stack may have non movable objects in it. We need to flush it since the GC
739 // can only handle marking allocation stack objects of one non moving space and one main
740 // space.
741 {
742 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
743 FlushAllocStack();
744 }
745 main_space_->DisableMovingObjects();
746 non_moving_space_ = main_space_;
747 CHECK(!non_moving_space_->CanMoveObjects());
748 }
749 }
750
751 bool Heap::IsCompilingBoot() const {
752 if (!Runtime::Current()->IsAotCompiler()) {
753 return false;
754 }
755 ScopedObjectAccess soa(Thread::Current());
756 for (const auto& space : continuous_spaces_) {
757 if (space->IsImageSpace() || space->IsZygoteSpace()) {
758 return false;
759 }
760 }
761 return true;
762 }
763
764 void Heap::IncrementDisableMovingGC(Thread* self) {
765 // Need to do this holding the lock to prevent races where the GC is about to run / running when
766 // we attempt to disable it.
767 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
768 MutexLock mu(self, *gc_complete_lock_);
769 ++disable_moving_gc_count_;
770 if (IsMovingGc(collector_type_running_)) {
771 WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
772 }
773 }
774
775 void Heap::DecrementDisableMovingGC(Thread* self) {
776 MutexLock mu(self, *gc_complete_lock_);
777 CHECK_GT(disable_moving_gc_count_, 0U);
778 --disable_moving_gc_count_;
779 }
780
781 void Heap::IncrementDisableThreadFlip(Thread* self) {
782 // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
783 CHECK(kUseReadBarrier);
784 bool is_nested = self->GetDisableThreadFlipCount() > 0;
785 self->IncrementDisableThreadFlipCount();
786 if (is_nested) {
787 // If this is a nested JNI critical section enter, we don't need to wait or increment the global
788 // counter. The global counter is incremented only once for a thread for the outermost enter.
789 return;
790 }
791 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
792 MutexLock mu(self, *thread_flip_lock_);
793 bool has_waited = false;
794 uint64_t wait_start = NanoTime();
795 if (thread_flip_running_) {
796 ATRACE_BEGIN("IncrementDisableThreadFlip");
797 while (thread_flip_running_) {
798 has_waited = true;
799 thread_flip_cond_->Wait(self);
800 }
801 ATRACE_END();
802 }
803 ++disable_thread_flip_count_;
804 if (has_waited) {
805 uint64_t wait_time = NanoTime() - wait_start;
806 total_wait_time_ += wait_time;
807 if (wait_time > long_pause_log_threshold_) {
808 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
809 }
810 }
811 }
812
813 void Heap::DecrementDisableThreadFlip(Thread* self) {
814 // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
815 // the GC that may be waiting to do a thread flip.
816 CHECK(kUseReadBarrier);
817 self->DecrementDisableThreadFlipCount();
818 bool is_outermost = self->GetDisableThreadFlipCount() == 0;
819 if (!is_outermost) {
820 // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
821 // The global counter is decremented only once for a thread for the outermost exit.
822 return;
823 }
824 MutexLock mu(self, *thread_flip_lock_);
825 CHECK_GT(disable_thread_flip_count_, 0U);
826 --disable_thread_flip_count_;
827 if (disable_thread_flip_count_ == 0) {
828 // Potentially notify the GC thread blocking to begin a thread flip.
829 thread_flip_cond_->Broadcast(self);
830 }
831 }
832
833 void Heap::ThreadFlipBegin(Thread* self) {
834 // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
835 // > 0, block. Otherwise, go ahead.
836 CHECK(kUseReadBarrier);
837 ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
838 MutexLock mu(self, *thread_flip_lock_);
839 bool has_waited = false;
840 uint64_t wait_start = NanoTime();
841 CHECK(!thread_flip_running_);
842 // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
843 // GC. This is like the writer preference of a reader-writer lock.
844 thread_flip_running_ = true;
845 while (disable_thread_flip_count_ > 0) {
846 has_waited = true;
847 thread_flip_cond_->Wait(self);
848 }
849 if (has_waited) {
850 uint64_t wait_time = NanoTime() - wait_start;
851 total_wait_time_ += wait_time;
852 if (wait_time > long_pause_log_threshold_) {
853 LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
854 }
855 }
856 }
857
858 void Heap::ThreadFlipEnd(Thread* self) {
859 // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
860 // waiting to enter a JNI critical section.
861 CHECK(kUseReadBarrier);
862 MutexLock mu(self, *thread_flip_lock_);
863 CHECK(thread_flip_running_);
864 thread_flip_running_ = false;
865 // Potentially notify mutator threads blocking to enter a JNI critical section.
866 thread_flip_cond_->Broadcast(self);
867 }
868
869 void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
870 if (old_process_state != new_process_state) {
871 const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
872 for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
873 // Start at index 1 to avoid "is always false" warning.
874 // Have iteration 1 always transition the collector.
875 TransitionCollector((((i & 1) == 1) == jank_perceptible)
876 ? foreground_collector_type_
877 : background_collector_type_);
878 usleep(kCollectorTransitionStressWait);
879 }
880 if (jank_perceptible) {
881 // Transition back to foreground right away to prevent jank.
882 RequestCollectorTransition(foreground_collector_type_, 0);
883 } else {
884 // Don't delay for debug builds since we may want to stress test the GC.
885 // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
886 // special handling which does a homogenous space compaction once but then doesn't transition
887 // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
888 // transition the collector.
889 RequestCollectorTransition(background_collector_type_,
890 kIsDebugBuild ? 0 : kCollectorTransitionWait);
891 }
892 }
893 }
894
895 void Heap::CreateThreadPool() {
896 const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
897 if (num_threads != 0) {
898 thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
899 }
900 }
901
902 // Visit objects when threads aren't suspended. If using a concurrent moving
903 // GC, disable moving GC, suspend threads, and then visit objects.
904 void Heap::VisitObjects(ObjectCallback callback, void* arg) {
905 Thread* self = Thread::Current();
906 Locks::mutator_lock_->AssertSharedHeld(self);
907 DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
908 if (IsGcConcurrentAndMoving()) {
909 // Concurrent moving GC. Just suspending threads isn't sufficient
910 // because a collection isn't one big pause and we could suspend
911 // threads in the middle (between phases) of a concurrent moving
912 // collection where it's not easily known which objects are alive
913 // (both the region space and the non-moving space) or which
914 // copies of objects to visit, and the to-space invariant could be
915 // easily broken. Instead, visit objects while the GC isn't running, by using
916 // IncrementDisableMovingGC() and suspending the threads.
917 IncrementDisableMovingGC(self);
918 {
919 ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
920 ScopedSuspendAll ssa(__FUNCTION__);
921 VisitObjectsInternalRegionSpace(callback, arg);
922 VisitObjectsInternal(callback, arg);
923 }
924 DecrementDisableMovingGC(self);
925 } else {
926 // Since the concurrent moving GC path has thread suspension, also poison ObjPtrs in the
927 // normal case to catch bugs.
928 self->PoisonObjectPointers();
929 // GCs can move objects, so don't allow this.
930 ScopedAssertNoThreadSuspension ants("Visiting objects");
931 DCHECK(region_space_ == nullptr);
932 VisitObjectsInternal(callback, arg);
933 self->PoisonObjectPointers();
934 }
935 }
936
937 // Visit objects when threads are already suspended.
938 void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
939 Thread* self = Thread::Current();
940 Locks::mutator_lock_->AssertExclusiveHeld(self);
941 VisitObjectsInternalRegionSpace(callback, arg);
942 VisitObjectsInternal(callback, arg);
943 }
944
945 // Visit objects in the region spaces.
946 void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
947 Thread* self = Thread::Current();
948 Locks::mutator_lock_->AssertExclusiveHeld(self);
949 if (region_space_ != nullptr) {
950 DCHECK(IsGcConcurrentAndMoving());
951 if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
952 // Exclude the pre-zygote fork time where the semi-space collector
953 // calls VerifyHeapReferences() as part of the zygote compaction
954 // which then would call here without the moving GC disabled,
955 // which is fine.
956 bool is_thread_running_gc = false;
957 if (kIsDebugBuild) {
958 MutexLock mu(self, *gc_complete_lock_);
959 is_thread_running_gc = self == thread_running_gc_;
960 }
961 // If we are not the thread running the GC in a GC exclusive region, then moving GC
962 // must be disabled.
963 DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
964 }
965 region_space_->Walk(callback, arg);
966 }
967 }
968
969 // Visit objects in the other spaces.
970 void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
971 if (bump_pointer_space_ != nullptr) {
972 // Visit objects in bump pointer space.
973 bump_pointer_space_->Walk(callback, arg);
974 }
975 // TODO: Switch to standard begin and end to use a range-based loop.
976 for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
977 mirror::Object* const obj = it->AsMirrorPtr();
978 if (obj != nullptr && obj->GetClass() != nullptr) {
979 // Avoid the race condition caused by the object not yet being written into the allocation
980 // stack or the class not yet being written in the object. Or, if
981 // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
982 callback(obj, arg);
983 }
984 }
985 {
986 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
987 GetLiveBitmap()->Walk(callback, arg);
988 }
989 }
990
991 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
992 space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
993 space::ContinuousSpace* space2 = non_moving_space_;
994 // TODO: Generalize this to n bitmaps?
995 CHECK(space1 != nullptr);
996 CHECK(space2 != nullptr);
997 MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
998 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
999 stack);
1000 }
1001
1002 void Heap::DeleteThreadPool() {
1003 thread_pool_.reset(nullptr);
1004 }
1005
1006 void Heap::AddSpace(space::Space* space) {
1007 CHECK(space != nullptr);
1008 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1009 if (space->IsContinuousSpace()) {
1010 DCHECK(!space->IsDiscontinuousSpace());
1011 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1012 // Continuous spaces don't necessarily have bitmaps.
1013 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1014 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1015 // The region space bitmap is not added since VisitObjects visits the region space objects with
1016 // special handling.
1017 if (live_bitmap != nullptr && !space->IsRegionSpace()) {
1018 CHECK(mark_bitmap != nullptr);
1019 live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
1020 mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
1021 }
1022 continuous_spaces_.push_back(continuous_space);
1023 // Ensure that spaces remain sorted in increasing order of start address.
1024 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
1025 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
1026 return a->Begin() < b->Begin();
1027 });
1028 } else {
1029 CHECK(space->IsDiscontinuousSpace());
1030 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1031 live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1032 mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1033 discontinuous_spaces_.push_back(discontinuous_space);
1034 }
1035 if (space->IsAllocSpace()) {
1036 alloc_spaces_.push_back(space->AsAllocSpace());
1037 }
1038 }
1039
1040 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
1041 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1042 if (continuous_space->IsDlMallocSpace()) {
1043 dlmalloc_space_ = continuous_space->AsDlMallocSpace();
1044 } else if (continuous_space->IsRosAllocSpace()) {
1045 rosalloc_space_ = continuous_space->AsRosAllocSpace();
1046 }
1047 }
1048
1049 void Heap::RemoveSpace(space::Space* space) {
1050 DCHECK(space != nullptr);
1051 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1052 if (space->IsContinuousSpace()) {
1053 DCHECK(!space->IsDiscontinuousSpace());
1054 space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1055 // Continuous spaces don't necessarily have bitmaps.
1056 accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1057 accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1058 if (live_bitmap != nullptr && !space->IsRegionSpace()) {
1059 DCHECK(mark_bitmap != nullptr);
1060 live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
1061 mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
1062 }
1063 auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
1064 DCHECK(it != continuous_spaces_.end());
1065 continuous_spaces_.erase(it);
1066 } else {
1067 DCHECK(space->IsDiscontinuousSpace());
1068 space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1069 live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1070 mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1071 auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
1072 discontinuous_space);
1073 DCHECK(it != discontinuous_spaces_.end());
1074 discontinuous_spaces_.erase(it);
1075 }
1076 if (space->IsAllocSpace()) {
1077 auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
1078 DCHECK(it != alloc_spaces_.end());
1079 alloc_spaces_.erase(it);
1080 }
1081 }
1082
1083 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
1084 // Dump cumulative timings.
1085 os << "Dumping cumulative Gc timings\n";
1086 uint64_t total_duration = 0;
1087 // Dump cumulative loggers for each GC type.
1088 uint64_t total_paused_time = 0;
1089 for (auto& collector : garbage_collectors_) {
1090 total_duration += collector->GetCumulativeTimings().GetTotalNs();
1091 total_paused_time += collector->GetTotalPausedTimeNs();
1092 collector->DumpPerformanceInfo(os);
1093 }
1094 if (total_duration != 0) {
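// total_duration is in nanoseconds; convert it to seconds for the throughput numbers below.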
1095 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
1096 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1097 os << "Mean GC size throughput: "
1098 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
1099 os << "Mean GC object throughput: "
1100 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
1101 }
1102 uint64_t total_objects_allocated = GetObjectsAllocatedEver();
1103 os << "Total number of allocations " << total_objects_allocated << "\n";
1104 os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1105 os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1106 os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1107 os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1108 os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1109 os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1110 os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1111 if (HasZygoteSpace()) {
1112 os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1113 }
1114 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1115 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1116 os << "Total GC count: " << GetGcCount() << "\n";
1117 os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1118 os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1119 os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1120
1121 {
1122 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1123 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1124 os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1125 gc_count_rate_histogram_.DumpBins(os);
1126 os << "\n";
1127 }
1128 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1129 os << "Histogram of blocking GC count per "
1130 << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1131 blocking_gc_count_rate_histogram_.DumpBins(os);
1132 os << "\n";
1133 }
1134 }
1135
1136 if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1137 rosalloc_space_->DumpStats(os);
1138 }
1139
1140 os << "Registered native bytes allocated: "
1141 << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
1142 << "\n";
1143
1144 BaseMutex::DumpAll(os);
1145 }
1146
1147 void Heap::ResetGcPerformanceInfo() {
1148 for (auto& collector : garbage_collectors_) {
1149 collector->ResetMeasurements();
1150 }
1151 total_bytes_freed_ever_ = 0;
1152 total_objects_freed_ever_ = 0;
1153 total_wait_time_ = 0;
1154 blocking_gc_count_ = 0;
1155 blocking_gc_time_ = 0;
1156 gc_count_last_window_ = 0;
1157 blocking_gc_count_last_window_ = 0;
1158 last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
1159 (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1160 {
1161 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1162 gc_count_rate_histogram_.Reset();
1163 blocking_gc_count_rate_histogram_.Reset();
1164 }
1165 }
1166
1167 uint64_t Heap::GetGcCount() const {
1168 uint64_t gc_count = 0U;
1169 for (auto& collector : garbage_collectors_) {
1170 gc_count += collector->GetCumulativeTimings().GetIterations();
1171 }
1172 return gc_count;
1173 }
1174
1175 uint64_t Heap::GetGcTime() const {
1176 uint64_t gc_time = 0U;
1177 for (auto& collector : garbage_collectors_) {
1178 gc_time += collector->GetCumulativeTimings().GetTotalNs();
1179 }
1180 return gc_time;
1181 }
1182
1183 uint64_t Heap::GetBlockingGcCount() const {
1184 return blocking_gc_count_;
1185 }
1186
1187 uint64_t Heap::GetBlockingGcTime() const {
1188 return blocking_gc_time_;
1189 }
1190
1191 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1192 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1193 if (gc_count_rate_histogram_.SampleSize() > 0U) {
1194 gc_count_rate_histogram_.DumpBins(os);
1195 }
1196 }
1197
1198 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1199 MutexLock mu(Thread::Current(), *gc_complete_lock_);
1200 if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1201 blocking_gc_count_rate_histogram_.DumpBins(os);
1202 }
1203 }
1204
1205 ALWAYS_INLINE
1206 static inline AllocationListener* GetAndOverwriteAllocationListener(
1207 Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
1208 AllocationListener* old;
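// Retry the compare-and-swap until the new listener is installed, then return the listener that
// was previously registered.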
1209 do {
1210 old = storage->LoadSequentiallyConsistent();
1211 } while (!storage->CompareExchangeStrongSequentiallyConsistent(old, new_value));
1212 return old;
1213 }
1214
1215 Heap::~Heap() {
1216 VLOG(heap) << "Starting ~Heap()";
1217 STLDeleteElements(&garbage_collectors_);
1218 // If we don't reset then the mark stack complains in its destructor.
1219 allocation_stack_->Reset();
1220 allocation_records_.reset();
1221 live_stack_->Reset();
1222 STLDeleteValues(&mod_union_tables_);
1223 STLDeleteValues(&remembered_sets_);
1224 STLDeleteElements(&continuous_spaces_);
1225 STLDeleteElements(&discontinuous_spaces_);
1226 delete gc_complete_lock_;
1227 delete native_blocking_gc_lock_;
1228 delete thread_flip_lock_;
1229 delete pending_task_lock_;
1230 delete backtrace_lock_;
1231 if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1232 LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1233 << " total=" << seen_backtrace_count_.LoadRelaxed() +
1234 unique_backtrace_count_.LoadRelaxed();
1235 }
1236
1237 VLOG(heap) << "Finished ~Heap()";
1238 }
1239
1240
1241 space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
1242 for (const auto& space : continuous_spaces_) {
1243 if (space->Contains(addr)) {
1244 return space;
1245 }
1246 }
1247 return nullptr;
1248 }
1249
1250 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1251 bool fail_ok) const {
1252 space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
1253 if (space != nullptr) {
1254 return space;
1255 }
1256 if (!fail_ok) {
1257 LOG(FATAL) << "object " << obj << " not inside any spaces!";
1258 }
1259 return nullptr;
1260 }
1261
1262 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
1263 bool fail_ok) const {
1264 for (const auto& space : discontinuous_spaces_) {
1265 if (space->Contains(obj.Ptr())) {
1266 return space;
1267 }
1268 }
1269 if (!fail_ok) {
1270 LOG(FATAL) << "object " << obj << " not inside any spaces!";
1271 }
1272 return nullptr;
1273 }
1274
1275 space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
1276 space::Space* result = FindContinuousSpaceFromObject(obj, true);
1277 if (result != nullptr) {
1278 return result;
1279 }
1280 return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1281 }
1282
1283 space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
1284 for (const auto& space : continuous_spaces_) {
1285 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1286 return space;
1287 }
1288 }
1289 for (const auto& space : discontinuous_spaces_) {
1290 if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
1291 return space;
1292 }
1293 }
1294 return nullptr;
1295 }
1296
1297
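// Builds an OutOfMemoryError message describing the failed allocation, including free memory and
// the growth limit; when enough total memory was free (i.e. the failure looks like fragmentation),
// it also logs per-space fragmentation details for the allocator that failed.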
1298 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1299 // If we're in a stack overflow, do not create a new exception. It would require running the
1300 // constructor, which will of course still be in a stack overflow.
1301 if (self->IsHandlingStackOverflow()) {
1302 self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1303 return;
1304 }
1305
1306 std::ostringstream oss;
1307 size_t total_bytes_free = GetFreeMemory();
1308 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1309 << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
1310 << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
1311 << growth_limit_;
1312 // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1313 if (total_bytes_free >= byte_count) {
1314 space::AllocSpace* space = nullptr;
1315 if (allocator_type == kAllocatorTypeNonMoving) {
1316 space = non_moving_space_;
1317 } else if (allocator_type == kAllocatorTypeRosAlloc ||
1318 allocator_type == kAllocatorTypeDlMalloc) {
1319 space = main_space_;
1320 } else if (allocator_type == kAllocatorTypeBumpPointer ||
1321 allocator_type == kAllocatorTypeTLAB) {
1322 space = bump_pointer_space_;
1323 } else if (allocator_type == kAllocatorTypeRegion ||
1324 allocator_type == kAllocatorTypeRegionTLAB) {
1325 space = region_space_;
1326 }
1327 if (space != nullptr) {
1328 space->LogFragmentationAllocFailure(oss, byte_count);
1329 }
1330 }
1331 self->ThrowOutOfMemoryError(oss.str().c_str());
1332 }
1333
1334 void Heap::DoPendingCollectorTransition() {
1335 CollectorType desired_collector_type = desired_collector_type_;
1336 // Launch homogeneous space compaction if it is desired.
1337 if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1338 if (!CareAboutPauseTimes()) {
1339 PerformHomogeneousSpaceCompact();
1340 } else {
1341 VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1342 }
1343 } else if (desired_collector_type == kCollectorTypeCCBackground) {
1344 DCHECK(kUseReadBarrier);
1345 if (!CareAboutPauseTimes()) {
1346 // Invoke CC full compaction.
1347 CollectGarbageInternal(collector::kGcTypeFull,
1348 kGcCauseCollectorTransition,
1349 /*clear_soft_references*/false);
1350 } else {
1351 VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
1352 }
1353 } else {
1354 TransitionCollector(desired_collector_type);
1355 }
1356 }
1357
1358 void Heap::Trim(Thread* self) {
1359 Runtime* const runtime = Runtime::Current();
1360 if (!CareAboutPauseTimes()) {
1361 // Deflate the monitors; this can cause a pause, but it shouldn't matter since we don't care
1362 // about pauses here.
1363 ScopedTrace trace("Deflating monitors");
1364 // Avoid race conditions on the lock word for CC.
1365 ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1366 ScopedSuspendAll ssa(__FUNCTION__);
1367 uint64_t start_time = NanoTime();
1368 size_t count = runtime->GetMonitorList()->DeflateMonitors();
1369 VLOG(heap) << "Deflating " << count << " monitors took "
1370 << PrettyDuration(NanoTime() - start_time);
1371 }
1372 TrimIndirectReferenceTables(self);
1373 TrimSpaces(self);
1374 // Trim arenas that may have been used by JIT or verifier.
1375 runtime->GetArenaPool()->TrimMaps();
1376 }
1377
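// Closure run on every thread via a checkpoint: each thread trims its own local indirect
// reference table and then passes the barrier so that the requesting thread in
// TrimIndirectReferenceTables() can wait for all threads to finish.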
1378 class TrimIndirectReferenceTableClosure : public Closure {
1379 public:
1380 explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1381 }
1382 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1383 thread->GetJniEnv()->locals.Trim();
1384 // If thread is a running mutator, then act on behalf of the trim thread.
1385 // See the code in ThreadList::RunCheckpoint.
1386 barrier_->Pass(Thread::Current());
1387 }
1388
1389 private:
1390 Barrier* const barrier_;
1391 };
1392
1393 void Heap::TrimIndirectReferenceTables(Thread* self) {
1394 ScopedObjectAccess soa(self);
1395 ScopedTrace trace(__PRETTY_FUNCTION__);
1396 JavaVMExt* vm = soa.Vm();
1397 // Trim globals indirect reference table.
1398 vm->TrimGlobals();
1399 // Trim locals indirect reference tables.
1400 Barrier barrier(0);
1401 TrimIndirectReferenceTableClosure closure(&barrier);
1402 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1403 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1404 if (barrier_count != 0) {
1405 barrier.Increment(self, barrier_count);
1406 }
1407 }
1408
1409 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1410 // Need to do this before acquiring the locks since we don't want to get suspended while
1411 // holding any locks.
1412 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1413 MutexLock mu(self, *gc_complete_lock_);
1414 // Ensure there is only one GC at a time.
1415 WaitForGcToCompleteLocked(cause, self);
1416 collector_type_running_ = collector_type;
1417 thread_running_gc_ = self;
1418 }
1419
1420 void Heap::TrimSpaces(Thread* self) {
1421 // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1422 // trimming.
1423 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1424 ScopedTrace trace(__PRETTY_FUNCTION__);
1425 const uint64_t start_ns = NanoTime();
1426 // Trim the managed spaces.
1427 uint64_t total_alloc_space_allocated = 0;
1428 uint64_t total_alloc_space_size = 0;
1429 uint64_t managed_reclaimed = 0;
1430 {
1431 ScopedObjectAccess soa(self);
1432 for (const auto& space : continuous_spaces_) {
1433 if (space->IsMallocSpace()) {
1434 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1435 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1436 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1437 // for a long period of time.
1438 managed_reclaimed += malloc_space->Trim();
1439 }
1440 total_alloc_space_size += malloc_space->Size();
1441 }
1442 }
1443 }
1444 total_alloc_space_allocated = GetBytesAllocated();
1445 if (large_object_space_ != nullptr) {
1446 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1447 }
1448 if (bump_pointer_space_ != nullptr) {
1449 total_alloc_space_allocated -= bump_pointer_space_->Size();
1450 }
1451 if (region_space_ != nullptr) {
1452 total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1453 }
1454 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1455 static_cast<float>(total_alloc_space_size);
1456 uint64_t gc_heap_end_ns = NanoTime();
1457 // We never move things in the native heap, so we can finish the GC at this point.
1458 FinishGC(self, collector::kGcTypeNone);
1459
1460 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1461 << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1462 << static_cast<int>(100 * managed_utilization) << "%.";
1463 }
1464
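// Note: a null address is treated as valid here; otherwise the address must be object-aligned
// and fall inside one of the known continuous or discontinuous spaces.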
1465 bool Heap::IsValidObjectAddress(const void* addr) const {
1466 if (addr == nullptr) {
1467 return true;
1468 }
1469 return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr;
1470 }
1471
1472 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const {
1473 return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr;
1474 }
1475
1476 bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
1477 bool search_allocation_stack,
1478 bool search_live_stack,
1479 bool sorted) {
1480 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) {
1481 return false;
1482 }
1483 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) {
1484 mirror::Class* klass = obj->GetClass<kVerifyNone>();
1485 if (obj == klass) {
1486 // This case happens for java.lang.Class.
1487 return true;
1488 }
1489 return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1490 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) {
1491 // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1492 // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1493 return temp_space_->Contains(obj.Ptr());
1494 }
1495 if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) {
1496 return true;
1497 }
1498 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1499 space::DiscontinuousSpace* d_space = nullptr;
1500 if (c_space != nullptr) {
1501 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1502 return true;
1503 }
1504 } else {
1505 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1506 if (d_space != nullptr) {
1507 if (d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1508 return true;
1509 }
1510 }
1511 }
1512 // This is covering the allocation/live stack swapping that is done without mutators suspended.
1513 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1514 if (i > 0) {
1515 NanoSleep(MsToNs(10));
1516 }
1517 if (search_allocation_stack) {
1518 if (sorted) {
1519 if (allocation_stack_->ContainsSorted(obj.Ptr())) {
1520 return true;
1521 }
1522 } else if (allocation_stack_->Contains(obj.Ptr())) {
1523 return true;
1524 }
1525 }
1526
1527 if (search_live_stack) {
1528 if (sorted) {
1529 if (live_stack_->ContainsSorted(obj.Ptr())) {
1530 return true;
1531 }
1532 } else if (live_stack_->Contains(obj.Ptr())) {
1533 return true;
1534 }
1535 }
1536 }
1537 // We need to check the bitmaps again since there is a race where we mark something as live and
1538 // then clear the stack containing it.
1539 if (c_space != nullptr) {
1540 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1541 return true;
1542 }
1543 } else {
1544 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1545 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1546 return true;
1547 }
1548 }
1549 return false;
1550 }
1551
1552 std::string Heap::DumpSpaces() const {
1553 std::ostringstream oss;
1554 DumpSpaces(oss);
1555 return oss.str();
1556 }
1557
1558 void Heap::DumpSpaces(std::ostream& stream) const {
1559 for (const auto& space : continuous_spaces_) {
1560 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1561 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1562 stream << space << " " << *space << "\n";
1563 if (live_bitmap != nullptr) {
1564 stream << live_bitmap << " " << *live_bitmap << "\n";
1565 }
1566 if (mark_bitmap != nullptr) {
1567 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1568 }
1569 }
1570 for (const auto& space : discontinuous_spaces_) {
1571 stream << space << " " << *space << "\n";
1572 }
1573 }
1574
1575 void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
1576 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1577 return;
1578 }
1579
1580 // Ignore early dawn of the universe verifications.
1581 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1582 return;
1583 }
1584 CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
1585 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1586 CHECK(c != nullptr) << "Null class in object " << obj;
1587 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1588 CHECK(VerifyClassClass(c));
1589
1590 if (verify_object_mode_ > kVerifyObjectModeFast) {
1591 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1592 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1593 }
1594 }
1595
1596 void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
1597 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
1598 }
1599
1600 void Heap::VerifyHeap() {
1601 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1602 GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
1603 }
1604
1605 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1606 // Use signed comparison since freed bytes can be negative when a background compaction to
1607 // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
1608 // free list backed space, which typically increases the memory footprint due to padding and binning.
1609 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1610 // Note: This relies on two's complement for handling negative freed_bytes.
1611 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1612 if (Runtime::Current()->HasStatsEnabled()) {
1613 RuntimeStats* thread_stats = Thread::Current()->GetStats();
1614 thread_stats->freed_objects += freed_objects;
1615 thread_stats->freed_bytes += freed_bytes;
1616 // TODO: Do this concurrently.
1617 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1618 global_stats->freed_objects += freed_objects;
1619 global_stats->freed_bytes += freed_bytes;
1620 }
1621 }
1622
1623 void Heap::RecordFreeRevoke() {
1624 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1625 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1626 // If there's a concurrent revoke, it is fine not to reset num_bytes_freed_revoke_
1627 // all the way to zero, since the remainder will be subtracted at the next GC.
1628 size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1629 CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1630 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1631 CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1632 bytes_freed) << "num_bytes_allocated_ underflow";
1633 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1634 }
1635
1636 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1637 if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1638 return rosalloc_space_;
1639 }
1640 for (const auto& space : continuous_spaces_) {
1641 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1642 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1643 return space->AsContinuousSpace()->AsRosAllocSpace();
1644 }
1645 }
1646 }
1647 return nullptr;
1648 }
1649
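// Returns whether the allocation entrypoints are currently instrumented. The slow path below
// uses this to abort (return null) when instrumentation was enabled while the thread was
// suspended, so the caller can retry through the instrumented entrypoints.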
1650 static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
1651 instrumentation::Instrumentation* const instrumentation =
1652 Runtime::Current()->GetInstrumentation();
1653 return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1654 }
1655
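// Allocation slow path. The strategy escalates gradually: wait for any in-progress GC and retry,
// run the next planned GC type and retry, walk the remaining entries in gc_plan_, retry with heap
// growth allowed, and finally run a full GC that clears SoftReferences before throwing
// OutOfMemoryError. Any change of allocator or instrumentation while suspended aborts the attempt
// by returning null so the caller can restart through the correct entrypoint.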
1656 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1657 AllocatorType allocator,
1658 bool instrumented,
1659 size_t alloc_size,
1660 size_t* bytes_allocated,
1661 size_t* usable_size,
1662 size_t* bytes_tl_bulk_allocated,
1663 ObjPtr<mirror::Class>* klass) {
1664 bool was_default_allocator = allocator == GetCurrentAllocator();
1665 // Make sure there is no pending exception since we may need to throw an OOME.
1666 self->AssertNoPendingException();
1667 DCHECK(klass != nullptr);
1668 StackHandleScope<1> hs(self);
1669 HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
1670 // The allocation failed. If the GC is running, block until it completes, and then retry the
1671 // allocation.
1672 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1673 // If we were the default allocator but the allocator changed while we were suspended,
1674 // abort the allocation.
1675 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1676 (!instrumented && EntrypointsInstrumented())) {
1677 return nullptr;
1678 }
1679 if (last_gc != collector::kGcTypeNone) {
1680 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1681 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1682 usable_size, bytes_tl_bulk_allocated);
1683 if (ptr != nullptr) {
1684 return ptr;
1685 }
1686 }
1687
1688 collector::GcType tried_type = next_gc_type_;
1689 const bool gc_ran =
1690 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1691 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1692 (!instrumented && EntrypointsInstrumented())) {
1693 return nullptr;
1694 }
1695 if (gc_ran) {
1696 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1697 usable_size, bytes_tl_bulk_allocated);
1698 if (ptr != nullptr) {
1699 return ptr;
1700 }
1701 }
1702
1703 // Loop through our different Gc types and try to Gc until we get enough free memory.
1704 for (collector::GcType gc_type : gc_plan_) {
1705 if (gc_type == tried_type) {
1706 continue;
1707 }
1708 // Attempt to run the collector, if we succeed, re-try the allocation.
1709 const bool plan_gc_ran =
1710 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1711 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1712 (!instrumented && EntrypointsInstrumented())) {
1713 return nullptr;
1714 }
1715 if (plan_gc_ran) {
1716 // Did we free sufficient memory for the allocation to succeed?
1717 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1718 usable_size, bytes_tl_bulk_allocated);
1719 if (ptr != nullptr) {
1720 return ptr;
1721 }
1722 }
1723 }
1724 // Allocations have failed after GCs; this is an exceptional state.
1725 // Try harder, growing the heap if necessary.
1726 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1727 usable_size, bytes_tl_bulk_allocated);
1728 if (ptr != nullptr) {
1729 return ptr;
1730 }
1731 // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1732 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1733 // VM spec requires that all SoftReferences have been collected and cleared before throwing
1734 // OOME.
1735 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1736 << " allocation";
1737 // TODO: Run finalization, but this may cause more allocations to occur.
1738 // We don't need a WaitForGcToComplete here either.
1739 DCHECK(!gc_plan_.empty());
1740 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1741 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1742 (!instrumented && EntrypointsInstrumented())) {
1743 return nullptr;
1744 }
1745 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1746 bytes_tl_bulk_allocated);
1747 if (ptr == nullptr) {
1748 const uint64_t current_time = NanoTime();
1749 switch (allocator) {
1750 case kAllocatorTypeRosAlloc:
1751 // Fall-through.
1752 case kAllocatorTypeDlMalloc: {
1753 if (use_homogeneous_space_compaction_for_oom_ &&
1754 current_time - last_time_homogeneous_space_compaction_by_oom_ >
1755 min_interval_homogeneous_space_compaction_by_oom_) {
1756 last_time_homogeneous_space_compaction_by_oom_ = current_time;
1757 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1758 // Thread suspension could have occurred.
1759 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1760 (!instrumented && EntrypointsInstrumented())) {
1761 return nullptr;
1762 }
1763 switch (result) {
1764 case HomogeneousSpaceCompactResult::kSuccess:
1765 // If the allocation succeeded, we delayed an oom.
1766 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1767 usable_size, bytes_tl_bulk_allocated);
1768 if (ptr != nullptr) {
1769 count_delayed_oom_++;
1770 }
1771 break;
1772 case HomogeneousSpaceCompactResult::kErrorReject:
1773 // Reject due to disabled moving GC.
1774 break;
1775 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1776 // Throw OOM by default.
1777 break;
1778 default: {
1779 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1780 << static_cast<size_t>(result);
1781 UNREACHABLE();
1782 }
1783 }
1784 // Always print that we ran homogeneous space compaction since this can cause jank.
1785 VLOG(heap) << "Ran heap homogeneous space compaction, "
1786 << " requested defragmentation "
1787 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1788 << " performed defragmentation "
1789 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1790 << " ignored homogeneous space compaction "
1791 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1792 << " delayed count = "
1793 << count_delayed_oom_.LoadSequentiallyConsistent();
1794 }
1795 break;
1796 }
1797 case kAllocatorTypeNonMoving: {
1798 if (kUseReadBarrier) {
1799 // DisableMovingGc() isn't compatible with CC.
1800 break;
1801 }
1802 // Try to transition the heap if the allocation failure was due to the space being full.
1803 if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
1804 // If we aren't out of memory then the OOM was probably from the non moving space being
1805 // full. Attempt to disable compaction and turn the main space into a non moving space.
1806 DisableMovingGc();
1807 // Thread suspension could have occurred.
1808 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1809 (!instrumented && EntrypointsInstrumented())) {
1810 return nullptr;
1811 }
1812 // If we are still a moving GC then something must have caused the transition to fail.
1813 if (IsMovingGc(collector_type_)) {
1814 MutexLock mu(self, *gc_complete_lock_);
1815 // If we couldn't disable moving GC, just throw OOME and return null.
1816 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1817 << disable_moving_gc_count_;
1818 } else {
1819 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1820 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1821 usable_size, bytes_tl_bulk_allocated);
1822 }
1823 }
1824 break;
1825 }
1826 default: {
1827 // Do nothing for other allocators.
1828 }
1829 }
1830 }
1831 // If the allocation hasn't succeeded by this point, throw an OOM error.
1832 if (ptr == nullptr) {
1833 ThrowOutOfMemoryError(self, alloc_size, allocator);
1834 }
1835 return ptr;
1836 }
1837
1838 void Heap::SetTargetHeapUtilization(float target) {
1839 DCHECK_GT(target, 0.0f); // asserted in Java code
1840 DCHECK_LT(target, 1.0f);
1841 target_utilization_ = target;
1842 }
1843
1844 size_t Heap::GetObjectsAllocated() const {
1845 Thread* const self = Thread::Current();
1846 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1847 // Prevent GC from running during GetObjectsAllocated since we may get a checkpoint request that
1848 // tells us to suspend while we are doing SuspendAll. b/35232978
1849 gc::ScopedGCCriticalSection gcs(Thread::Current(),
1850 gc::kGcCauseGetObjectsAllocated,
1851 gc::kCollectorTypeGetObjectsAllocated);
1852 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1853 ScopedSuspendAll ssa(__FUNCTION__);
1854 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1855 size_t total = 0;
1856 for (space::AllocSpace* space : alloc_spaces_) {
1857 total += space->GetObjectsAllocated();
1858 }
1859 return total;
1860 }
1861
1862 uint64_t Heap::GetObjectsAllocatedEver() const {
1863 uint64_t total = GetObjectsFreedEver();
1864 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1865 if (Thread::Current() != nullptr) {
1866 total += GetObjectsAllocated();
1867 }
1868 return total;
1869 }
1870
1871 uint64_t Heap::GetBytesAllocatedEver() const {
1872 return GetBytesFreedEver() + GetBytesAllocated();
1873 }
1874
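// Visitor used by CountInstances: for every heap object, bumps the counter of each requested
// class that matches, either by exact class or by assignability.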
1875 class InstanceCounter {
1876 public:
1877 InstanceCounter(const std::vector<Handle<mirror::Class>>& classes,
1878 bool use_is_assignable_from,
1879 uint64_t* counts)
1880 REQUIRES_SHARED(Locks::mutator_lock_)
1881 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
1882
1883 static void Callback(mirror::Object* obj, void* arg)
1884 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1885 InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1886 mirror::Class* instance_class = obj->GetClass();
1887 CHECK(instance_class != nullptr);
1888 for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1889 ObjPtr<mirror::Class> klass = instance_counter->classes_[i].Get();
1890 if (instance_counter->use_is_assignable_from_) {
1891 if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
1892 ++instance_counter->counts_[i];
1893 }
1894 } else if (instance_class == klass) {
1895 ++instance_counter->counts_[i];
1896 }
1897 }
1898 }
1899
1900 private:
1901 const std::vector<Handle<mirror::Class>>& classes_;
1902 bool use_is_assignable_from_;
1903 uint64_t* const counts_;
1904 DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1905 };
1906
1907 void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
1908 bool use_is_assignable_from,
1909 uint64_t* counts) {
1910 InstanceCounter counter(classes, use_is_assignable_from, counts);
1911 VisitObjects(InstanceCounter::Callback, &counter);
1912 }
1913
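// Visitor used by GetInstances: collects handles to objects whose exact class matches class_,
// up to max_count_ entries (0 means unlimited).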
1914 class InstanceCollector {
1915 public:
1916 InstanceCollector(VariableSizedHandleScope& scope,
1917 Handle<mirror::Class> c,
1918 int32_t max_count,
1919 std::vector<Handle<mirror::Object>>& instances)
1920 REQUIRES_SHARED(Locks::mutator_lock_)
1921 : scope_(scope),
1922 class_(c),
1923 max_count_(max_count),
1924 instances_(instances) {}
1925
1926 static void Callback(mirror::Object* obj, void* arg)
1927 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1928 DCHECK(arg != nullptr);
1929 InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1930 if (obj->GetClass() == instance_collector->class_.Get()) {
1931 if (instance_collector->max_count_ == 0 ||
1932 instance_collector->instances_.size() < instance_collector->max_count_) {
1933 instance_collector->instances_.push_back(instance_collector->scope_.NewHandle(obj));
1934 }
1935 }
1936 }
1937
1938 private:
1939 VariableSizedHandleScope& scope_;
1940 Handle<mirror::Class> const class_;
1941 const uint32_t max_count_;
1942 std::vector<Handle<mirror::Object>>& instances_;
1943 DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1944 };
1945
1946 void Heap::GetInstances(VariableSizedHandleScope& scope,
1947 Handle<mirror::Class> c,
1948 int32_t max_count,
1949 std::vector<Handle<mirror::Object>>& instances) {
1950 InstanceCollector collector(scope, c, max_count, instances);
1951 VisitObjects(&InstanceCollector::Callback, &collector);
1952 }
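// Illustrative only (not part of the original file): a debugger-style caller already holding the
// mutator lock and a VariableSizedHandleScope |scope| might use this roughly as follows:
//   std::vector<Handle<mirror::Object>> instances;
//   heap->GetInstances(scope, h_klass, /*max_count*/ 0, instances);
// which fills |instances| with handles to every object whose class is exactly |h_klass|.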
1953
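// Visitor used by GetReferringObjects: walks each object's references and records a handle to
// every object that holds a field pointing at the target, up to max_count_ entries (0 means
// unlimited).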
1954 class ReferringObjectsFinder {
1955 public:
1956 ReferringObjectsFinder(VariableSizedHandleScope& scope,
1957 Handle<mirror::Object> object,
1958 int32_t max_count,
1959 std::vector<Handle<mirror::Object>>& referring_objects)
1960 REQUIRES_SHARED(Locks::mutator_lock_)
1961 : scope_(scope),
1962 object_(object),
1963 max_count_(max_count),
1964 referring_objects_(referring_objects) {}
1965
1966 static void Callback(mirror::Object* obj, void* arg)
1967 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1968 reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1969 }
1970
1971 // For bitmap Visit.
1972 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1973 // annotalysis on visitors.
1974 void operator()(ObjPtr<mirror::Object> o) const NO_THREAD_SAFETY_ANALYSIS {
1975 o->VisitReferences(*this, VoidFunctor());
1976 }
1977
1978 // For Object::VisitReferences.
1979 void operator()(ObjPtr<mirror::Object> obj,
1980 MemberOffset offset,
1981 bool is_static ATTRIBUTE_UNUSED) const
1982 REQUIRES_SHARED(Locks::mutator_lock_) {
1983 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1984 if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1985 referring_objects_.push_back(scope_.NewHandle(obj));
1986 }
1987 }
1988
1989 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1990 const {}
1991 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1992
1993 private:
1994 VariableSizedHandleScope& scope_;
1995 Handle<mirror::Object> const object_;
1996 const uint32_t max_count_;
1997 std::vector<Handle<mirror::Object>>& referring_objects_;
1998 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1999 };
2000
2001 void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
2002 Handle<mirror::Object> o,
2003 int32_t max_count,
2004 std::vector<Handle<mirror::Object>>& referring_objects) {
2005 ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
2006 VisitObjects(&ReferringObjectsFinder::Callback, &finder);
2007 }
2008
2009 void Heap::CollectGarbage(bool clear_soft_references) {
2010 // Even if we waited for a GC we still need to do another GC since weaks allocated during the
2011 // last GC will not have necessarily been cleared.
2012 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
2013 }
2014
2015 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2016 return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2017 foreground_collector_type_ == kCollectorTypeCMS;
2018 }
2019
2020 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2021 Thread* self = Thread::Current();
2022 // Inc requested homogeneous space compaction.
2023 count_requested_homogeneous_space_compaction_++;
2024 // Store performed homogeneous space compaction at a new request arrival.
2025 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2026 Locks::mutator_lock_->AssertNotHeld(self);
2027 {
2028 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2029 MutexLock mu(self, *gc_complete_lock_);
2030 // Ensure there is only one GC at a time.
2031 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2032 // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
2033 // disable count is non-zero.
2034 // If the collector type changed to something which doesn't benefit from homogeneous space
2035 // compaction, exit.
2036 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2037 !main_space_->CanMoveObjects()) {
2038 return kErrorReject;
2039 }
2040 if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2041 return kErrorUnsupported;
2042 }
2043 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2044 }
2045 if (Runtime::Current()->IsShuttingDown(self)) {
2046 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2047 // cause objects to get finalized.
2048 FinishGC(self, collector::kGcTypeNone);
2049 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2050 }
2051 collector::GarbageCollector* collector;
2052 {
2053 ScopedSuspendAll ssa(__FUNCTION__);
2054 uint64_t start_time = NanoTime();
2055 // Launch compaction.
2056 space::MallocSpace* to_space = main_space_backup_.release();
2057 space::MallocSpace* from_space = main_space_;
2058 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2059 const uint64_t space_size_before_compaction = from_space->Size();
2060 AddSpace(to_space);
2061 // Make sure that we will have enough room to copy.
2062 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2063 collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2064 const uint64_t space_size_after_compaction = to_space->Size();
2065 main_space_ = to_space;
2066 main_space_backup_.reset(from_space);
2067 RemoveSpace(from_space);
2068 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
2069 // Update performed homogeneous space compaction count.
2070 count_performed_homogeneous_space_compaction_++;
2071 // Print the statistics log and resume all threads.
2072 uint64_t duration = NanoTime() - start_time;
2073 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2074 << PrettySize(space_size_before_compaction) << " -> "
2075 << PrettySize(space_size_after_compaction) << " compact-ratio: "
2076 << std::fixed << static_cast<double>(space_size_after_compaction) /
2077 static_cast<double>(space_size_before_compaction);
2078 }
2079 // Finish GC.
2080 reference_processor_->EnqueueClearedReferences(self);
2081 GrowForUtilization(semi_space_collector_);
2082 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
2083 FinishGC(self, collector::kGcTypeFull);
2084 {
2085 ScopedObjectAccess soa(self);
2086 soa.Vm()->UnloadNativeLibraries();
2087 }
2088 return HomogeneousSpaceCompactResult::kSuccess;
2089 }
2090
2091 void Heap::TransitionCollector(CollectorType collector_type) {
2092 if (collector_type == collector_type_) {
2093 return;
2094 }
2095 // Collector transition must not happen with CC
2096 CHECK(!kUseReadBarrier);
2097 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
2098 << " -> " << static_cast<int>(collector_type);
2099 uint64_t start_time = NanoTime();
2100 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2101 Runtime* const runtime = Runtime::Current();
2102 Thread* const self = Thread::Current();
2103 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2104 Locks::mutator_lock_->AssertNotHeld(self);
2105 // Busy wait until we can GC (StartGC can fail if we have a non-zero
2106 // compacting_gc_disable_count_; this should rarely occur).
2107 for (;;) {
2108 {
2109 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2110 MutexLock mu(self, *gc_complete_lock_);
2111 // Ensure there is only one GC at a time.
2112 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
2113 // Currently we only need a heap transition if we switch from a moving collector to a
2114 // non-moving one, or vice versa.
2115 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
2116 // If someone else beat us to it and changed the collector before we could, exit.
2117 // This is safe to do before the suspend all since we set the collector_type_running_ before
2118 // we exit the loop. If another thread attempts to do the heap transition before we exit,
2119 // then it would get blocked on WaitForGcToCompleteLocked.
2120 if (collector_type == collector_type_) {
2121 return;
2122 }
2123 // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
2124 if (!copying_transition || disable_moving_gc_count_ == 0) {
2125 // TODO: Not hard code in semi-space collector?
2126 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
2127 break;
2128 }
2129 }
2130 usleep(1000);
2131 }
2132 if (runtime->IsShuttingDown(self)) {
2133 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2134 // cause objects to get finalized.
2135 FinishGC(self, collector::kGcTypeNone);
2136 return;
2137 }
2138 collector::GarbageCollector* collector = nullptr;
2139 {
2140 ScopedSuspendAll ssa(__FUNCTION__);
2141 switch (collector_type) {
2142 case kCollectorTypeSS: {
2143 if (!IsMovingGc(collector_type_)) {
2144 // Create the bump pointer space from the backup space.
2145 CHECK(main_space_backup_ != nullptr);
2146 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
2147 // We are transitioning from non moving GC -> moving GC. Since we copied from the bump
2148 // pointer space in the last transition, it will be protected.
2149 CHECK(mem_map != nullptr);
2150 mem_map->Protect(PROT_READ | PROT_WRITE);
2151 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2152 mem_map.release());
2153 AddSpace(bump_pointer_space_);
2154 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2155 // Use the now empty main space mem map for the bump pointer temp space.
2156 mem_map.reset(main_space_->ReleaseMemMap());
2157 // Unset the pointers just in case.
2158 if (dlmalloc_space_ == main_space_) {
2159 dlmalloc_space_ = nullptr;
2160 } else if (rosalloc_space_ == main_space_) {
2161 rosalloc_space_ = nullptr;
2162 }
2163 // Remove the main space so that we don't try to trim it; this doesn't work for debug
2164 // builds since RosAlloc attempts to read the magic number from a protected page.
2165 RemoveSpace(main_space_);
2166 RemoveRememberedSet(main_space_);
2167 delete main_space_; // Delete the space since it has been removed.
2168 main_space_ = nullptr;
2169 RemoveRememberedSet(main_space_backup_.get());
2170 main_space_backup_.reset(nullptr); // Deletes the space.
2171 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2172 mem_map.release());
2173 AddSpace(temp_space_);
2174 }
2175 break;
2176 }
2177 case kCollectorTypeMS:
2178 // Fall through.
2179 case kCollectorTypeCMS: {
2180 if (IsMovingGc(collector_type_)) {
2181 CHECK(temp_space_ != nullptr);
2182 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
2183 RemoveSpace(temp_space_);
2184 temp_space_ = nullptr;
2185 mem_map->Protect(PROT_READ | PROT_WRITE);
2186 CreateMainMallocSpace(mem_map.get(),
2187 kDefaultInitialSize,
2188 std::min(mem_map->Size(), growth_limit_),
2189 mem_map->Size());
2190 mem_map.release();
2191 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
2192 AddSpace(main_space_);
2193 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2194 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
2195 RemoveSpace(bump_pointer_space_);
2196 bump_pointer_space_ = nullptr;
2197 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2198 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2199 if (kIsDebugBuild && kUseRosAlloc) {
2200 mem_map->Protect(PROT_READ | PROT_WRITE);
2201 }
2202 main_space_backup_.reset(CreateMallocSpaceFromMemMap(
2203 mem_map.get(),
2204 kDefaultInitialSize,
2205 std::min(mem_map->Size(), growth_limit_),
2206 mem_map->Size(),
2207 name,
2208 true));
2209 if (kIsDebugBuild && kUseRosAlloc) {
2210 mem_map->Protect(PROT_NONE);
2211 }
2212 mem_map.release();
2213 }
2214 break;
2215 }
2216 default: {
2217 LOG(FATAL) << "Attempted to transition to invalid collector type "
2218 << static_cast<size_t>(collector_type);
2219 break;
2220 }
2221 }
2222 ChangeCollector(collector_type);
2223 }
2224 // Can't call into java code with all threads suspended.
2225 reference_processor_->EnqueueClearedReferences(self);
2226 uint64_t duration = NanoTime() - start_time;
2227 GrowForUtilization(semi_space_collector_);
2228 DCHECK(collector != nullptr);
2229 LogGC(kGcCauseCollectorTransition, collector);
2230 FinishGC(self, collector::kGcTypeFull);
2231 {
2232 ScopedObjectAccess soa(self);
2233 soa.Vm()->UnloadNativeLibraries();
2234 }
2235 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2236 int32_t delta_allocated = before_allocated - after_allocated;
2237 std::string saved_str;
2238 if (delta_allocated >= 0) {
2239 saved_str = " saved at least " + PrettySize(delta_allocated);
2240 } else {
2241 saved_str = " expanded " + PrettySize(-delta_allocated);
2242 }
2243 VLOG(heap) << "Collector transition to " << collector_type << " took "
2244 << PrettyDuration(duration) << saved_str;
2245 }
2246
2247 void Heap::ChangeCollector(CollectorType collector_type) {
2248 // TODO: Only do this with all mutators suspended to avoid races.
2249 if (collector_type != collector_type_) {
2250 if (collector_type == kCollectorTypeMC) {
2251 // Don't allow mark compact unless support is compiled in.
2252 CHECK(kMarkCompactSupport);
2253 }
2254 collector_type_ = collector_type;
2255 gc_plan_.clear();
2256 switch (collector_type_) {
2257 case kCollectorTypeCC: {
2258 gc_plan_.push_back(collector::kGcTypeFull);
2259 if (use_tlab_) {
2260 ChangeAllocator(kAllocatorTypeRegionTLAB);
2261 } else {
2262 ChangeAllocator(kAllocatorTypeRegion);
2263 }
2264 break;
2265 }
2266 case kCollectorTypeMC: // Fall-through.
2267 case kCollectorTypeSS: // Fall-through.
2268 case kCollectorTypeGSS: {
2269 gc_plan_.push_back(collector::kGcTypeFull);
2270 if (use_tlab_) {
2271 ChangeAllocator(kAllocatorTypeTLAB);
2272 } else {
2273 ChangeAllocator(kAllocatorTypeBumpPointer);
2274 }
2275 break;
2276 }
2277 case kCollectorTypeMS: {
2278 gc_plan_.push_back(collector::kGcTypeSticky);
2279 gc_plan_.push_back(collector::kGcTypePartial);
2280 gc_plan_.push_back(collector::kGcTypeFull);
2281 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2282 break;
2283 }
2284 case kCollectorTypeCMS: {
2285 gc_plan_.push_back(collector::kGcTypeSticky);
2286 gc_plan_.push_back(collector::kGcTypePartial);
2287 gc_plan_.push_back(collector::kGcTypeFull);
2288 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2289 break;
2290 }
2291 default: {
2292 UNIMPLEMENTED(FATAL);
2293 UNREACHABLE();
2294 }
2295 }
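// For concurrent collectors, schedule the next concurrent GC to start once allocations come
// within kMinConcurrentRemainingBytes of the allowed footprint; non-concurrent collectors never
// trigger on this threshold.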
2296 if (IsGcConcurrent()) {
2297 concurrent_start_bytes_ =
2298 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2299 } else {
2300 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2301 }
2302 }
2303 }
2304
2305 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2306 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
2307 public:
2308 ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2309 : SemiSpace(heap, false, "zygote collector"),
2310 bin_live_bitmap_(nullptr),
2311 bin_mark_bitmap_(nullptr),
2312 is_running_on_memory_tool_(is_running_on_memory_tool) {}
2313
2314 void BuildBins(space::ContinuousSpace* space) {
2315 bin_live_bitmap_ = space->GetLiveBitmap();
2316 bin_mark_bitmap_ = space->GetMarkBitmap();
2317 BinContext context;
2318 context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
2319 context.collector_ = this;
2320 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2321 // Note: This requires traversing the space in increasing order of object addresses.
2322 bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
2323 // Add the last bin which spans after the last object to the end of the space.
2324 AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
2325 }
2326
2327 private:
2328 struct BinContext {
2329 uintptr_t prev_; // The end of the previous object.
2330 ZygoteCompactingCollector* collector_;
2331 };
2332 // Maps from bin sizes to locations.
2333 std::multimap<size_t, uintptr_t> bins_;
2334 // Live bitmap of the space which contains the bins.
2335 accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2336 // Mark bitmap of the space which contains the bins.
2337 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2338 const bool is_running_on_memory_tool_;
2339
2340 static void Callback(mirror::Object* obj, void* arg)
2341 REQUIRES_SHARED(Locks::mutator_lock_) {
2342 DCHECK(arg != nullptr);
2343 BinContext* context = reinterpret_cast<BinContext*>(arg);
2344 ZygoteCompactingCollector* collector = context->collector_;
2345 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2346 size_t bin_size = object_addr - context->prev_;
2347 // Add the bin consisting of the end of the previous object to the start of the current object.
2348 collector->AddBin(bin_size, context->prev_);
2349 context->prev_ = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
2350 }
2351
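// Records a free gap of |size| bytes starting at |position| so MarkNonForwardedObject() can
// later fill it; zero-sized gaps are ignored.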
2352 void AddBin(size_t size, uintptr_t position) {
2353 if (is_running_on_memory_tool_) {
2354 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2355 }
2356 if (size != 0) {
2357 bins_.insert(std::make_pair(size, position));
2358 }
2359 }
2360
2361 virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
2362 // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2363 // allocator.
2364 return false;
2365 }
2366
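// Best-fit placement: lower_bound() finds the smallest recorded bin that can hold the object.
// If none fits, the object is appended to the target space (growing the zygote space); otherwise
// any unused tail of the chosen bin is re-inserted as a new, smaller bin.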
2367 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
2368 REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2369 size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
2370 size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2371 mirror::Object* forward_address;
2372 // Find the smallest bin which we can move obj in.
2373 auto it = bins_.lower_bound(alloc_size);
2374 if (it == bins_.end()) {
2375 // No available space in the bins, place it in the target space instead (grows the zygote
2376 // space).
2377 size_t bytes_allocated, dummy;
2378 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2379 if (to_space_live_bitmap_ != nullptr) {
2380 to_space_live_bitmap_->Set(forward_address);
2381 } else {
2382 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2383 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2384 }
2385 } else {
2386 size_t size = it->first;
2387 uintptr_t pos = it->second;
2388 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2389 forward_address = reinterpret_cast<mirror::Object*>(pos);
2390 // Set the live and mark bits so that sweeping system weaks works properly.
2391 bin_live_bitmap_->Set(forward_address);
2392 bin_mark_bitmap_->Set(forward_address);
2393 DCHECK_GE(size, alloc_size);
2394 // Add a new bin with the remaining space.
2395 AddBin(size - alloc_size, pos + alloc_size);
2396 }
2397 // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
2398 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2399 if (kUseBakerReadBarrier) {
2400 obj->AssertReadBarrierState();
2401 forward_address->AssertReadBarrierState();
2402 }
2403 return forward_address;
2404 }
2405 };
2406
2407 void Heap::UnBindBitmaps() {
2408 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2409 for (const auto& space : GetContinuousSpaces()) {
2410 if (space->IsContinuousMemMapAllocSpace()) {
2411 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2412 if (alloc_space->HasBoundBitmaps()) {
2413 alloc_space->UnBindBitmaps();
2414 }
2415 }
2416 }
2417 }
2418
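// Called in the zygote before forking the first application process. Compacts live objects into
// a dense zygote space so that as many pages as possible stay clean and shared across child
// processes, then rebuilds the remaining space as the new non moving space.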
2419 void Heap::PreZygoteFork() {
2420 if (!HasZygoteSpace()) {
2421 // We still want to GC in case there are some unreachable non moving objects that could cause a
2422 // suboptimal bin packing when we compact the zygote space.
2423 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2424 // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2425 // the trim process may require locking the mutator lock.
2426 non_moving_space_->Trim();
2427 }
2428 Thread* self = Thread::Current();
2429 MutexLock mu(self, zygote_creation_lock_);
2430 // Try to see if we have any Zygote spaces.
2431 if (HasZygoteSpace()) {
2432 return;
2433 }
2434 Runtime::Current()->GetInternTable()->AddNewTable();
2435 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2436 VLOG(heap) << "Starting PreZygoteFork";
2437 // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
2438 // there.
2439 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2440 const bool same_space = non_moving_space_ == main_space_;
2441 if (kCompactZygote) {
2442 // Temporarily disable rosalloc verification because the zygote
2443 // compaction will mess up the rosalloc internal metadata.
2444 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2445 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2446 zygote_collector.BuildBins(non_moving_space_);
2447 // Create a new bump pointer space which we will compact into.
2448 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2449 non_moving_space_->Limit());
2450 // Compact the bump pointer space to a new zygote bump pointer space.
2451 bool reset_main_space = false;
2452 if (IsMovingGc(collector_type_)) {
2453 if (collector_type_ == kCollectorTypeCC) {
2454 zygote_collector.SetFromSpace(region_space_);
2455 } else {
2456 zygote_collector.SetFromSpace(bump_pointer_space_);
2457 }
2458 } else {
2459 CHECK(main_space_ != nullptr);
2460 CHECK_NE(main_space_, non_moving_space_)
2461 << "Does not make sense to compact within the same space";
2462 // Copy from the main space.
2463 zygote_collector.SetFromSpace(main_space_);
2464 reset_main_space = true;
2465 }
2466 zygote_collector.SetToSpace(&target_space);
2467 zygote_collector.SetSwapSemiSpaces(false);
2468 zygote_collector.Run(kGcCauseCollectorTransition, false);
2469 if (reset_main_space) {
2470 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2471 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2472 MemMap* mem_map = main_space_->ReleaseMemMap();
2473 RemoveSpace(main_space_);
2474 space::Space* old_main_space = main_space_;
2475 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2476 mem_map->Size());
2477 delete old_main_space;
2478 AddSpace(main_space_);
2479 } else {
2480 if (collector_type_ == kCollectorTypeCC) {
2481 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2482 // Evacuated everything out of the region space, clear the mark bitmap.
2483 region_space_->GetMarkBitmap()->Clear();
2484 } else {
2485 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2486 }
2487 }
2488 if (temp_space_ != nullptr) {
2489 CHECK(temp_space_->IsEmpty());
2490 }
2491 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2492 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2493 // Update the end and write out image.
2494 non_moving_space_->SetEnd(target_space.End());
2495 non_moving_space_->SetLimit(target_space.Limit());
2496 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2497 }
2498 // Change the collector to the post zygote one.
2499 ChangeCollector(foreground_collector_type_);
2500 // Save the old space so that we can remove it after we complete creating the zygote space.
2501 space::MallocSpace* old_alloc_space = non_moving_space_;
2502 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2503 // the remaining available space.
2504 // Remove the old space before creating the zygote space since creating the zygote space sets
2505 // the old alloc space's bitmaps to null.
2506 RemoveSpace(old_alloc_space);
2507 if (collector::SemiSpace::kUseRememberedSet) {
2508 // Sanity bound check.
2509 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2510 // Remove the remembered set for the now zygote space (the old
2511 // non-moving space). Note now that we have compacted objects into
2512 // the zygote space, the data in the remembered set is no longer
2513 // needed. The zygote space will instead have a mod-union table
2514 // from this point on.
2515 RemoveRememberedSet(old_alloc_space);
2516 }
2517 // Remaining space becomes the new non moving space.
2518 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2519 &non_moving_space_);
2520 CHECK(!non_moving_space_->CanMoveObjects());
2521 if (same_space) {
2522 main_space_ = non_moving_space_;
2523 SetSpaceAsDefault(main_space_);
2524 }
2525 delete old_alloc_space;
2526 CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2527 AddSpace(zygote_space_);
2528 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2529 AddSpace(non_moving_space_);
2530 if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
2531 // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
2532 // safe since we mark all of the objects that may reference non immune objects as gray.
2533 zygote_space_->GetLiveBitmap()->VisitMarkedRange(
2534 reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
2535 reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
2536 [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
2537 CHECK(obj->AtomicSetMarkBit(0, 1));
2538 });
2539 }
2540
2541 // Create the zygote space mod union table.
2542 accounting::ModUnionTable* mod_union_table =
2543 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_);
2544 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2545
2546 if (collector_type_ != kCollectorTypeCC) {
2547 // Set all the cards in the mod-union table since we don't know which objects contain references
2548 // to large objects.
2549 mod_union_table->SetCards();
2550 } else {
2551 // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There
2552 // may be dirty cards from the zygote compaction or reference processing. These cards are not
2553 // necessary to have marked since the zygote space may not refer to any objects not in the
2554 // zygote or image spaces at this point.
2555 mod_union_table->ProcessCards();
2556 mod_union_table->ClearTable();
2557
2558 // For CC we never collect zygote large objects. This means we do not need to set the cards for
2559 // the zygote mod-union table and we can also clear all of the existing image mod-union tables.
2560 // The existing mod-union tables are only for image spaces and may only reference zygote and
2561 // image objects.
2562 for (auto& pair : mod_union_tables_) {
2563 CHECK(pair.first->IsImageSpace());
2564 CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage());
2565 accounting::ModUnionTable* table = pair.second;
2566 table->ClearTable();
2567 }
2568 }
2569 AddModUnionTable(mod_union_table);
2570 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
2571 if (collector::SemiSpace::kUseRememberedSet) {
2572 // Add a new remembered set for the post-zygote non-moving space.
2573 accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2574 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2575 non_moving_space_);
2576 CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2577 << "Failed to create post-zygote non-moving space remembered set";
2578 AddRememberedSet(post_zygote_non_moving_space_rem_set);
2579 }
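  // Recap of the bookkeeping above: the zygote (and image) spaces are now covered by mod-union
  // tables, which track references out of those uncollected spaces, while the remembered set
  // added here (used with the generational semi-space collector, see the kCollectorTypeGSS check
  // in ProcessCards below) tracks references from the post-zygote non-moving space into the
  // spaces that do get collected. This restates the code above rather than adding behavior.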
2580 }
2581
2582 void Heap::FlushAllocStack() {
2583 MarkAllocStackAsLive(allocation_stack_.get());
2584 allocation_stack_->Reset();
2585 }
2586
2587 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2588 accounting::ContinuousSpaceBitmap* bitmap2,
2589 accounting::LargeObjectBitmap* large_objects,
2590 accounting::ObjectStack* stack) {
2591 DCHECK(bitmap1 != nullptr);
2592 DCHECK(bitmap2 != nullptr);
2593 const auto* limit = stack->End();
2594 for (auto* it = stack->Begin(); it != limit; ++it) {
2595 const mirror::Object* obj = it->AsMirrorPtr();
2596 if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2597 if (bitmap1->HasAddress(obj)) {
2598 bitmap1->Set(obj);
2599 } else if (bitmap2->HasAddress(obj)) {
2600 bitmap2->Set(obj);
2601 } else {
2602 DCHECK(large_objects != nullptr);
2603 large_objects->Set(obj);
2604 }
2605 }
2606 }
2607 }
2608
2609 void Heap::SwapSemiSpaces() {
2610 CHECK(bump_pointer_space_ != nullptr);
2611 CHECK(temp_space_ != nullptr);
2612 std::swap(bump_pointer_space_, temp_space_);
2613 }
2614
2615 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2616 space::ContinuousMemMapAllocSpace* source_space,
2617 GcCause gc_cause) {
2618 CHECK(kMovingCollector);
2619 if (target_space != source_space) {
2620 // Don't swap spaces since this isn't a typical semi space collection.
2621 semi_space_collector_->SetSwapSemiSpaces(false);
2622 semi_space_collector_->SetFromSpace(source_space);
2623 semi_space_collector_->SetToSpace(target_space);
2624 semi_space_collector_->Run(gc_cause, false);
2625 return semi_space_collector_;
2626 } else {
2627 CHECK(target_space->IsBumpPointerSpace())
2628 << "In-place compaction is only supported for bump pointer spaces";
2629 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2630 mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2631 return mark_compact_collector_;
2632 }
2633 }
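// Note on Compact() above: copying between two distinct spaces goes through the semi-space
// collector with space swapping disabled (the caller decides what happens to the source space),
// while compacting a space onto itself is only supported for bump pointer spaces and is handled
// by the in-place mark-compact collector.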
2634
2635 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2636 GcCause gc_cause,
2637 bool clear_soft_references) {
2638 Thread* self = Thread::Current();
2639 Runtime* runtime = Runtime::Current();
2640 // If the heap can't run the GC, silently fail and return that no GC was run.
2641 switch (gc_type) {
2642 case collector::kGcTypePartial: {
2643 if (!HasZygoteSpace()) {
2644 return collector::kGcTypeNone;
2645 }
2646 break;
2647 }
2648 default: {
2649       // Other GC types don't have any special cases that make them not runnable. The main case
2650 // here is full GC.
2651 }
2652 }
2653 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2654 Locks::mutator_lock_->AssertNotHeld(self);
2655 if (self->IsHandlingStackOverflow()) {
2656 // If we are throwing a stack overflow error we probably don't have enough remaining stack
2657 // space to run the GC.
2658 return collector::kGcTypeNone;
2659 }
2660 bool compacting_gc;
2661 {
2662 gc_complete_lock_->AssertNotHeld(self);
2663 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2664 MutexLock mu(self, *gc_complete_lock_);
2665 // Ensure there is only one GC at a time.
2666 WaitForGcToCompleteLocked(gc_cause, self);
2667 compacting_gc = IsMovingGc(collector_type_);
2668     // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2669 if (compacting_gc && disable_moving_gc_count_ != 0) {
2670 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2671 return collector::kGcTypeNone;
2672 }
2673 if (gc_disabled_for_shutdown_) {
2674 return collector::kGcTypeNone;
2675 }
2676 collector_type_running_ = collector_type_;
2677 }
2678 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2679 ++runtime->GetStats()->gc_for_alloc_count;
2680 ++self->GetStats()->gc_for_alloc_count;
2681 }
2682 const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2683 // Approximate heap size.
2684 ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
2685
2686 if (gc_type == NonStickyGcType()) {
2687 // Move all bytes from new_native_bytes_allocated_ to
2688 // old_native_bytes_allocated_ now that GC has been triggered, resetting
2689 // new_native_bytes_allocated_ to zero in the process.
2690 old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
2691 }
2692
2693 DCHECK_LT(gc_type, collector::kGcTypeMax);
2694 DCHECK_NE(gc_type, collector::kGcTypeNone);
2695
2696 collector::GarbageCollector* collector = nullptr;
2697 // TODO: Clean this up.
2698 if (compacting_gc) {
2699 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2700 current_allocator_ == kAllocatorTypeTLAB ||
2701 current_allocator_ == kAllocatorTypeRegion ||
2702 current_allocator_ == kAllocatorTypeRegionTLAB);
2703 switch (collector_type_) {
2704 case kCollectorTypeSS:
2705 // Fall-through.
2706 case kCollectorTypeGSS:
2707 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2708 semi_space_collector_->SetToSpace(temp_space_);
2709 semi_space_collector_->SetSwapSemiSpaces(true);
2710 collector = semi_space_collector_;
2711 break;
2712 case kCollectorTypeCC:
2713 collector = concurrent_copying_collector_;
2714 break;
2715 case kCollectorTypeMC:
2716 mark_compact_collector_->SetSpace(bump_pointer_space_);
2717 collector = mark_compact_collector_;
2718 break;
2719 default:
2720 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2721 }
2722 if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
2723 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2724 if (kIsDebugBuild) {
2725 // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2726 temp_space_->GetMemMap()->TryReadable();
2727 }
2728 CHECK(temp_space_->IsEmpty());
2729 }
2730 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
2731 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2732 current_allocator_ == kAllocatorTypeDlMalloc) {
2733 collector = FindCollectorByGcType(gc_type);
2734 } else {
2735 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2736 }
2737 if (IsGcConcurrent()) {
2738 // Disable concurrent GC check so that we don't have spammy JNI requests.
2739 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2740 // calculated in the same thread so that there aren't any races that can cause it to become
2741     // permanently disabled. b/17942071
2742 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2743 }
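  // Setting concurrent_start_bytes_ to SIZE_MAX here effectively disables the allocation-path
  // trigger for a new concurrent GC (roughly "bytes_allocated >= concurrent_start_bytes_" in the
  // allocation fast path); GrowForUtilization() recomputes a real threshold after this collection
  // completes.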
2744
2745 CHECK(collector != nullptr)
2746 << "Could not find garbage collector with collector_type="
2747 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2748 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2749 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2750 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2751 RequestTrim(self);
2752 // Enqueue cleared references.
2753 reference_processor_->EnqueueClearedReferences(self);
2754 // Grow the heap so that we know when to perform the next GC.
2755 GrowForUtilization(collector, bytes_allocated_before_gc);
2756 LogGC(gc_cause, collector);
2757 FinishGC(self, gc_type);
2758 // Inform DDMS that a GC completed.
2759 Dbg::GcDidFinish();
2760 // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2761 // deadlocks in case the JNI_OnUnload function does allocations.
2762 {
2763 ScopedObjectAccess soa(self);
2764 soa.Vm()->UnloadNativeLibraries();
2765 }
2766 return gc_type;
2767 }
2768
2769 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2770 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2771 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2772 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2773 // (mutator time blocked >= long_pause_log_threshold_).
2774 bool log_gc = kLogAllGCs || gc_cause == kGcCauseExplicit;
2775 if (!log_gc && CareAboutPauseTimes()) {
2776 // GC for alloc pauses the allocating thread, so consider it as a pause.
2777 log_gc = duration > long_gc_log_threshold_ ||
2778 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2779 for (uint64_t pause : pause_times) {
2780 log_gc = log_gc || pause >= long_pause_log_threshold_;
2781 }
2782 }
2783 if (log_gc) {
2784 const size_t percent_free = GetPercentFree();
2785 const size_t current_heap_size = GetBytesAllocated();
2786 const size_t total_memory = GetTotalMemory();
2787 std::ostringstream pause_string;
2788 for (size_t i = 0; i < pause_times.size(); ++i) {
2789 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2790 << ((i != pause_times.size() - 1) ? "," : "");
2791 }
2792 LOG(INFO) << gc_cause << " " << collector->GetName()
2793 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2794 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2795 << current_gc_iteration_.GetFreedLargeObjects() << "("
2796 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2797 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2798 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2799 << " total " << PrettyDuration((duration / 1000) * 1000);
2800 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2801 }
2802 }
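// The log line produced above looks roughly like the following (collector name and all values
// are illustrative, not taken from a real run):
//   Explicit concurrent copying GC freed 104012(4MB) AllocSpace objects, 21(512KB) LOS objects,
//   33% free, 25MB/38MB, paused 1.083ms total 103.420ms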
2803
2804 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2805 MutexLock mu(self, *gc_complete_lock_);
2806 collector_type_running_ = kCollectorTypeNone;
2807 if (gc_type != collector::kGcTypeNone) {
2808 last_gc_type_ = gc_type;
2809
2810 // Update stats.
2811 ++gc_count_last_window_;
2812 if (running_collection_is_blocking_) {
2813 // If the currently running collection was a blocking one,
2814 // increment the counters and reset the flag.
2815 ++blocking_gc_count_;
2816 blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2817 ++blocking_gc_count_last_window_;
2818 }
2819 // Update the gc count rate histograms if due.
2820 UpdateGcCountRateHistograms();
2821 }
2822 // Reset.
2823 running_collection_is_blocking_ = false;
2824 thread_running_gc_ = nullptr;
2825 // Wake anyone who may have been waiting for the GC to complete.
2826 gc_complete_cond_->Broadcast(self);
2827 }
2828
2829 void Heap::UpdateGcCountRateHistograms() {
2830 // Invariant: if the time since the last update includes more than
2831   // one window, all the GC runs (if > 0) must have happened in the first
2832 // window because otherwise the update must have already taken place
2833 // at an earlier GC run. So, we report the non-first windows with
2834 // zero counts to the histograms.
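  // Worked example (illustrative, assuming a 10 second window): if 35s have passed since the
  // last update and gc_count_last_window_ is 3, num_of_windows is 3; the first window records 2
  // (the current run is excluded), the remaining two windows record 0, and the counter is reset
  // to 1 so that the in-progress run is counted in the new window.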
2835 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2836 uint64_t now = NanoTime();
2837 DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2838 uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2839 uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
2840 if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2841 // Record the first window.
2842 gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
2843 blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2844 blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2845 // Record the other windows (with zero counts).
2846 for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2847 gc_count_rate_histogram_.AddValue(0);
2848 blocking_gc_count_rate_histogram_.AddValue(0);
2849 }
2850 // Update the last update time and reset the counters.
2851 last_update_time_gc_count_rate_histograms_ =
2852 (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2853 gc_count_last_window_ = 1; // Include the current run.
2854 blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2855 }
2856 DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2857 }
2858
2859 class RootMatchesObjectVisitor : public SingleRootVisitor {
2860 public:
2861   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2862
2863   void VisitRoot(mirror::Object* root, const RootInfo& info)
2864 OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
2865 if (root == obj_) {
2866 LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2867 }
2868 }
2869
2870 private:
2871 const mirror::Object* const obj_;
2872 };
2873
2874
2875 class ScanVisitor {
2876 public:
2877   void operator()(const mirror::Object* obj) const {
2878 LOG(ERROR) << "Would have rescanned object " << obj;
2879 }
2880 };
2881
2882 // Verify a reference from an object.
2883 class VerifyReferenceVisitor : public SingleRootVisitor {
2884 public:
2885   VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2886 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
2887 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2888
2889   size_t GetFailureCount() const {
2890 return fail_count_->LoadSequentiallyConsistent();
2891 }
2892
2893   void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
2894 REQUIRES_SHARED(Locks::mutator_lock_) {
2895 if (verify_referent_) {
2896 VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
2897 }
2898 }
2899
2900   void operator()(ObjPtr<mirror::Object> obj,
2901 MemberOffset offset,
2902 bool is_static ATTRIBUTE_UNUSED) const
2903 REQUIRES_SHARED(Locks::mutator_lock_) {
2904 VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
2905 }
2906
2907   bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
2908 return heap_->IsLiveObjectLocked(obj, true, false, true);
2909 }
2910
2911   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2912 REQUIRES_SHARED(Locks::mutator_lock_) {
2913 if (!root->IsNull()) {
2914 VisitRoot(root);
2915 }
2916 }
2917   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2918 REQUIRES_SHARED(Locks::mutator_lock_) {
2919 const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2920 root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2921 }
2922
2923   virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
2924 REQUIRES_SHARED(Locks::mutator_lock_) {
2925 if (root == nullptr) {
2926 LOG(ERROR) << "Root is null with info " << root_info.GetType();
2927 } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2928 LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
2929 << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2930 }
2931 }
2932
2933 private:
2934 // TODO: Fix the no thread safety analysis.
2935 // Returns false on failure.
2936   bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2937 NO_THREAD_SAFETY_ANALYSIS {
2938 if (ref == nullptr || IsLive(ref)) {
2939 // Verify that the reference is live.
2940 return true;
2941 }
2942 if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2943       // Print the message only on the first failure to prevent spam.
2944 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2945 }
2946 if (obj != nullptr) {
2947 // Only do this part for non roots.
2948 accounting::CardTable* card_table = heap_->GetCardTable();
2949 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2950 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2951 uint8_t* card_addr = card_table->CardFromAddr(obj);
2952 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2953 << offset << "\n card value = " << static_cast<int>(*card_addr);
2954 if (heap_->IsValidObjectAddress(obj->GetClass())) {
2955 LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
2956 } else {
2957 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2958 }
2959
2960 // Attempt to find the class inside of the recently freed objects.
2961 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2962 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2963 space::MallocSpace* space = ref_space->AsMallocSpace();
2964 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2965 if (ref_class != nullptr) {
2966 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2967 << ref_class->PrettyClass();
2968 } else {
2969 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2970 }
2971 }
2972
2973 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2974 ref->GetClass()->IsClass()) {
2975 LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
2976 } else {
2977 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2978 << ") is not a valid heap address";
2979 }
2980
2981 card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2982 void* cover_begin = card_table->AddrFromCard(card_addr);
2983 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2984 accounting::CardTable::kCardSize);
2985 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2986 << "-" << cover_end;
2987 accounting::ContinuousSpaceBitmap* bitmap =
2988 heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2989
2990 if (bitmap == nullptr) {
2991 LOG(ERROR) << "Object " << obj << " has no bitmap";
2992 if (!VerifyClassClass(obj->GetClass())) {
2993 LOG(ERROR) << "Object " << obj << " failed class verification!";
2994 }
2995 } else {
2996 // Print out how the object is live.
2997 if (bitmap->Test(obj)) {
2998 LOG(ERROR) << "Object " << obj << " found in live bitmap";
2999 }
3000 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
3001 LOG(ERROR) << "Object " << obj << " found in allocation stack";
3002 }
3003 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
3004 LOG(ERROR) << "Object " << obj << " found in live stack";
3005 }
3006 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
3007 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
3008 }
3009 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
3010 LOG(ERROR) << "Ref " << ref << " found in live stack";
3011 }
3012 // Attempt to see if the card table missed the reference.
3013 ScanVisitor scan_visitor;
3014 uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
3015 card_table->Scan<false>(bitmap, byte_cover_begin,
3016 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
3017 }
3018
3019 // Search to see if any of the roots reference our object.
3020 RootMatchesObjectVisitor visitor1(obj);
3021 Runtime::Current()->VisitRoots(&visitor1);
3022 // Search to see if any of the roots reference our reference.
3023 RootMatchesObjectVisitor visitor2(ref);
3024 Runtime::Current()->VisitRoots(&visitor2);
3025 }
3026 return false;
3027 }
3028
3029 Heap* const heap_;
3030 Atomic<size_t>* const fail_count_;
3031 const bool verify_referent_;
3032 };
3033
3034 // Verify all references within an object, for use with HeapBitmap::Visit.
3035 class VerifyObjectVisitor {
3036 public:
3037   VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
3038 : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
3039
3040   void operator()(mirror::Object* obj)
3041 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3042     // Note: we are verifying the references in obj but not obj itself, because obj must
3043     // be live or we would not have found it in the live bitmap.
3044 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
3045     // The class doesn't count as a reference but we should verify it anyway.
3046 obj->VisitReferences(visitor, visitor);
3047 }
3048
3049   static void VisitCallback(mirror::Object* obj, void* arg)
3050 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3051 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
3052 visitor->operator()(obj);
3053 }
3054
3055   void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
3056 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3057 VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
3058 Runtime::Current()->VisitRoots(&visitor);
3059 }
3060
3061   size_t GetFailureCount() const {
3062 return fail_count_->LoadSequentiallyConsistent();
3063 }
3064
3065 private:
3066 Heap* const heap_;
3067 Atomic<size_t>* const fail_count_;
3068 const bool verify_referent_;
3069 };
3070
3071 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
3072 // Slow path, the allocation stack push back must have already failed.
3073 DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
3074 do {
3075 // TODO: Add handle VerifyObject.
3076 StackHandleScope<1> hs(self);
3077 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3078     // Push our object into the reserve region of the allocation stack. This is only required due
3079 // to heap verification requiring that roots are live (either in the live bitmap or in the
3080 // allocation stack).
3081 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3082 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3083 } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
3084 }
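// The loop above relies on heap verification treating everything on the allocation stack as a
// root: the object is parked in the reserve region (the CHECKed push is expected to succeed) so
// it stays visible while the sticky collection runs, and the collection leaves the allocation
// stack with free space so that the normal AtomicPushBack can succeed on retry.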
3085
3086 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
3087 ObjPtr<mirror::Object>* obj) {
3088 // Slow path, the allocation stack push back must have already failed.
3089 DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
3090 StackReference<mirror::Object>* start_address;
3091 StackReference<mirror::Object>* end_address;
3092 while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3093 &end_address)) {
3094 // TODO: Add handle VerifyObject.
3095 StackHandleScope<1> hs(self);
3096 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3097     // Push our object into the reserve region of the allocation stack. This is only required due
3098 // to heap verification requiring that roots are live (either in the live bitmap or in the
3099 // allocation stack).
3100 CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
3101 // Push into the reserve allocation stack.
3102 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3103 }
3104 self->SetThreadLocalAllocationStack(start_address, end_address);
3105 // Retry on the new thread-local allocation stack.
3106 CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr())); // Must succeed.
3107 }
3108
3109 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
3110 size_t Heap::VerifyHeapReferences(bool verify_referents) {
3111 Thread* self = Thread::Current();
3112 Locks::mutator_lock_->AssertExclusiveHeld(self);
3113   // Let's sort our allocation stacks so that we can efficiently binary search them.
3114 allocation_stack_->Sort();
3115 live_stack_->Sort();
3116 // Since we sorted the allocation stack content, need to revoke all
3117 // thread-local allocation stacks.
3118 RevokeAllThreadLocalAllocationStacks(self);
3119 Atomic<size_t> fail_count_(0);
3120 VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
3121 // Verify objects in the allocation stack since these will be objects which were:
3122 // 1. Allocated prior to the GC (pre GC verification).
3123 // 2. Allocated during the GC (pre sweep GC verification).
3124 // We don't want to verify the objects in the live stack since they themselves may be
3125 // pointing to dead objects if they are not reachable.
3126 VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
3127 // Verify the roots:
3128 visitor.VerifyRoots();
3129 if (visitor.GetFailureCount() > 0) {
3130 // Dump mod-union tables.
3131 for (const auto& table_pair : mod_union_tables_) {
3132 accounting::ModUnionTable* mod_union_table = table_pair.second;
3133 mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
3134 }
3135 // Dump remembered sets.
3136 for (const auto& table_pair : remembered_sets_) {
3137 accounting::RememberedSet* remembered_set = table_pair.second;
3138 remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
3139 }
3140 DumpSpaces(LOG_STREAM(ERROR));
3141 }
3142 return visitor.GetFailureCount();
3143 }
3144
3145 class VerifyReferenceCardVisitor {
3146 public:
3147   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3148 REQUIRES_SHARED(Locks::mutator_lock_,
3149 Locks::heap_bitmap_lock_)
3150 : heap_(heap), failed_(failed) {
3151 }
3152
3153   // There are no card marks for native roots on a class.
3154   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3155 const {}
3156   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3157
3158 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3159 // annotalysis on visitors.
3160   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3161 NO_THREAD_SAFETY_ANALYSIS {
3162 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3163 // Filter out class references since changing an object's class does not mark the card as dirty.
3164 // Also handles large objects, since the only reference they hold is a class reference.
3165 if (ref != nullptr && !ref->IsClass()) {
3166 accounting::CardTable* card_table = heap_->GetCardTable();
3167 // If the object is not dirty and it is referencing something in the live stack other than
3168 // class, then it must be on a dirty card.
3169 if (!card_table->AddrIsInCardTable(obj)) {
3170 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3171 *failed_ = true;
3172 } else if (!card_table->IsDirty(obj)) {
3173 // TODO: Check mod-union tables.
3174 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3175         // kCardDirty - 1 if it didn't get touched since we aged it.
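        // (For orientation only: kCardDirty is a small non-zero constant, commonly 0x70 in ART,
        // and AgeCardVisitor turns a dirty card into kCardDirty - 1 while clearing everything
        // else; the exact value is an assumption here and is not relied upon by this check.)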
3176 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3177 if (live_stack->ContainsSorted(ref)) {
3178 if (live_stack->ContainsSorted(obj)) {
3179 LOG(ERROR) << "Object " << obj << " found in live stack";
3180 }
3181 if (heap_->GetLiveBitmap()->Test(obj)) {
3182 LOG(ERROR) << "Object " << obj << " found in live bitmap";
3183 }
3184 LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
3185 << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
3186 << " in live stack";
3187
3188 // Print which field of the object is dead.
3189 if (!obj->IsObjectArray()) {
3190 mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
3191 CHECK(klass != nullptr);
3192 for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3193 if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3194 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3195 << field.PrettyField();
3196 break;
3197 }
3198 }
3199 } else {
3200 mirror::ObjectArray<mirror::Object>* object_array =
3201 obj->AsObjectArray<mirror::Object>();
3202 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3203 if (object_array->Get(i) == ref) {
3204 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3205 }
3206 }
3207 }
3208
3209 *failed_ = true;
3210 }
3211 }
3212 }
3213 }
3214
3215 private:
3216 Heap* const heap_;
3217 bool* const failed_;
3218 };
3219
3220 class VerifyLiveStackReferences {
3221 public:
3222   explicit VerifyLiveStackReferences(Heap* heap)
3223 : heap_(heap),
3224 failed_(false) {}
3225
3226   void operator()(mirror::Object* obj) const
3227 REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3228 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3229 obj->VisitReferences(visitor, VoidFunctor());
3230 }
3231
3232   bool Failed() const {
3233 return failed_;
3234 }
3235
3236 private:
3237 Heap* const heap_;
3238 bool failed_;
3239 };
3240
3241 bool Heap::VerifyMissingCardMarks() {
3242 Thread* self = Thread::Current();
3243 Locks::mutator_lock_->AssertExclusiveHeld(self);
3244 // We need to sort the live stack since we binary search it.
3245 live_stack_->Sort();
3246 // Since we sorted the allocation stack content, need to revoke all
3247 // thread-local allocation stacks.
3248 RevokeAllThreadLocalAllocationStacks(self);
3249 VerifyLiveStackReferences visitor(this);
3250 GetLiveBitmap()->Visit(visitor);
3251 // We can verify objects in the live stack since none of these should reference dead objects.
3252 for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3253 if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3254 visitor(it->AsMirrorPtr());
3255 }
3256 }
3257 return !visitor.Failed();
3258 }
3259
3260 void Heap::SwapStacks() {
3261 if (kUseThreadLocalAllocationStack) {
3262 live_stack_->AssertAllZero();
3263 }
3264 allocation_stack_.swap(live_stack_);
3265 }
3266
3267 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3268 // This must be called only during the pause.
3269 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3270 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3271 MutexLock mu2(self, *Locks::thread_list_lock_);
3272 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3273 for (Thread* t : thread_list) {
3274 t->RevokeThreadLocalAllocationStack();
3275 }
3276 }
3277
3278 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3279 if (kIsDebugBuild) {
3280 if (rosalloc_space_ != nullptr) {
3281 rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3282 }
3283 if (bump_pointer_space_ != nullptr) {
3284 bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3285 }
3286 }
3287 }
3288
3289 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3290 if (kIsDebugBuild) {
3291 if (bump_pointer_space_ != nullptr) {
3292 bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3293 }
3294 }
3295 }
3296
3297 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3298 auto it = mod_union_tables_.find(space);
3299 if (it == mod_union_tables_.end()) {
3300 return nullptr;
3301 }
3302 return it->second;
3303 }
3304
3305 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3306 auto it = remembered_sets_.find(space);
3307 if (it == remembered_sets_.end()) {
3308 return nullptr;
3309 }
3310 return it->second;
3311 }
3312
3313 void Heap::ProcessCards(TimingLogger* timings,
3314 bool use_rem_sets,
3315 bool process_alloc_space_cards,
3316 bool clear_alloc_space_cards) {
3317 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3318 // Clear cards and keep track of cards cleared in the mod-union table.
3319 for (const auto& space : continuous_spaces_) {
3320 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3321 accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3322 if (table != nullptr) {
3323 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3324 "ImageModUnionClearCards";
3325 TimingLogger::ScopedTiming t2(name, timings);
3326 table->ProcessCards();
3327 } else if (use_rem_sets && rem_set != nullptr) {
3328 DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3329 << static_cast<int>(collector_type_);
3330 TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3331 rem_set->ClearCards();
3332 } else if (process_alloc_space_cards) {
3333 TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3334 if (clear_alloc_space_cards) {
3335 uint8_t* end = space->End();
3336 if (space->IsImageSpace()) {
3337 // Image space end is the end of the mirror objects, it is not necessarily page or card
3338 // aligned. Align up so that the check in ClearCardRange does not fail.
3339 end = AlignUp(end, accounting::CardTable::kCardSize);
3340 }
3341 card_table_->ClearCardRange(space->Begin(), end);
3342 } else {
3343 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3344 // cards were dirty before the GC started.
3345 // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3346 // -> clean(cleaning thread).
3347 // The races are we either end up with: Aged card, unaged card. Since we have the
3348 // checkpoint roots and then we scan / update mod union tables after. We will always
3349         // scan either card. If we end up with the non-aged card, we scan it in the pause.
3350 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3351 VoidFunctor());
3352 }
3353 }
3354 }
3355 }
3356
3357 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3358   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
3359 return obj;
3360 }
3361   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
3362 }
3363 };
3364
3365 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3366 Thread* const self = Thread::Current();
3367 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3368 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3369 if (verify_pre_gc_heap_) {
3370 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3371 size_t failures = VerifyHeapReferences();
3372 if (failures > 0) {
3373 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3374 << " failures";
3375 }
3376 }
3377 // Check that all objects which reference things in the live stack are on dirty cards.
3378 if (verify_missing_card_marks_) {
3379 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3380 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3381 SwapStacks();
3382 // Sort the live stack so that we can quickly binary search it later.
3383 CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3384 << " missing card mark verification failed\n" << DumpSpaces();
3385 SwapStacks();
3386 }
3387 if (verify_mod_union_table_) {
3388 TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3389 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3390 for (const auto& table_pair : mod_union_tables_) {
3391 accounting::ModUnionTable* mod_union_table = table_pair.second;
3392 IdentityMarkHeapReferenceVisitor visitor;
3393 mod_union_table->UpdateAndMarkReferences(&visitor);
3394 mod_union_table->Verify();
3395 }
3396 }
3397 }
3398
3399 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3400 if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3401 collector::GarbageCollector::ScopedPause pause(gc, false);
3402 PreGcVerificationPaused(gc);
3403 }
3404 }
3405
3406 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3407 // TODO: Add a new runtime option for this?
3408 if (verify_pre_gc_rosalloc_) {
3409 RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3410 }
3411 }
3412
3413 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3414 Thread* const self = Thread::Current();
3415 TimingLogger* const timings = current_gc_iteration_.GetTimings();
3416 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3417   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3418 // reachable objects.
3419 if (verify_pre_sweeping_heap_) {
3420 TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
3421 CHECK_NE(self->GetState(), kRunnable);
3422 {
3423 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3424 // Swapping bound bitmaps does nothing.
3425 gc->SwapBitmaps();
3426 }
3427 // Pass in false since concurrent reference processing can mean that the reference referents
3428 // may point to dead objects at the point which PreSweepingGcVerification is called.
3429 size_t failures = VerifyHeapReferences(false);
3430 if (failures > 0) {
3431 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3432 << " failures";
3433 }
3434 {
3435 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3436 gc->SwapBitmaps();
3437 }
3438 }
3439 if (verify_pre_sweeping_rosalloc_) {
3440 RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3441 }
3442 }
3443
3444 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3445 // Only pause if we have to do some verification.
3446 Thread* const self = Thread::Current();
3447 TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3448 TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3449 if (verify_system_weaks_) {
3450 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3451 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3452 mark_sweep->VerifySystemWeaks();
3453 }
3454 if (verify_post_gc_rosalloc_) {
3455 RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3456 }
3457 if (verify_post_gc_heap_) {
3458 TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3459 size_t failures = VerifyHeapReferences();
3460 if (failures > 0) {
3461       LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3462 << " failures";
3463 }
3464 }
3465 }
3466
3467 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3468 if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3469 collector::GarbageCollector::ScopedPause pause(gc, false);
3470 PostGcVerificationPaused(gc);
3471 }
3472 }
3473
3474 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3475 TimingLogger::ScopedTiming t(name, timings);
3476 for (const auto& space : continuous_spaces_) {
3477 if (space->IsRosAllocSpace()) {
3478 VLOG(heap) << name << " : " << space->GetName();
3479 space->AsRosAllocSpace()->Verify();
3480 }
3481 }
3482 }
3483
3484 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3485 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3486 MutexLock mu(self, *gc_complete_lock_);
3487 return WaitForGcToCompleteLocked(cause, self);
3488 }
3489
3490 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3491 collector::GcType last_gc_type = collector::kGcTypeNone;
3492 uint64_t wait_start = NanoTime();
3493 while (collector_type_running_ != kCollectorTypeNone) {
3494 if (self != task_processor_->GetRunningThread()) {
3495 // The current thread is about to wait for a currently running
3496 // collection to finish. If the waiting thread is not the heap
3497 // task daemon thread, the currently running collection is
3498 // considered as a blocking GC.
3499 running_collection_is_blocking_ = true;
3500 VLOG(gc) << "Waiting for a blocking GC " << cause;
3501 }
3502 ScopedTrace trace("GC: Wait For Completion");
3503 // We must wait, change thread state then sleep on gc_complete_cond_;
3504 gc_complete_cond_->Wait(self);
3505 last_gc_type = last_gc_type_;
3506 }
3507 uint64_t wait_time = NanoTime() - wait_start;
3508 total_wait_time_ += wait_time;
3509 if (wait_time > long_pause_log_threshold_) {
3510 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
3511 << " for cause " << cause;
3512 }
3513 if (self != task_processor_->GetRunningThread()) {
3514 // The current thread is about to run a collection. If the thread
3515 // is not the heap task daemon thread, it's considered as a
3516 // blocking GC (i.e., blocking itself).
3517 running_collection_is_blocking_ = true;
3518 // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
3519 // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
3520 if (cause == kGcCauseForAlloc ||
3521 cause == kGcCauseForNativeAlloc ||
3522 cause == kGcCauseDisableMovingGc) {
3523 VLOG(gc) << "Starting a blocking GC " << cause;
3524 }
3525 }
3526 return last_gc_type;
3527 }
3528
3529 void Heap::DumpForSigQuit(std::ostream& os) {
3530 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3531 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3532 DumpGcPerformanceInfo(os);
3533 }
3534
3535 size_t Heap::GetPercentFree() {
3536 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
3537 }
3538
3539 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
3540 if (max_allowed_footprint > GetMaxMemory()) {
3541 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
3542 << PrettySize(GetMaxMemory());
3543 max_allowed_footprint = GetMaxMemory();
3544 }
3545 max_allowed_footprint_ = max_allowed_footprint;
3546 }
3547
3548 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
3549 if (kMovingCollector) {
3550 space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
3551 if (space != nullptr) {
3552 // TODO: Check large object?
3553 return space->CanMoveObjects();
3554 }
3555 }
3556 return false;
3557 }
3558
3559 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3560 for (const auto& collector : garbage_collectors_) {
3561 if (collector->GetCollectorType() == collector_type_ &&
3562 collector->GetGcType() == gc_type) {
3563 return collector;
3564 }
3565 }
3566 return nullptr;
3567 }
3568
3569 double Heap::HeapGrowthMultiplier() const {
3570   // If we don't care about pause times we are in the background, so return 1.0.
3571 if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
3572 return 1.0;
3573 }
3574 return foreground_heap_growth_multiplier_;
3575 }
3576
3577 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3578 uint64_t bytes_allocated_before_gc) {
3579 // We know what our utilization is at this moment.
3580 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3581 const uint64_t bytes_allocated = GetBytesAllocated();
3582 uint64_t target_size;
3583 collector::GcType gc_type = collector_ran->GetGcType();
3584 const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
3585 // foreground.
3586 const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3587 const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
3588 if (gc_type != collector::kGcTypeSticky) {
3589 // Grow the heap for non sticky GC.
3590 ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
3591 CHECK_GE(delta, 0);
3592 target_size = bytes_allocated + delta * multiplier;
3593 target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3594 target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
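    // Illustrative numbers (made up, the real values come from runtime options): with
    // bytes_allocated = 20MB and a target utilization of 0.75, delta is roughly 6.7MB; a
    // foreground multiplier of 2.0 gives an unclamped target of about 33.3MB, which the two
    // lines above then clamp into [bytes_allocated + adjusted_min_free,
    // bytes_allocated + adjusted_max_free].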
3595 next_gc_type_ = collector::kGcTypeSticky;
3596 } else {
3597 collector::GcType non_sticky_gc_type = NonStickyGcType();
3598 // Find what the next non sticky collector will be.
3599 collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3600 // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3601 // do another sticky collection next.
3602 // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3603 // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3604 // if the sticky GC throughput always remained >= the full/partial throughput.
3605 if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
3606 non_sticky_collector->GetEstimatedMeanThroughput() &&
3607 non_sticky_collector->NumberOfIterations() > 0 &&
3608 bytes_allocated <= max_allowed_footprint_) {
3609 next_gc_type_ = collector::kGcTypeSticky;
3610 } else {
3611 next_gc_type_ = non_sticky_gc_type;
3612 }
3613 // If we have freed enough memory, shrink the heap back down.
3614 if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3615 target_size = bytes_allocated + adjusted_max_free;
3616 } else {
3617 target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
3618 }
3619 }
3620 if (!ignore_max_footprint_) {
3621 SetIdealFootprint(target_size);
3622 if (IsGcConcurrent()) {
3623 const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3624 current_gc_iteration_.GetFreedLargeObjectBytes() +
3625 current_gc_iteration_.GetFreedRevokeBytes();
3626 // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3627 // how many bytes were allocated during the GC we need to add freed_bytes back on.
3628 CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3629 const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3630 bytes_allocated_before_gc;
3631 // Calculate when to perform the next ConcurrentGC.
3632 // Calculate the estimated GC duration.
3633 const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
3634 // Estimate how many remaining bytes we will have when we need to start the next GC.
3635 size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
3636 remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3637 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3638 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3639         // A situation that should never happen: the estimated allocation rate implies we would
3640         // exceed the application's entire footprint during the next GC. Schedule
3641         // another GC nearly straight away.
3642 remaining_bytes = kMinConcurrentRemainingBytes;
3643 }
3644 DCHECK_LE(remaining_bytes, max_allowed_footprint_);
3645 DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
3646 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3647 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3648 // right away.
3649 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3650 static_cast<size_t>(bytes_allocated));
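      // Illustrative numbers: with max_allowed_footprint_ at 30MB and remaining_bytes clamped to
      // the 512KB kMaxConcurrentRemainingBytes bound, the next concurrent GC is requested once
      // bytes_allocated crosses roughly 29.5MB. The 30MB figure is made up; the clamping bounds
      // are the constants defined at the top of this file.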
3651 }
3652 }
3653 }
3654
3655 void Heap::ClampGrowthLimit() {
3656 // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3657 ScopedObjectAccess soa(Thread::Current());
3658 WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3659 capacity_ = growth_limit_;
3660 for (const auto& space : continuous_spaces_) {
3661 if (space->IsMallocSpace()) {
3662 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3663 malloc_space->ClampGrowthLimit();
3664 }
3665 }
3666 // This space isn't added for performance reasons.
3667 if (main_space_backup_.get() != nullptr) {
3668 main_space_backup_->ClampGrowthLimit();
3669 }
3670 }
3671
3672 void Heap::ClearGrowthLimit() {
3673 growth_limit_ = capacity_;
3674 ScopedObjectAccess soa(Thread::Current());
3675 for (const auto& space : continuous_spaces_) {
3676 if (space->IsMallocSpace()) {
3677 gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3678 malloc_space->ClearGrowthLimit();
3679 malloc_space->SetFootprintLimit(malloc_space->Capacity());
3680 }
3681 }
3682 // This space isn't added for performance reasons.
3683 if (main_space_backup_.get() != nullptr) {
3684 main_space_backup_->ClearGrowthLimit();
3685 main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3686 }
3687 }
3688
3689 void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
3690 ScopedObjectAccess soa(self);
3691 ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3692 jvalue args[1];
3693 args[0].l = arg.get();
3694 InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3695 // Restore object in case it gets moved.
3696 *object = soa.Decode<mirror::Object>(arg.get());
3697 }
3698
3699 void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
3700 bool force_full,
3701 ObjPtr<mirror::Object>* obj) {
3702 StackHandleScope<1> hs(self);
3703 HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3704 RequestConcurrentGC(self, kGcCauseBackground, force_full);
3705 }
3706
3707 class Heap::ConcurrentGCTask : public HeapTask {
3708 public:
3709   ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
3710 : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
3711   virtual void Run(Thread* self) OVERRIDE {
3712 gc::Heap* heap = Runtime::Current()->GetHeap();
3713 heap->ConcurrentGC(self, cause_, force_full_);
3714 heap->ClearConcurrentGCRequest();
3715 }
3716
3717 private:
3718 const GcCause cause_;
3719 const bool force_full_; // If true, force full (or partial) collection.
3720 };
3721
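// Heap tasks can only be queued when the runtime has fully started, is not shutting down, and the
// requesting thread is not currently handling a stack overflow.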
3722 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3723 Runtime* runtime = Runtime::Current();
3724 return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3725 !self->IsHandlingStackOverflow();
3726 }
3727
3728 void Heap::ClearConcurrentGCRequest() {
3729 concurrent_gc_pending_.StoreRelaxed(false);
3730 }
3731
3732 void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3733 if (CanAddHeapTask(self) &&
3734 concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
3735 task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
3736 cause,
3737 force_full));
3738 }
3739 }
3740
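// Runs the planned background GC: waits for any in-flight collection, then attempts the next
// planned GC type, escalating through gc_plan_ if that type could not be run.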
3741 void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
3742 if (!Runtime::Current()->IsShuttingDown(self)) {
3743 // Wait for any GCs currently running to finish.
3744 if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
3745       // If we can't run the GC type we wanted to run, find the next appropriate one and try
3746       // that instead. E.g. can't do partial, so do full instead.
3747 collector::GcType next_gc_type = next_gc_type_;
3748 // If forcing full and next gc type is sticky, override with a non-sticky type.
3749 if (force_full && next_gc_type == collector::kGcTypeSticky) {
3750 next_gc_type = NonStickyGcType();
3751 }
3752 if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) {
3753 for (collector::GcType gc_type : gc_plan_) {
3754 // Attempt to run the collector, if we succeed, we are done.
3755 if (gc_type > next_gc_type &&
3756 CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) {
3757 break;
3758 }
3759 }
3760 }
3761 }
3762 }
3763 }
3764
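// Task processor job that performs a deferred collector transition (e.g. foreground/background).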
3765 class Heap::CollectorTransitionTask : public HeapTask {
3766 public:
3767   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3768
3769   virtual void Run(Thread* self) OVERRIDE {
3770 gc::Heap* heap = Runtime::Current()->GetHeap();
3771 heap->DoPendingCollectorTransition();
3772 heap->ClearPendingCollectorTransition(self);
3773 }
3774 };
3775
3776 void Heap::ClearPendingCollectorTransition(Thread* self) {
3777 MutexLock mu(self, *pending_task_lock_);
3778 pending_collector_transition_ = nullptr;
3779 }
3780
3781 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3782 Thread* self = Thread::Current();
3783 desired_collector_type_ = desired_collector_type;
3784 if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3785 return;
3786 }
3787 if (collector_type_ == kCollectorTypeCC) {
3788 // For CC, we invoke a full compaction when going to the background, but the collector type
3789 // doesn't change.
3790 DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
3791 }
3792 DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
3793 CollectorTransitionTask* added_task = nullptr;
3794 const uint64_t target_time = NanoTime() + delta_time;
3795 {
3796 MutexLock mu(self, *pending_task_lock_);
3797     // If we have an existing collector transition, update the target time to be the new target.
3798 if (pending_collector_transition_ != nullptr) {
3799 task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3800 return;
3801 }
3802 added_task = new CollectorTransitionTask(target_time);
3803 pending_collector_transition_ = added_task;
3804 }
3805 task_processor_->AddTask(self, added_task);
3806 }
3807
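// Task processor job that trims the heap after the requested delay and clears the pending trim.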
3808 class Heap::HeapTrimTask : public HeapTask {
3809 public:
3810   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3811   virtual void Run(Thread* self) OVERRIDE {
3812 gc::Heap* heap = Runtime::Current()->GetHeap();
3813 heap->Trim(self);
3814 heap->ClearPendingTrim(self);
3815 }
3816 };
3817
3818 void Heap::ClearPendingTrim(Thread* self) {
3819 MutexLock mu(self, *pending_task_lock_);
3820 pending_heap_trim_ = nullptr;
3821 }
3822
3823 void Heap::RequestTrim(Thread* self) {
3824 if (!CanAddHeapTask(self)) {
3825 return;
3826 }
3827 // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3828 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3829 // a space it will hold its lock and can become a cause of jank.
3830   // Note, the large object space self trims, and the Zygote space was trimmed at fork time and
3831   // has been unchanging since.
3832
3833 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3834 // because that only marks object heads, so a large array looks like lots of empty space. We
3835 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3836 // to utilization (which is probably inversely proportional to how much benefit we can expect).
3837 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3838 // not how much use we're making of those pages.
3839 HeapTrimTask* added_task = nullptr;
3840 {
3841 MutexLock mu(self, *pending_task_lock_);
3842 if (pending_heap_trim_ != nullptr) {
3843 // Already have a heap trim request in task processor, ignore this request.
3844 return;
3845 }
3846 added_task = new HeapTrimTask(kHeapTrimWait);
3847 pending_heap_trim_ = added_task;
3848 }
3849 task_processor_->AddTask(self, added_task);
3850 }
3851
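// Returns a thread's local allocation buffers to their owning spaces. RosAlloc revocation may
// report freed bytes, which are tracked so the allocated-bytes accounting stays consistent; bump
// pointer and region space revocation is expected to free nothing.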
3852 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3853 if (rosalloc_space_ != nullptr) {
3854 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3855 if (freed_bytes_revoke > 0U) {
3856 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3857 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3858 }
3859 }
3860 if (bump_pointer_space_ != nullptr) {
3861 CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3862 }
3863 if (region_space_ != nullptr) {
3864 CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3865 }
3866 }
3867
3868 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3869 if (rosalloc_space_ != nullptr) {
3870 size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3871 if (freed_bytes_revoke > 0U) {
3872 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3873 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3874 }
3875 }
3876 }
3877
3878 void Heap::RevokeAllThreadLocalBuffers() {
3879 if (rosalloc_space_ != nullptr) {
3880 size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3881 if (freed_bytes_revoke > 0U) {
3882 num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3883 CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3884 }
3885 }
3886 if (bump_pointer_space_ != nullptr) {
3887 CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3888 }
3889 if (region_space_ != nullptr) {
3890 CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3891 }
3892 }
3893
3894 bool Heap::IsGCRequestPending() const {
3895 return concurrent_gc_pending_.LoadRelaxed();
3896 }
3897
3898 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3899 env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3900 WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3901 static_cast<jlong>(timeout));
3902 }
3903
3904 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3905 // See the REDESIGN section of go/understanding-register-native-allocation
3906 // for an explanation of how RegisterNativeAllocation works.
3907 size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
3908 if (new_value > NativeAllocationBlockingGcWatermark()) {
3909 // Wait for a new GC to finish and finalizers to run, because the
3910 // allocation rate is too high.
3911 Thread* self = ThreadForEnv(env);
3912
3913 bool run_gc = false;
3914 {
3915 MutexLock mu(self, *native_blocking_gc_lock_);
3916 uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
3917 if (native_blocking_gc_in_progress_) {
3918 // A native blocking GC is in progress from the last time the native
3919 // allocation blocking GC watermark was exceeded. Wait for that GC to
3920 // finish before addressing the fact that we exceeded the blocking
3921 // watermark again.
3922 do {
3923 native_blocking_gc_cond_->Wait(self);
3924 } while (native_blocking_gcs_finished_ == initial_gcs_finished);
3925 initial_gcs_finished++;
3926 }
3927
3928 // It's possible multiple threads have seen that we exceeded the
3929 // blocking watermark. Ensure that only one of those threads runs the
3930 // blocking GC. The rest of the threads should instead wait for the
3931 // blocking GC to complete.
3932 if (native_blocking_gcs_finished_ == initial_gcs_finished) {
3933 if (native_blocking_gc_in_progress_) {
3934 do {
3935 native_blocking_gc_cond_->Wait(self);
3936 } while (native_blocking_gcs_finished_ == initial_gcs_finished);
3937 } else {
3938 native_blocking_gc_in_progress_ = true;
3939 run_gc = true;
3940 }
3941 }
3942 }
3943
3944 if (run_gc) {
3945 CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
3946 RunFinalization(env, kNativeAllocationFinalizeTimeout);
3947 CHECK(!env->ExceptionCheck());
3948
3949 MutexLock mu(self, *native_blocking_gc_lock_);
3950 native_blocking_gc_in_progress_ = false;
3951 native_blocking_gcs_finished_++;
3952 native_blocking_gc_cond_->Broadcast(self);
3953 }
3954 } else if (new_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
3955 !IsGCRequestPending()) {
3956 // Trigger another GC because there have been enough native bytes
3957 // allocated since the last GC.
3958 if (IsGcConcurrent()) {
3959 RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAllocBackground, /*force_full*/true);
3960 } else {
3961 CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
3962 }
3963 }
3964 }
3965
3966 void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
3967 // Take the bytes freed out of new_native_bytes_allocated_ first. If
3968 // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
3969 // out of old_native_bytes_allocated_ to ensure all freed bytes are
3970 // accounted for.
3971 size_t allocated;
3972 size_t new_freed_bytes;
3973 do {
3974 allocated = new_native_bytes_allocated_.LoadRelaxed();
3975 new_freed_bytes = std::min(allocated, bytes);
3976 } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
3977 allocated - new_freed_bytes));
3978 if (new_freed_bytes < bytes) {
3979 old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
3980 }
3981 }
3982
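// Reports the heap's total memory as the larger of the current allowed footprint and the bytes
// actually allocated.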
3983 size_t Heap::GetTotalMemory() const {
3984 return std::max(max_allowed_footprint_, GetBytesAllocated());
3985 }
3986
3987 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3988 DCHECK(mod_union_table != nullptr);
3989 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3990 }
3991
3992 void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
3993 CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3994 (c->IsVariableSize() || c->GetObjectSize() == byte_count))
3995 << "ClassFlags=" << c->GetClassFlags()
3996 << " IsClassClass=" << c->IsClassClass()
3997 << " byte_count=" << byte_count
3998 << " IsVariableSize=" << c->IsVariableSize()
3999 << " ObjectSize=" << c->GetObjectSize()
4000 << " sizeof(Class)=" << sizeof(mirror::Class)
4001 << " klass=" << c.Ptr();
4002 CHECK_GE(byte_count, sizeof(mirror::Object));
4003 }
4004
4005 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
4006 CHECK(remembered_set != nullptr);
4007 space::Space* space = remembered_set->GetSpace();
4008 CHECK(space != nullptr);
4009 CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
4010 remembered_sets_.Put(space, remembered_set);
4011 CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
4012 }
4013
4014 void Heap::RemoveRememberedSet(space::Space* space) {
4015 CHECK(space != nullptr);
4016 auto it = remembered_sets_.find(space);
4017 CHECK(it != remembered_sets_.end());
4018 delete it->second;
4019 remembered_sets_.erase(it);
4020 CHECK(remembered_sets_.find(space) == remembered_sets_.end());
4021 }
4022
4023 void Heap::ClearMarkedObjects() {
4024 // Clear all of the spaces' mark bitmaps.
4025 for (const auto& space : GetContinuousSpaces()) {
4026 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
4027 if (space->GetLiveBitmap() != mark_bitmap) {
4028 mark_bitmap->Clear();
4029 }
4030 }
4031   // Clear the marked objects in the discontinuous space object sets.
4032 for (const auto& space : GetDiscontinuousSpaces()) {
4033 space->GetMarkBitmap()->Clear();
4034 }
4035 }
4036
4037 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
4038 allocation_records_.reset(records);
4039 }
4040
4041 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
4042 if (IsAllocTrackingEnabled()) {
4043 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4044 if (IsAllocTrackingEnabled()) {
4045 GetAllocationRecords()->VisitRoots(visitor);
4046 }
4047 }
4048 }
4049
4050 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
4051 if (IsAllocTrackingEnabled()) {
4052 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4053 if (IsAllocTrackingEnabled()) {
4054 GetAllocationRecords()->SweepAllocationRecords(visitor);
4055 }
4056 }
4057 }
4058
4059 void Heap::AllowNewAllocationRecords() const {
4060 CHECK(!kUseReadBarrier);
4061 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4062 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4063 if (allocation_records != nullptr) {
4064 allocation_records->AllowNewAllocationRecords();
4065 }
4066 }
4067
4068 void Heap::DisallowNewAllocationRecords() const {
4069 CHECK(!kUseReadBarrier);
4070 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4071 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4072 if (allocation_records != nullptr) {
4073 allocation_records->DisallowNewAllocationRecords();
4074 }
4075 }
4076
4077 void Heap::BroadcastForNewAllocationRecords() const {
4078 // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4079 // be set to false while some threads are waiting for system weak access in
4080 // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4081 MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4082 AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4083 if (allocation_records != nullptr) {
4084 allocation_records->BroadcastForNewAllocationRecords();
4085 }
4086 }
4087
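// GC stress mode: force a collection the first time a given allocation backtrace is seen;
// previously seen backtraces only increment a counter.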
4088 void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
4089 auto* const runtime = Runtime::Current();
4090 if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
4091 !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
4092 // Check if we should GC.
4093 bool new_backtrace = false;
4094 {
4095 static constexpr size_t kMaxFrames = 16u;
4096 FixedSizeBacktrace<kMaxFrames> backtrace;
4097 backtrace.Collect(/* skip_frames */ 2);
4098 uint64_t hash = backtrace.Hash();
4099 MutexLock mu(self, *backtrace_lock_);
4100 new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4101 if (new_backtrace) {
4102 seen_backtraces_.insert(hash);
4103 }
4104 }
4105 if (new_backtrace) {
4106 StackHandleScope<1> hs(self);
4107 auto h = hs.NewHandleWrapper(obj);
4108 CollectGarbage(false);
4109 unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4110 } else {
4111 seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4112 }
4113 }
4114 }
4115
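// Marks the heap so no further garbage collections start; only valid while the runtime is
// shutting down.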
4116 void Heap::DisableGCForShutdown() {
4117 Thread* const self = Thread::Current();
4118 CHECK(Runtime::Current()->IsShuttingDown(self));
4119 MutexLock mu(self, *gc_complete_lock_);
4120 gc_disabled_for_shutdown_ = true;
4121 }
4122
4123 bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
4124 for (gc::space::ImageSpace* space : boot_image_spaces_) {
4125 if (space->HasAddress(obj.Ptr())) {
4126 return true;
4127 }
4128 }
4129 return false;
4130 }
4131
4132 bool Heap::IsInBootImageOatFile(const void* p) const {
4133 for (gc::space::ImageSpace* space : boot_image_spaces_) {
4134 if (space->GetOatFile()->Contains(p)) {
4135 return true;
4136 }
4137 }
4138 return false;
4139 }
4140
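// Computes the combined low-memory address ranges covered by all boot image spaces and by their
// corresponding oat files.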
4141 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
4142 uint32_t* boot_image_end,
4143 uint32_t* boot_oat_begin,
4144 uint32_t* boot_oat_end) {
4145 DCHECK(boot_image_begin != nullptr);
4146 DCHECK(boot_image_end != nullptr);
4147 DCHECK(boot_oat_begin != nullptr);
4148 DCHECK(boot_oat_end != nullptr);
4149 *boot_image_begin = 0u;
4150 *boot_image_end = 0u;
4151 *boot_oat_begin = 0u;
4152 *boot_oat_end = 0u;
4153 for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
4154 const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
4155 const uint32_t image_size = space_->GetImageHeader().GetImageSize();
4156 if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
4157 *boot_image_begin = image_begin;
4158 }
4159 *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
4160 const OatFile* boot_oat_file = space_->GetOatFile();
4161 const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
4162 const uint32_t oat_size = boot_oat_file->Size();
4163 if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
4164 *boot_oat_begin = oat_begin;
4165 }
4166 *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
4167 }
4168 }
4169
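// Installs an allocation listener. The quick allocation entrypoints are instrumented only when
// transitioning from having no listener to having one.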
4170 void Heap::SetAllocationListener(AllocationListener* l) {
4171 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);
4172
4173 if (old == nullptr) {
4174 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4175 }
4176 }
4177
4178 void Heap::RemoveAllocationListener() {
4179 AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
4180
4181 if (old != nullptr) {
4182 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4183 }
4184 }
4185
4186 void Heap::SetGcPauseListener(GcPauseListener* l) {
4187 gc_pause_listener_.StoreRelaxed(l);
4188 }
4189
4190 void Heap::RemoveGcPauseListener() {
4191 gc_pause_listener_.StoreRelaxed(nullptr);
4192 }
4193
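// TLAB refill slow path: either expand the current TLAB (when partial TLABs are enabled), refill
// a fresh TLAB from the bump pointer or region space, or fall back to a non-TLAB region
// allocation, performing an out-of-memory check before each attempt.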
4194 mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
4195 size_t alloc_size,
4196 bool grow,
4197 size_t* bytes_allocated,
4198 size_t* usable_size,
4199 size_t* bytes_tl_bulk_allocated) {
4200 const AllocatorType allocator_type = GetCurrentAllocator();
4201 if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
4202 DCHECK_GT(alloc_size, self->TlabSize());
4203     // There is enough space if we grow the TLAB. Let's do that. This increases the
4204     // TLAB bytes.
4205 const size_t min_expand_size = alloc_size - self->TlabSize();
4206 const size_t expand_bytes = std::max(
4207 min_expand_size,
4208 std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize));
4209 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
4210 return nullptr;
4211 }
4212 *bytes_tl_bulk_allocated = expand_bytes;
4213 self->ExpandTlab(expand_bytes);
4214 DCHECK_LE(alloc_size, self->TlabSize());
4215 } else if (allocator_type == kAllocatorTypeTLAB) {
4216 DCHECK(bump_pointer_space_ != nullptr);
4217 const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
4218 if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
4219 return nullptr;
4220 }
4221     // Try allocating a new thread local buffer; if the allocation fails, the space must be
4222     // full, so return null.
4223 if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
4224 return nullptr;
4225 }
4226 *bytes_tl_bulk_allocated = new_tlab_size;
4227 } else {
4228 DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
4229 DCHECK(region_space_ != nullptr);
4230 if (space::RegionSpace::kRegionSize >= alloc_size) {
4231 // Non-large. Check OOME for a tlab.
4232 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
4233 space::RegionSpace::kRegionSize,
4234 grow))) {
4235 const size_t new_tlab_size = kUsePartialTlabs
4236 ? std::max(alloc_size, kPartialTlabSize)
4237 : gc::space::RegionSpace::kRegionSize;
4238 // Try to allocate a tlab.
4239 if (!region_space_->AllocNewTlab(self, new_tlab_size)) {
4240 // Failed to allocate a tlab. Try non-tlab.
4241 return region_space_->AllocNonvirtual<false>(alloc_size,
4242 bytes_allocated,
4243 usable_size,
4244 bytes_tl_bulk_allocated);
4245 }
4246 *bytes_tl_bulk_allocated = new_tlab_size;
4247 // Fall-through to using the TLAB below.
4248 } else {
4249 // Check OOME for a non-tlab allocation.
4250 if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
4251 return region_space_->AllocNonvirtual<false>(alloc_size,
4252 bytes_allocated,
4253 usable_size,
4254 bytes_tl_bulk_allocated);
4255 }
4256       // Neither tlab nor non-tlab works. Give up.
4257 return nullptr;
4258 }
4259 } else {
4260 // Large. Check OOME.
4261 if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
4262 return region_space_->AllocNonvirtual<false>(alloc_size,
4263 bytes_allocated,
4264 usable_size,
4265 bytes_tl_bulk_allocated);
4266 }
4267 return nullptr;
4268 }
4269 }
4270 // Refilled TLAB, return.
4271 mirror::Object* ret = self->AllocTlab(alloc_size);
4272 DCHECK(ret != nullptr);
4273 *bytes_allocated = alloc_size;
4274 *usable_size = alloc_size;
4275 return ret;
4276 }
4277
4278 const Verification* Heap::GetVerification() const {
4279 return verification_.get();
4280 }
4281
4282 } // namespace gc
4283 } // namespace art
4284