1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "heap.h"
18 
19 #include <limits>
20 #include <memory>
21 #include <unwind.h>  // For GC verification.
22 #include <vector>
23 
24 #include "art_field-inl.h"
25 #include "base/allocator.h"
26 #include "base/arena_allocator.h"
27 #include "base/dumpable.h"
28 #include "base/histogram-inl.h"
29 #include "base/stl_util.h"
30 #include "base/systrace.h"
31 #include "base/time_utils.h"
32 #include "common_throws.h"
33 #include "cutils/sched_policy.h"
34 #include "debugger.h"
35 #include "dex_file-inl.h"
36 #include "gc/accounting/atomic_stack.h"
37 #include "gc/accounting/card_table-inl.h"
38 #include "gc/accounting/heap_bitmap-inl.h"
39 #include "gc/accounting/mod_union_table-inl.h"
40 #include "gc/accounting/remembered_set.h"
41 #include "gc/accounting/space_bitmap-inl.h"
42 #include "gc/collector/concurrent_copying.h"
43 #include "gc/collector/mark_compact.h"
44 #include "gc/collector/mark_sweep.h"
45 #include "gc/collector/partial_mark_sweep.h"
46 #include "gc/collector/semi_space.h"
47 #include "gc/collector/sticky_mark_sweep.h"
48 #include "gc/reference_processor.h"
49 #include "gc/space/bump_pointer_space.h"
50 #include "gc/space/dlmalloc_space-inl.h"
51 #include "gc/space/image_space.h"
52 #include "gc/space/large_object_space.h"
53 #include "gc/space/region_space.h"
54 #include "gc/space/rosalloc_space-inl.h"
55 #include "gc/space/space-inl.h"
56 #include "gc/space/zygote_space.h"
57 #include "gc/task_processor.h"
58 #include "entrypoints/quick/quick_alloc_entrypoints.h"
59 #include "heap-inl.h"
60 #include "image.h"
61 #include "intern_table.h"
62 #include "jit/jit.h"
63 #include "jit/jit_code_cache.h"
64 #include "mirror/class-inl.h"
65 #include "mirror/object-inl.h"
66 #include "mirror/object_array-inl.h"
67 #include "mirror/reference-inl.h"
68 #include "os.h"
69 #include "reflection.h"
70 #include "runtime.h"
71 #include "ScopedLocalRef.h"
72 #include "scoped_thread_state_change.h"
73 #include "handle_scope-inl.h"
74 #include "thread_list.h"
75 #include "well_known_classes.h"
76 
77 namespace art {
78 
79 namespace gc {
80 
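// Collector-transition stress testing is disabled when kCollectorTransitionStressIterations is 0
// (see UpdateProcessState()).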
81 static constexpr size_t kCollectorTransitionStressIterations = 0;
82 static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
83 // Minimum amount of remaining bytes before a concurrent GC is triggered.
84 static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
85 static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
86 // Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GCs to occur more
87 // often relative to partial/full GCs. This may be desirable since sticky GCs interfere less with
88 // mutator threads (lower pauses, use less memory bandwidth).
89 static constexpr double kStickyGcThroughputAdjustment = 1.0;
90 // Whether or not we compact the zygote in PreZygoteFork.
91 static constexpr bool kCompactZygote = kMovingCollector;
92 // Number of reserve entries at the end of the allocation stack; these are only needed if the
93 // allocation stack overflows.
94 static constexpr size_t kAllocationStackReserveSize = 1024;
95 // Default mark stack size in bytes.
96 static const size_t kDefaultMarkStackSize = 64 * KB;
97 // Space names.
98 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
99 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
100 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
101 static const char* kNonMovingSpaceName = "non moving space";
102 static const char* kZygoteSpaceName = "zygote space";
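// Capacity of each bump pointer space created when the generational semi-space (GSS) collector is
// in use (see the GSS setup in the Heap constructor below).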
103 static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
104 static constexpr bool kGCALotMode = false;
105 // GC alot mode uses a small allocation stack to stress test a lot of GC.
106 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
107     sizeof(mirror::HeapReference<mirror::Object>);
108 // Verify object mode uses a small allocation stack size since searching the allocation stack is slow.
109 static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
110     sizeof(mirror::HeapReference<mirror::Object>);
111 static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
112     sizeof(mirror::HeapReference<mirror::Object>);
113 // System.runFinalization can deadlock with native allocations; to deal with this, we have a
114 // timeout on how long we wait for finalizers to run. b/21544853
115 static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
116 
117 // For deterministic compilation, we need the heap to be at a well-known address.
118 static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
119 // Dump the rosalloc stats on SIGQUIT.
120 static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
121 
122 static constexpr size_t kNativeAllocationHistogramBuckets = 16;
123 
124 static inline bool CareAboutPauseTimes() {
125   return Runtime::Current()->InJankPerceptibleProcessState();
126 }
127 
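// The constructor wires together the spaces, heap bitmaps, card table, mod-union tables and
// remembered sets, allocation/live/mark stacks, locks and garbage collectors. The intended
// address-space layout is sketched in the ASCII diagram further down.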
128 Heap::Heap(size_t initial_size,
129            size_t growth_limit,
130            size_t min_free,
131            size_t max_free,
132            double target_utilization,
133            double foreground_heap_growth_multiplier,
134            size_t capacity,
135            size_t non_moving_space_capacity,
136            const std::string& image_file_name,
137            const InstructionSet image_instruction_set,
138            CollectorType foreground_collector_type,
139            CollectorType background_collector_type,
140            space::LargeObjectSpaceType large_object_space_type,
141            size_t large_object_threshold,
142            size_t parallel_gc_threads,
143            size_t conc_gc_threads,
144            bool low_memory_mode,
145            size_t long_pause_log_threshold,
146            size_t long_gc_log_threshold,
147            bool ignore_max_footprint,
148            bool use_tlab,
149            bool verify_pre_gc_heap,
150            bool verify_pre_sweeping_heap,
151            bool verify_post_gc_heap,
152            bool verify_pre_gc_rosalloc,
153            bool verify_pre_sweeping_rosalloc,
154            bool verify_post_gc_rosalloc,
155            bool gc_stress_mode,
156            bool use_homogeneous_space_compaction_for_oom,
157            uint64_t min_interval_homogeneous_space_compaction_by_oom)
158     : non_moving_space_(nullptr),
159       rosalloc_space_(nullptr),
160       dlmalloc_space_(nullptr),
161       main_space_(nullptr),
162       collector_type_(kCollectorTypeNone),
163       foreground_collector_type_(foreground_collector_type),
164       background_collector_type_(background_collector_type),
165       desired_collector_type_(foreground_collector_type_),
166       pending_task_lock_(nullptr),
167       parallel_gc_threads_(parallel_gc_threads),
168       conc_gc_threads_(conc_gc_threads),
169       low_memory_mode_(low_memory_mode),
170       long_pause_log_threshold_(long_pause_log_threshold),
171       long_gc_log_threshold_(long_gc_log_threshold),
172       ignore_max_footprint_(ignore_max_footprint),
173       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
174       zygote_space_(nullptr),
175       large_object_threshold_(large_object_threshold),
176       disable_thread_flip_count_(0),
177       thread_flip_running_(false),
178       collector_type_running_(kCollectorTypeNone),
179       last_gc_type_(collector::kGcTypeNone),
180       next_gc_type_(collector::kGcTypePartial),
181       capacity_(capacity),
182       growth_limit_(growth_limit),
183       max_allowed_footprint_(initial_size),
184       native_footprint_gc_watermark_(initial_size),
185       native_need_to_run_finalization_(false),
186       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
187       total_bytes_freed_ever_(0),
188       total_objects_freed_ever_(0),
189       num_bytes_allocated_(0),
190       native_bytes_allocated_(0),
191       native_histogram_lock_("Native allocation lock"),
192       native_allocation_histogram_("Native allocation sizes",
193                                    1U,
194                                    kNativeAllocationHistogramBuckets),
195       native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
196       num_bytes_freed_revoke_(0),
197       verify_missing_card_marks_(false),
198       verify_system_weaks_(false),
199       verify_pre_gc_heap_(verify_pre_gc_heap),
200       verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
201       verify_post_gc_heap_(verify_post_gc_heap),
202       verify_mod_union_table_(false),
203       verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
204       verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
205       verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
206       gc_stress_mode_(gc_stress_mode),
207       /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
208        * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
209        * verification is enabled, we limit the size of allocation stacks to speed up their
210        * searching.
211        */
212       max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
213           : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
214           kDefaultAllocationStackSize),
215       current_allocator_(kAllocatorTypeDlMalloc),
216       current_non_moving_allocator_(kAllocatorTypeNonMoving),
217       bump_pointer_space_(nullptr),
218       temp_space_(nullptr),
219       region_space_(nullptr),
220       min_free_(min_free),
221       max_free_(max_free),
222       target_utilization_(target_utilization),
223       foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
224       total_wait_time_(0),
225       verify_object_mode_(kVerifyObjectModeDisabled),
226       disable_moving_gc_count_(0),
227       is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
228       use_tlab_(use_tlab),
229       main_space_backup_(nullptr),
230       min_interval_homogeneous_space_compaction_by_oom_(
231           min_interval_homogeneous_space_compaction_by_oom),
232       last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
233       pending_collector_transition_(nullptr),
234       pending_heap_trim_(nullptr),
235       use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
236       running_collection_is_blocking_(false),
237       blocking_gc_count_(0U),
238       blocking_gc_time_(0U),
239       last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
240           (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
241       gc_count_last_window_(0U),
242       blocking_gc_count_last_window_(0U),
243       gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
244       blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
245                                         kGcCountRateMaxBucketCount),
246       alloc_tracking_enabled_(false),
247       backtrace_lock_(nullptr),
248       seen_backtrace_count_(0u),
249       unique_backtrace_count_(0u),
250       gc_disabled_for_shutdown_(false) {
251   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
252     LOG(INFO) << "Heap() entering";
253   }
254   ScopedTrace trace(__FUNCTION__);
255   Runtime* const runtime = Runtime::Current();
256   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
257   // entrypoints.
258   const bool is_zygote = runtime->IsZygote();
259   if (!is_zygote) {
260     // Background compaction is currently not supported for command line runs.
261     if (background_collector_type_ != foreground_collector_type_) {
262       VLOG(heap) << "Disabling background compaction for non zygote";
263       background_collector_type_ = foreground_collector_type_;
264     }
265   }
266   ChangeCollector(desired_collector_type_);
267   live_bitmap_.reset(new accounting::HeapBitmap(this));
268   mark_bitmap_.reset(new accounting::HeapBitmap(this));
269   // Requested begin for the alloc space, to follow the mapped image and oat files
270   uint8_t* requested_alloc_space_begin = nullptr;
271   if (foreground_collector_type_ == kCollectorTypeCC) {
272     // Need to use a low address so that we can allocate a contiguous
273     // 2 * Xmx space when there's no image (dex2oat for target).
274     CHECK_GE(300 * MB, non_moving_space_capacity);
275     requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
276   }
277 
278   // Load image space(s).
279   if (!image_file_name.empty()) {
280     // For code reuse, handle this like a work queue.
281     std::vector<std::string> image_file_names;
282     image_file_names.push_back(image_file_name);
283     // The loaded spaces. Secondary images may fail to load, in which case we need to remove
284     // already added spaces.
285     std::vector<space::Space*> added_image_spaces;
286     uint8_t* const original_requested_alloc_space_begin = requested_alloc_space_begin;
287     for (size_t index = 0; index < image_file_names.size(); ++index) {
288       std::string& image_name = image_file_names[index];
289       std::string error_msg;
290       space::ImageSpace* boot_image_space = space::ImageSpace::CreateBootImage(
291           image_name.c_str(),
292           image_instruction_set,
293           index > 0,
294           &error_msg);
295       if (boot_image_space != nullptr) {
296         AddSpace(boot_image_space);
297         added_image_spaces.push_back(boot_image_space);
298         // Oat files referenced by image files immediately follow them in memory; ensure the alloc
299         // space isn't going to end up in the middle.
300         uint8_t* oat_file_end_addr = boot_image_space->GetImageHeader().GetOatFileEnd();
301         CHECK_GT(oat_file_end_addr, boot_image_space->End());
302         requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
303         boot_image_spaces_.push_back(boot_image_space);
304 
305         if (index == 0) {
306           // If this was the first space, check whether there are more images to load.
307           const OatFile* boot_oat_file = boot_image_space->GetOatFile();
308           if (boot_oat_file == nullptr) {
309             continue;
310           }
311 
312           const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader();
313           const char* boot_classpath =
314               boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
315           if (boot_classpath == nullptr) {
316             continue;
317           }
318 
319           space::ImageSpace::CreateMultiImageLocations(image_file_name,
320                                                        boot_classpath,
321                                                        &image_file_names);
322         }
323       } else {
324         LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
325             << "Attempting to fall back to imageless running. Error was: " << error_msg
326             << "\nAttempted image: " << image_name;
327         // Remove already loaded spaces.
328         for (space::Space* loaded_space : added_image_spaces) {
329           RemoveSpace(loaded_space);
330           delete loaded_space;
331         }
332         boot_image_spaces_.clear();
333         requested_alloc_space_begin = original_requested_alloc_space_begin;
334         break;
335       }
336     }
337   }
338   /*
339   requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
340                                      +-  nonmoving space (non_moving_space_capacity)+-
341                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
342                                      +-????????????????????????????????????????????+-
343                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
344                                      +-main alloc space / bump space 1 (capacity_) +-
345                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
346                                      +-????????????????????????????????????????????+-
347                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
348                                      +-main alloc space2 / bump space 2 (capacity_)+-
349                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
350   */
351   // We don't have hspace compaction enabled with GSS or CC.
352   if (foreground_collector_type_ == kCollectorTypeGSS ||
353       foreground_collector_type_ == kCollectorTypeCC) {
354     use_homogeneous_space_compaction_for_oom_ = false;
355   }
356   bool support_homogeneous_space_compaction =
357       background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
358       use_homogeneous_space_compaction_for_oom_;
359   // We may use the same space as the main space for the non moving space if we don't need to
360   // compact from the main space.
361   // This is not the case if we support homogeneous compaction or have a moving background
362   // collector type.
363   bool separate_non_moving_space = is_zygote ||
364       support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
365       IsMovingGc(background_collector_type_);
366   if (foreground_collector_type_ == kCollectorTypeGSS) {
367     separate_non_moving_space = false;
368   }
369   std::unique_ptr<MemMap> main_mem_map_1;
370   std::unique_ptr<MemMap> main_mem_map_2;
371 
372   // Gross hack to make dex2oat deterministic.
373   if (foreground_collector_type_ == kCollectorTypeMS &&
374       requested_alloc_space_begin == nullptr &&
375       Runtime::Current()->IsAotCompiler()) {
376     // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
377     // b/26849108
378     requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
379   }
380   uint8_t* request_begin = requested_alloc_space_begin;
381   if (request_begin != nullptr && separate_non_moving_space) {
382     request_begin += non_moving_space_capacity;
383   }
384   std::string error_str;
385   std::unique_ptr<MemMap> non_moving_space_mem_map;
386   if (separate_non_moving_space) {
387     ScopedTrace trace2("Create separate non moving space");
388     // If we are the zygote, the non moving space becomes the zygote space when we run
389     // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
390     // rename the mem map later.
391     const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
392     // Reserve the non moving mem map before the other two since it needs to be at a specific
393     // address.
394     non_moving_space_mem_map.reset(
395         MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
396                              non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
397                              &error_str));
398     CHECK(non_moving_space_mem_map != nullptr) << error_str;
399     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
400     request_begin = reinterpret_cast<uint8_t*>(300 * MB);
401   }
402   // Attempt to create 2 mem maps at or after the requested begin.
403   if (foreground_collector_type_ != kCollectorTypeCC) {
404     ScopedTrace trace2("Create main mem map");
405     if (separate_non_moving_space || !is_zygote) {
406       main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
407                                                         request_begin,
408                                                         capacity_,
409                                                         &error_str));
410     } else {
411       // If no separate non-moving space and we are the zygote, the main space must come right
412       // after the image space to avoid a gap. This is required since we want the zygote space to
413       // be adjacent to the image space.
414       main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
415                                                 PROT_READ | PROT_WRITE, true, false,
416                                                 &error_str));
417     }
418     CHECK(main_mem_map_1.get() != nullptr) << error_str;
419   }
420   if (support_homogeneous_space_compaction ||
421       background_collector_type_ == kCollectorTypeSS ||
422       foreground_collector_type_ == kCollectorTypeSS) {
423     ScopedTrace trace2("Create main mem map 2");
424     main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
425                                                       capacity_, &error_str));
426     CHECK(main_mem_map_2.get() != nullptr) << error_str;
427   }
428 
429   // Create the non moving space first so that bitmaps don't take up the address range.
430   if (separate_non_moving_space) {
431     ScopedTrace trace2("Add non moving space");
432     // Non moving space is always dlmalloc since we currently don't have support for multiple
433     // active rosalloc spaces.
434     const size_t size = non_moving_space_mem_map->Size();
435     non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
436         non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
437         initial_size, size, size, false);
438     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
439         << requested_alloc_space_begin;
440     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
441     AddSpace(non_moving_space_);
442   }
443   // Create other spaces based on whether or not we have a moving GC.
444   if (foreground_collector_type_ == kCollectorTypeCC) {
445     region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
446     AddSpace(region_space_);
447   } else if (IsMovingGc(foreground_collector_type_) &&
448       foreground_collector_type_ != kCollectorTypeGSS) {
449     // Create bump pointer spaces.
450     // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
451     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
452     bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
453                                                                     main_mem_map_1.release());
454     CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
455     AddSpace(bump_pointer_space_);
456     temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
457                                                             main_mem_map_2.release());
458     CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
459     AddSpace(temp_space_);
460     CHECK(separate_non_moving_space);
461   } else {
462     CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
463     CHECK(main_space_ != nullptr);
464     AddSpace(main_space_);
465     if (!separate_non_moving_space) {
466       non_moving_space_ = main_space_;
467       CHECK(!non_moving_space_->CanMoveObjects());
468     }
469     if (foreground_collector_type_ == kCollectorTypeGSS) {
470       CHECK_EQ(foreground_collector_type_, background_collector_type_);
471       // Create bump pointer spaces instead of a backup space.
472       main_mem_map_2.release();
473       bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
474                                                             kGSSBumpPointerSpaceCapacity, nullptr);
475       CHECK(bump_pointer_space_ != nullptr);
476       AddSpace(bump_pointer_space_);
477       temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
478                                                     kGSSBumpPointerSpaceCapacity, nullptr);
479       CHECK(temp_space_ != nullptr);
480       AddSpace(temp_space_);
481     } else if (main_mem_map_2.get() != nullptr) {
482       const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
483       main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
484                                                            growth_limit_, capacity_, name, true));
485       CHECK(main_space_backup_.get() != nullptr);
486       // Add the space so it's accounted for in the heap_begin and heap_end.
487       AddSpace(main_space_backup_.get());
488     }
489   }
490   CHECK(non_moving_space_ != nullptr);
491   CHECK(!non_moving_space_->CanMoveObjects());
492   // Allocate the large object space.
493   if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
494     large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
495                                                        capacity_);
496     CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
497   } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
498     large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
499     CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
500   } else {
501     // Disable the large object space by making the cutoff excessively large.
502     large_object_threshold_ = std::numeric_limits<size_t>::max();
503     large_object_space_ = nullptr;
504   }
505   if (large_object_space_ != nullptr) {
506     AddSpace(large_object_space_);
507   }
508   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
509   CHECK(!continuous_spaces_.empty());
510   // Relies on the spaces being sorted.
511   uint8_t* heap_begin = continuous_spaces_.front()->Begin();
512   uint8_t* heap_end = continuous_spaces_.back()->Limit();
513   size_t heap_capacity = heap_end - heap_begin;
514   // Remove the main backup space since it slows down the GC to have unused extra spaces.
515   // TODO: Avoid needing to do this.
516   if (main_space_backup_.get() != nullptr) {
517     RemoveSpace(main_space_backup_.get());
518   }
519   // Allocate the card table.
520   // We currently don't support dynamically resizing the card table.
521   // Since we don't know where in the low_4gb the app image will be located, make the card table
522   // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
523   UNUSED(heap_capacity);
524   // Start at kMinHeapAddress; we can be sure there are no spaces mapped this low since the address
525   // range is reserved by the kernel.
526   static constexpr size_t kMinHeapAddress = 4 * KB;
527   card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
528                                                   4 * GB - kMinHeapAddress));
529   CHECK(card_table_.get() != nullptr) << "Failed to create card table";
530   if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
531     rb_table_.reset(new accounting::ReadBarrierTable());
532     DCHECK(rb_table_->IsAllCleared());
533   }
534   if (HasBootImageSpace()) {
535     // Don't add the image mod union table if we are running without an image; this can crash if
536     // we use the CardCache implementation.
537     for (space::ImageSpace* image_space : GetBootImageSpaces()) {
538       accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
539           "Image mod-union table", this, image_space);
540       CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
541       AddModUnionTable(mod_union_table);
542     }
543   }
544   if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
545     accounting::RememberedSet* non_moving_space_rem_set =
546         new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
547     CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
548     AddRememberedSet(non_moving_space_rem_set);
549   }
550   // TODO: Count objects in the image space here?
551   num_bytes_allocated_.StoreRelaxed(0);
552   mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
553                                                     kDefaultMarkStackSize));
554   const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
555   allocation_stack_.reset(accounting::ObjectStack::Create(
556       "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
557   live_stack_.reset(accounting::ObjectStack::Create(
558       "live stack", max_allocation_stack_size_, alloc_stack_capacity));
559   // It's still too early to take a lock because there are no threads yet, but we can create locks
560   // now. We don't create them earlier to make it clear that you can't use locks during heap
561   // initialization.
562   gc_complete_lock_ = new Mutex("GC complete lock");
563   gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
564                                                 *gc_complete_lock_));
565   thread_flip_lock_ = new Mutex("GC thread flip lock");
566   thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
567                                                 *thread_flip_lock_));
568   task_processor_.reset(new TaskProcessor());
569   reference_processor_.reset(new ReferenceProcessor());
570   pending_task_lock_ = new Mutex("Pending task lock");
571   if (ignore_max_footprint_) {
572     SetIdealFootprint(std::numeric_limits<size_t>::max());
573     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
574   }
575   CHECK_NE(max_allowed_footprint_, 0U);
576   // Create our garbage collectors.
577   for (size_t i = 0; i < 2; ++i) {
578     const bool concurrent = i != 0;
579     if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
580         (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
581       garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
582       garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
583       garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
584     }
585   }
586   if (kMovingCollector) {
587     if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
588         MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
589         use_homogeneous_space_compaction_for_oom_) {
590       // TODO: Clean this up.
591       const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
592       semi_space_collector_ = new collector::SemiSpace(this, generational,
593                                                        generational ? "generational" : "");
594       garbage_collectors_.push_back(semi_space_collector_);
595     }
596     if (MayUseCollector(kCollectorTypeCC)) {
597       concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
598       garbage_collectors_.push_back(concurrent_copying_collector_);
599     }
600     if (MayUseCollector(kCollectorTypeMC)) {
601       mark_compact_collector_ = new collector::MarkCompact(this);
602       garbage_collectors_.push_back(mark_compact_collector_);
603     }
604   }
605   if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
606       (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
607     // Check that there's no gap between the image space and the non moving space so that the
608     // immune region won't break (eg. due to a large object allocated in the gap). This is only
609     // required when we're the zygote or using GSS.
610     // Space with smallest Begin().
611     space::ImageSpace* first_space = nullptr;
612     for (space::ImageSpace* space : boot_image_spaces_) {
613       if (first_space == nullptr || space->Begin() < first_space->Begin()) {
614         first_space = space;
615       }
616     }
617     bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
618     if (!no_gap) {
619       PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
620       MemMap::DumpMaps(LOG(ERROR), true);
621       LOG(FATAL) << "There's a gap between the image space and the non-moving space";
622     }
623   }
624   instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
625   if (gc_stress_mode_) {
626     backtrace_lock_ = new Mutex("GC complete lock");
627   }
628   if (is_running_on_memory_tool_ || gc_stress_mode_) {
629     instrumentation->InstrumentQuickAllocEntryPoints();
630   }
631   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
632     LOG(INFO) << "Heap() exiting";
633   }
634 }
635 
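// Map an anonymous region at the preferred address if possible; if that fails, retry once and let
// the kernel pick the address.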
636 MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
637                                            uint8_t* request_begin,
638                                            size_t capacity,
639                                            std::string* out_error_str) {
640   while (true) {
641     MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
642                                        PROT_READ | PROT_WRITE, true, false, out_error_str);
643     if (map != nullptr || request_begin == nullptr) {
644       return map;
645     }
646     // Retry a second time with no specified request begin.
647     request_begin = nullptr;
648   }
649 }
650 
651 bool Heap::MayUseCollector(CollectorType type) const {
652   return foreground_collector_type_ == type || background_collector_type_ == type;
653 }
654 
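// Create a RosAlloc or DlMalloc space (depending on kUseRosAlloc) on top of an existing mem map,
// and give it a remembered set when the semi-space collector uses one.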
655 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
656                                                       size_t initial_size,
657                                                       size_t growth_limit,
658                                                       size_t capacity,
659                                                       const char* name,
660                                                       bool can_move_objects) {
661   space::MallocSpace* malloc_space = nullptr;
662   if (kUseRosAlloc) {
663     // Create rosalloc space.
664     malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
665                                                           initial_size, growth_limit, capacity,
666                                                           low_memory_mode_, can_move_objects);
667   } else {
668     malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
669                                                           initial_size, growth_limit, capacity,
670                                                           can_move_objects);
671   }
672   if (collector::SemiSpace::kUseRememberedSet) {
673     accounting::RememberedSet* rem_set  =
674         new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
675     CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
676     AddRememberedSet(rem_set);
677   }
678   CHECK(malloc_space != nullptr) << "Failed to create " << name;
679   malloc_space->SetFootprintLimit(malloc_space->Capacity());
680   return malloc_space;
681 }
682 
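// (Re)create the main malloc space. Whether its objects may later be moved depends on background
// compaction, homogeneous space compaction for OOM, and zygote status (see below).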
683 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
684                                  size_t capacity) {
685   // Is background compaction enabled?
686   bool can_move_objects = IsMovingGc(background_collector_type_) !=
687       IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
688   // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
689   // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
690   // from the main space to the zygote space. If background compaction is enabled, always pass in
691   // that we can move objects.
692   if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
693     // After the zygote we want this to be false if we don't have background compaction enabled so
694     // that getting primitive array elements is faster.
695     // We never have homogeneous compaction with GSS and don't need a space with movable objects.
696     can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
697   }
698   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
699     RemoveRememberedSet(main_space_);
700   }
701   const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
702   main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
703                                             can_move_objects);
704   SetSpaceAsDefault(main_space_);
705   VLOG(heap) << "Created main space " << main_space_;
706 }
707 
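// Switch the allocator used for new allocations; the quick alloc entrypoints are reset so that the
// allocation fast paths pick up the change.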
708 void Heap::ChangeAllocator(AllocatorType allocator) {
709   if (current_allocator_ != allocator) {
710     // These two allocators are only used internally and don't have any entrypoints.
711     CHECK_NE(allocator, kAllocatorTypeLOS);
712     CHECK_NE(allocator, kAllocatorTypeNonMoving);
713     current_allocator_ = allocator;
714     MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
715     SetQuickAllocEntryPointsAllocator(current_allocator_);
716     Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
717   }
718 }
719 
720 void Heap::DisableMovingGc() {
721   if (IsMovingGc(foreground_collector_type_)) {
722     foreground_collector_type_ = kCollectorTypeCMS;
723   }
724   if (IsMovingGc(background_collector_type_)) {
725     background_collector_type_ = foreground_collector_type_;
726   }
727   TransitionCollector(foreground_collector_type_);
728   Thread* const self = Thread::Current();
729   ScopedThreadStateChange tsc(self, kSuspended);
730   ScopedSuspendAll ssa(__FUNCTION__);
731   // Something may have caused the transition to fail.
732   if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
733     CHECK(main_space_ != nullptr);
734     // The allocation stack may have non movable objects in it. We need to flush it since the GC
735     // can only handle marking allocation stack objects of one non moving space and one main
736     // space.
737     {
738       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
739       FlushAllocStack();
740     }
741     main_space_->DisableMovingObjects();
742     non_moving_space_ = main_space_;
743     CHECK(!non_moving_space_->CanMoveObjects());
744   }
745 }
746 
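// Best-effort descriptor lookup that tolerates addresses outside the heap; used by
// SafePrettyTypeOf() and DumpObject() when dumping possibly corrupt objects.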
747 std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
748   if (!IsValidContinuousSpaceObjectAddress(klass)) {
749     return StringPrintf("<non heap address klass %p>", klass);
750   }
751   mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
752   if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
753     std::string result("[");
754     result += SafeGetClassDescriptor(component_type);
755     return result;
756   } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
757     return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
758   } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
759     return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
760   } else {
761     mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
762     if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
763       return StringPrintf("<non heap address dex_cache %p>", dex_cache);
764     }
765     const DexFile* dex_file = dex_cache->GetDexFile();
766     uint16_t class_def_idx = klass->GetDexClassDefIndex();
767     if (class_def_idx == DexFile::kDexNoIndex16) {
768       return "<class def not found>";
769     }
770     const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
771     const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
772     return dex_file->GetTypeDescriptor(type_id);
773   }
774 }
775 
776 std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
777   if (obj == nullptr) {
778     return "null";
779   }
780   mirror::Class* klass = obj->GetClass<kVerifyNone>();
781   if (klass == nullptr) {
782     return "(class=null)";
783   }
784   std::string result(SafeGetClassDescriptor(klass));
785   if (obj->IsClass()) {
786     result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
787   }
788   return result;
789 }
790 
791 void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
792   if (obj == nullptr) {
793     stream << "(obj=null)";
794     return;
795   }
796   if (IsAligned<kObjectAlignment>(obj)) {
797     space::Space* space = nullptr;
798     // Don't use find space since it only finds spaces which actually contain objects instead of
799     // spaces which may contain objects (e.g. cleared bump pointer spaces).
800     for (const auto& cur_space : continuous_spaces_) {
801       if (cur_space->HasAddress(obj)) {
802         space = cur_space;
803         break;
804       }
805     }
806     // Unprotect all the spaces.
807     for (const auto& con_space : continuous_spaces_) {
808       mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
809     }
810     stream << "Object " << obj;
811     if (space != nullptr) {
812       stream << " in space " << *space;
813     }
814     mirror::Class* klass = obj->GetClass<kVerifyNone>();
815     stream << "\nclass=" << klass;
816     if (klass != nullptr) {
817       stream << " type= " << SafePrettyTypeOf(obj);
818     }
819     // Re-protect the address we faulted on.
820     mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
821   }
822 }
823 
824 bool Heap::IsCompilingBoot() const {
825   if (!Runtime::Current()->IsAotCompiler()) {
826     return false;
827   }
828   ScopedObjectAccess soa(Thread::Current());
829   for (const auto& space : continuous_spaces_) {
830     if (space->IsImageSpace() || space->IsZygoteSpace()) {
831       return false;
832     }
833   }
834   return true;
835 }
836 
837 void Heap::IncrementDisableMovingGC(Thread* self) {
838   // Need to do this holding the lock to prevent races where the GC is about to run / running when
839   // we attempt to disable it.
840   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
841   MutexLock mu(self, *gc_complete_lock_);
842   ++disable_moving_gc_count_;
843   if (IsMovingGc(collector_type_running_)) {
844     WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
845   }
846 }
847 
848 void Heap::DecrementDisableMovingGC(Thread* self) {
849   MutexLock mu(self, *gc_complete_lock_);
850   CHECK_GT(disable_moving_gc_count_, 0U);
851   --disable_moving_gc_count_;
852 }
853 
854 void Heap::IncrementDisableThreadFlip(Thread* self) {
855   // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
856   CHECK(kUseReadBarrier);
857   bool is_nested = self->GetDisableThreadFlipCount() > 0;
858   self->IncrementDisableThreadFlipCount();
859   if (is_nested) {
860     // If this is a nested JNI critical section enter, we don't need to wait or increment the global
861     // counter. The global counter is incremented only once for a thread for the outermost enter.
862     return;
863   }
864   ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
865   MutexLock mu(self, *thread_flip_lock_);
866   bool has_waited = false;
867   uint64_t wait_start = NanoTime();
868   while (thread_flip_running_) {
869     has_waited = true;
870     thread_flip_cond_->Wait(self);
871   }
872   ++disable_thread_flip_count_;
873   if (has_waited) {
874     uint64_t wait_time = NanoTime() - wait_start;
875     total_wait_time_ += wait_time;
876     if (wait_time > long_pause_log_threshold_) {
877       LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
878     }
879   }
880 }
881 
882 void Heap::DecrementDisableThreadFlip(Thread* self) {
883   // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
884   // the GC waiting before doing a thread flip.
885   CHECK(kUseReadBarrier);
886   self->DecrementDisableThreadFlipCount();
887   bool is_outermost = self->GetDisableThreadFlipCount() == 0;
888   if (!is_outermost) {
889     // If this is not an outermost JNI critical exit, we don't need to decrement the global counter.
890     // The global counter is decremented only once for a thread for the outermost exit.
891     return;
892   }
893   MutexLock mu(self, *thread_flip_lock_);
894   CHECK_GT(disable_thread_flip_count_, 0U);
895   --disable_thread_flip_count_;
896   if (disable_thread_flip_count_ == 0) {
897     // Potentially notify the GC thread blocking to begin a thread flip.
898     thread_flip_cond_->Broadcast(self);
899   }
900 }
901 
902 void Heap::ThreadFlipBegin(Thread* self) {
903   // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
904   // > 0, block. Otherwise, go ahead.
905   CHECK(kUseReadBarrier);
906   ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
907   MutexLock mu(self, *thread_flip_lock_);
908   bool has_waited = false;
909   uint64_t wait_start = NanoTime();
910   CHECK(!thread_flip_running_);
911   // Set this to true before waiting so that frequent JNI critical enter/exits won't starve
912   // GC. This is like a writer preference of a reader-writer lock.
913   thread_flip_running_ = true;
914   while (disable_thread_flip_count_ > 0) {
915     has_waited = true;
916     thread_flip_cond_->Wait(self);
917   }
918   if (has_waited) {
919     uint64_t wait_time = NanoTime() - wait_start;
920     total_wait_time_ += wait_time;
921     if (wait_time > long_pause_log_threshold_) {
922       LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
923     }
924   }
925 }
926 
927 void Heap::ThreadFlipEnd(Thread* self) {
928   // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
929   // waiting before doing a JNI critical.
930   CHECK(kUseReadBarrier);
931   MutexLock mu(self, *thread_flip_lock_);
932   CHECK(thread_flip_running_);
933   thread_flip_running_ = false;
934   // Potentially notify mutator threads blocking to enter a JNI critical section.
935   thread_flip_cond_->Broadcast(self);
936 }
937 
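// Called when the process moves between jank-perceptible (foreground) and imperceptible
// (background) states; requests a transition to the matching collector type.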
938 void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
939   if (old_process_state != new_process_state) {
940     const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
941     for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
942       // Start at index 1 to avoid "is always false" warning.
943       // Have iteration 1 always transition the collector.
944       TransitionCollector((((i & 1) == 1) == jank_perceptible)
945           ? foreground_collector_type_
946           : background_collector_type_);
947       usleep(kCollectorTransitionStressWait);
948     }
949     if (jank_perceptible) {
950       // Transition back to foreground right away to prevent jank.
951       RequestCollectorTransition(foreground_collector_type_, 0);
952     } else {
953       // Don't delay for debug builds since we may want to stress test the GC.
954       // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
955       // special handling which does a homogenous space compaction once but then doesn't transition
956       // the collector.
957       RequestCollectorTransition(background_collector_type_,
958                                  kIsDebugBuild ? 0 : kCollectorTransitionWait);
959     }
960   }
961 }
962 
963 void Heap::CreateThreadPool() {
964   const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
965   if (num_threads != 0) {
966     thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
967   }
968 }
969 
970 // Visit objects when threads aren't suspended. If using a concurrent moving
971 // GC, disable moving GC, suspend threads, and then visit objects.
972 void Heap::VisitObjects(ObjectCallback callback, void* arg) {
973   Thread* self = Thread::Current();
974   Locks::mutator_lock_->AssertSharedHeld(self);
975   DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
976   if (IsGcConcurrentAndMoving()) {
977     // Concurrent moving GC. Just suspending threads isn't sufficient
978     // because a collection isn't one big pause and we could suspend
979     // threads in the middle (between phases) of a concurrent moving
980     // collection where it's not easily known which objects are alive
981     // (both the region space and the non-moving space) or which
982     // copies of objects to visit, and the to-space invariant could be
983     // easily broken. Visit objects while GC isn't running by using
984     // IncrementDisableMovingGC() and threads are suspended.
985     IncrementDisableMovingGC(self);
986     {
987       ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
988       ScopedSuspendAll ssa(__FUNCTION__);
989       VisitObjectsInternalRegionSpace(callback, arg);
990       VisitObjectsInternal(callback, arg);
991     }
992     DecrementDisableMovingGC(self);
993   } else {
994     // GCs can move objects, so don't allow this.
995     ScopedAssertNoThreadSuspension ants(self, "Visiting objects");
996     DCHECK(region_space_ == nullptr);
997     VisitObjectsInternal(callback, arg);
998   }
999 }
1000 
1001 // Visit objects when threads are already suspended.
1002 void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
1003   Thread* self = Thread::Current();
1004   Locks::mutator_lock_->AssertExclusiveHeld(self);
1005   VisitObjectsInternalRegionSpace(callback, arg);
1006   VisitObjectsInternal(callback, arg);
1007 }
1008 
1009 // Visit objects in the region spaces.
1010 void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
1011   Thread* self = Thread::Current();
1012   Locks::mutator_lock_->AssertExclusiveHeld(self);
1013   if (region_space_ != nullptr) {
1014     DCHECK(IsGcConcurrentAndMoving());
1015     if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
1016       // Exclude the pre-zygote fork time where the semi-space collector
1017       // calls VerifyHeapReferences() as part of the zygote compaction
1018       // which then would call here without the moving GC disabled,
1019       // which is fine.
1020       DCHECK(IsMovingGCDisabled(self));
1021     }
1022     region_space_->Walk(callback, arg);
1023   }
1024 }
1025 
1026 // Visit objects in the other spaces.
1027 void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
1028   if (bump_pointer_space_ != nullptr) {
1029     // Visit objects in bump pointer space.
1030     bump_pointer_space_->Walk(callback, arg);
1031   }
1032   // TODO: Switch to standard begin and end to use a range-based loop.
1033   for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
1034     mirror::Object* const obj = it->AsMirrorPtr();
1035     if (obj != nullptr && obj->GetClass() != nullptr) {
1036       // Avoid the race condition caused by the object not yet being written into the allocation
1037       // stack or the class not yet being written in the object. Or, if
1038       // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
1039       callback(obj, arg);
1040     }
1041   }
1042   {
1043     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1044     GetLiveBitmap()->Walk(callback, arg);
1045   }
1046 }
1047 
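// Mark everything on the given allocation stack as live in the bitmaps of the main space, the non
// moving space and, if present, the large object space.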
1048 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
1049   space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
1050   space::ContinuousSpace* space2 = non_moving_space_;
1051   // TODO: Generalize this to n bitmaps?
1052   CHECK(space1 != nullptr);
1053   CHECK(space2 != nullptr);
1054   MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
1055                  (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
1056                  stack);
1057 }
1058 
1059 void Heap::DeleteThreadPool() {
1060   thread_pool_.reset(nullptr);
1061 }
1062 
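// Register a space with the heap: hook up its live/mark bitmaps and keep continuous_spaces_ sorted
// by increasing begin address.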
1063 void Heap::AddSpace(space::Space* space) {
1064   CHECK(space != nullptr);
1065   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1066   if (space->IsContinuousSpace()) {
1067     DCHECK(!space->IsDiscontinuousSpace());
1068     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1069     // Continuous spaces don't necessarily have bitmaps.
1070     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1071     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1072     if (live_bitmap != nullptr) {
1073       CHECK(mark_bitmap != nullptr);
1074       live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
1075       mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
1076     }
1077     continuous_spaces_.push_back(continuous_space);
1078     // Ensure that spaces remain sorted in increasing order of start address.
1079     std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
1080               [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
1081       return a->Begin() < b->Begin();
1082     });
1083   } else {
1084     CHECK(space->IsDiscontinuousSpace());
1085     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1086     live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1087     mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1088     discontinuous_spaces_.push_back(discontinuous_space);
1089   }
1090   if (space->IsAllocSpace()) {
1091     alloc_spaces_.push_back(space->AsAllocSpace());
1092   }
1093 }
1094 
1095 void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
1096   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1097   if (continuous_space->IsDlMallocSpace()) {
1098     dlmalloc_space_ = continuous_space->AsDlMallocSpace();
1099   } else if (continuous_space->IsRosAllocSpace()) {
1100     rosalloc_space_ = continuous_space->AsRosAllocSpace();
1101   }
1102 }
1103 
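// Undoes AddSpace: detaches the space's bitmaps from the global heap bitmaps and removes it from
// the continuous/discontinuous and alloc space lists.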
1104 void Heap::RemoveSpace(space::Space* space) {
1105   DCHECK(space != nullptr);
1106   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1107   if (space->IsContinuousSpace()) {
1108     DCHECK(!space->IsDiscontinuousSpace());
1109     space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
1110     // Continuous spaces don't necessarily have bitmaps.
1111     accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
1112     accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
1113     if (live_bitmap != nullptr) {
1114       DCHECK(mark_bitmap != nullptr);
1115       live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
1116       mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
1117     }
1118     auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
1119     DCHECK(it != continuous_spaces_.end());
1120     continuous_spaces_.erase(it);
1121   } else {
1122     DCHECK(space->IsDiscontinuousSpace());
1123     space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
1124     live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
1125     mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
1126     auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
1127                         discontinuous_space);
1128     DCHECK(it != discontinuous_spaces_.end());
1129     discontinuous_spaces_.erase(it);
1130   }
1131   if (space->IsAllocSpace()) {
1132     auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
1133     DCHECK(it != alloc_spaces_.end());
1134     alloc_spaces_.erase(it);
1135   }
1136 }
1137 
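// Dumps cumulative GC statistics: per-collector timings, size/object throughput, allocation and
// free totals, current memory levels, GC count rate histograms, native allocation histograms,
// optional rosalloc stats, and lock contention. This is the report that typically appears in
// SIGQUIT / ANR runtime dumps (assumption based on how the runtime surfaces GC performance info).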
1138 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
1139   // Dump cumulative timings.
1140   os << "Dumping cumulative Gc timings\n";
1141   uint64_t total_duration = 0;
1142   // Dump cumulative loggers for each GC type.
1143   uint64_t total_paused_time = 0;
1144   for (auto& collector : garbage_collectors_) {
1145     total_duration += collector->GetCumulativeTimings().GetTotalNs();
1146     total_paused_time += collector->GetTotalPausedTimeNs();
1147     collector->DumpPerformanceInfo(os);
1148   }
1149   if (total_duration != 0) {
1150     const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
1151     os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
1152     os << "Mean GC size throughput: "
1153        << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
1154     os << "Mean GC object throughput: "
1155        << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
1156   }
1157   uint64_t total_objects_allocated = GetObjectsAllocatedEver();
1158   os << "Total number of allocations " << total_objects_allocated << "\n";
1159   os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
1160   os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
1161   os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
1162   os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
1163   os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
1164   os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
1165   os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
1166   if (HasZygoteSpace()) {
1167     os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
1168   }
1169   os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
1170   os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
1171   os << "Total GC count: " << GetGcCount() << "\n";
1172   os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
1173   os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
1174   os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
1175 
1176   {
1177     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1178     if (gc_count_rate_histogram_.SampleSize() > 0U) {
1179       os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1180       gc_count_rate_histogram_.DumpBins(os);
1181       os << "\n";
1182     }
1183     if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1184       os << "Histogram of blocking GC count per "
1185          << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
1186       blocking_gc_count_rate_histogram_.DumpBins(os);
1187       os << "\n";
1188     }
1189   }
1190 
1191   if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
1192     rosalloc_space_->DumpStats(os);
1193   }
1194 
1195   {
1196     MutexLock mu(Thread::Current(), native_histogram_lock_);
1197     if (native_allocation_histogram_.SampleSize() > 0u) {
1198       os << "Histogram of native allocation ";
1199       native_allocation_histogram_.DumpBins(os);
1200       os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
1201     }
1202     if (native_free_histogram_.SampleSize() > 0u) {
1203       os << "Histogram of native free ";
1204       native_free_histogram_.DumpBins(os);
1205       os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
1206     }
1207   }
1208 
1209   BaseMutex::DumpAll(os);
1210 }
1211 
1212 void Heap::ResetGcPerformanceInfo() {
1213   for (auto& collector : garbage_collectors_) {
1214     collector->ResetMeasurements();
1215   }
1216   total_bytes_freed_ever_ = 0;
1217   total_objects_freed_ever_ = 0;
1218   total_wait_time_ = 0;
1219   blocking_gc_count_ = 0;
1220   blocking_gc_time_ = 0;
1221   gc_count_last_window_ = 0;
1222   blocking_gc_count_last_window_ = 0;
1223   last_update_time_gc_count_rate_histograms_ =  // Round down by the window duration.
1224       (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
1225   {
1226     MutexLock mu(Thread::Current(), *gc_complete_lock_);
1227     gc_count_rate_histogram_.Reset();
1228     blocking_gc_count_rate_histogram_.Reset();
1229   }
1230 }
1231 
1232 uint64_t Heap::GetGcCount() const {
1233   uint64_t gc_count = 0U;
1234   for (auto& collector : garbage_collectors_) {
1235     gc_count += collector->GetCumulativeTimings().GetIterations();
1236   }
1237   return gc_count;
1238 }
1239 
1240 uint64_t Heap::GetGcTime() const {
1241   uint64_t gc_time = 0U;
1242   for (auto& collector : garbage_collectors_) {
1243     gc_time += collector->GetCumulativeTimings().GetTotalNs();
1244   }
1245   return gc_time;
1246 }
1247 
1248 uint64_t Heap::GetBlockingGcCount() const {
1249   return blocking_gc_count_;
1250 }
1251 
1252 uint64_t Heap::GetBlockingGcTime() const {
1253   return blocking_gc_time_;
1254 }
1255 
1256 void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
1257   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1258   if (gc_count_rate_histogram_.SampleSize() > 0U) {
1259     gc_count_rate_histogram_.DumpBins(os);
1260   }
1261 }
1262 
1263 void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
1264   MutexLock mu(Thread::Current(), *gc_complete_lock_);
1265   if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
1266     blocking_gc_count_rate_histogram_.DumpBins(os);
1267   }
1268 }
1269 
1270 Heap::~Heap() {
1271   VLOG(heap) << "Starting ~Heap()";
1272   STLDeleteElements(&garbage_collectors_);
1273   // If we don't reset then the mark stack complains in its destructor.
1274   allocation_stack_->Reset();
1275   allocation_records_.reset();
1276   live_stack_->Reset();
1277   STLDeleteValues(&mod_union_tables_);
1278   STLDeleteValues(&remembered_sets_);
1279   STLDeleteElements(&continuous_spaces_);
1280   STLDeleteElements(&discontinuous_spaces_);
1281   delete gc_complete_lock_;
1282   delete thread_flip_lock_;
1283   delete pending_task_lock_;
1284   delete backtrace_lock_;
1285   if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
1286     LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
1287         << " total=" << seen_backtrace_count_.LoadRelaxed() +
1288             unique_backtrace_count_.LoadRelaxed();
1289   }
1290   VLOG(heap) << "Finished ~Heap()";
1291 }
1292 
1293 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
1294                                                             bool fail_ok) const {
1295   for (const auto& space : continuous_spaces_) {
1296     if (space->Contains(obj)) {
1297       return space;
1298     }
1299   }
1300   if (!fail_ok) {
1301     LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1302   }
1303   return nullptr;
1304 }
1305 
1306 space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
1307                                                                   bool fail_ok) const {
1308   for (const auto& space : discontinuous_spaces_) {
1309     if (space->Contains(obj)) {
1310       return space;
1311     }
1312   }
1313   if (!fail_ok) {
1314     LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
1315   }
1316   return nullptr;
1317 }
1318 
1319 space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
1320   space::Space* result = FindContinuousSpaceFromObject(obj, true);
1321   if (result != nullptr) {
1322     return result;
1323   }
1324   return FindDiscontinuousSpaceFromObject(obj, fail_ok);
1325 }
1326 
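// Builds and throws an OutOfMemoryError describing the failed allocation. If the thread is already
// handling a stack overflow, the pre-allocated OOME is reused instead of running a constructor.
// When there is nominally enough free memory, the failure is likely due to fragmentation, so the
// responsible space logs fragmentation details for the request.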
1327 void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
1328   // If we're in a stack overflow, do not create a new exception. It would require running the
1329   // constructor, which will of course still be in a stack overflow.
1330   if (self->IsHandlingStackOverflow()) {
1331     self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1332     return;
1333   }
1334 
1335   std::ostringstream oss;
1336   size_t total_bytes_free = GetFreeMemory();
1337   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
1338       << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
1339   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
1340   if (total_bytes_free >= byte_count) {
1341     space::AllocSpace* space = nullptr;
1342     if (allocator_type == kAllocatorTypeNonMoving) {
1343       space = non_moving_space_;
1344     } else if (allocator_type == kAllocatorTypeRosAlloc ||
1345                allocator_type == kAllocatorTypeDlMalloc) {
1346       space = main_space_;
1347     } else if (allocator_type == kAllocatorTypeBumpPointer ||
1348                allocator_type == kAllocatorTypeTLAB) {
1349       space = bump_pointer_space_;
1350     } else if (allocator_type == kAllocatorTypeRegion ||
1351                allocator_type == kAllocatorTypeRegionTLAB) {
1352       space = region_space_;
1353     }
1354     if (space != nullptr) {
1355       space->LogFragmentationAllocFailure(oss, byte_count);
1356     }
1357   }
1358   self->ThrowOutOfMemoryError(oss.str().c_str());
1359 }
1360 
1361 void Heap::DoPendingCollectorTransition() {
1362   CollectorType desired_collector_type = desired_collector_type_;
1363   // Launch homogeneous space compaction if it is desired.
1364   if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
1365     if (!CareAboutPauseTimes()) {
1366       PerformHomogeneousSpaceCompact();
1367     } else {
1368       VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
1369     }
1370   } else {
1371     TransitionCollector(desired_collector_type);
1372   }
1373 }
1374 
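// Releases memory back to the system without collecting: deflates monitors (only when pauses are
// not a concern), trims the local and global indirect reference tables, trims the malloc-backed
// spaces, and releases unused pages from the arena pool used by the JIT and verifier.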
1375 void Heap::Trim(Thread* self) {
1376   Runtime* const runtime = Runtime::Current();
1377   if (!CareAboutPauseTimes()) {
1378     // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
1379     // about pauses.
1380     ScopedTrace trace("Deflating monitors");
1381     ScopedSuspendAll ssa(__FUNCTION__);
1382     uint64_t start_time = NanoTime();
1383     size_t count = runtime->GetMonitorList()->DeflateMonitors();
1384     VLOG(heap) << "Deflating " << count << " monitors took "
1385         << PrettyDuration(NanoTime() - start_time);
1386   }
1387   TrimIndirectReferenceTables(self);
1388   TrimSpaces(self);
1389   // Trim arenas that may have been used by JIT or verifier.
1390   runtime->GetArenaPool()->TrimMaps();
1391 }
1392 
1393 class TrimIndirectReferenceTableClosure : public Closure {
1394  public:
1395   explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
1396   }
1397   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1398     thread->GetJniEnv()->locals.Trim();
1399     // If thread is a running mutator, then act on behalf of the trim thread.
1400     // See the code in ThreadList::RunCheckpoint.
1401     barrier_->Pass(Thread::Current());
1402   }
1403 
1404  private:
1405   Barrier* const barrier_;
1406 };
1407 
1408 void Heap::TrimIndirectReferenceTables(Thread* self) {
1409   ScopedObjectAccess soa(self);
1410   ScopedTrace trace(__PRETTY_FUNCTION__);
1411   JavaVMExt* vm = soa.Vm();
1412   // Trim globals indirect reference table.
1413   vm->TrimGlobals();
1414   // Trim locals indirect reference tables.
1415   Barrier barrier(0);
1416   TrimIndirectReferenceTableClosure closure(&barrier);
1417   ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1418   size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
1419   if (barrier_count != 0) {
1420     barrier.Increment(self, barrier_count);
1421   }
1422 }
1423 
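// Marks a collection (or pseudo-collection such as a heap trim) as running so that concurrent GC
// requests and collector transitions block in WaitForGcToCompleteLocked until the matching
// FinishGC call.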
1424 void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
1425   MutexLock mu(self, *gc_complete_lock_);
1426   // Ensure there is only one GC at a time.
1427   WaitForGcToCompleteLocked(cause, self);
1428   collector_type_running_ = collector_type;
1429 }
1430 
1431 void Heap::TrimSpaces(Thread* self) {
1432   {
1433     // Need to do this before acquiring the locks since we don't want to get suspended while
1434     // holding any locks.
1435     ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1436     // Pretend we are doing a GC to prevent background compaction from deleting the space we are
1437     // trimming.
1438     StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
1439   }
1440   ScopedTrace trace(__PRETTY_FUNCTION__);
1441   const uint64_t start_ns = NanoTime();
1442   // Trim the managed spaces.
1443   uint64_t total_alloc_space_allocated = 0;
1444   uint64_t total_alloc_space_size = 0;
1445   uint64_t managed_reclaimed = 0;
1446   {
1447     ScopedObjectAccess soa(self);
1448     for (const auto& space : continuous_spaces_) {
1449       if (space->IsMallocSpace()) {
1450         gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
1451         if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
1452           // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
1453           // for a long period of time.
1454           managed_reclaimed += malloc_space->Trim();
1455         }
1456         total_alloc_space_size += malloc_space->Size();
1457       }
1458     }
1459   }
1460   total_alloc_space_allocated = GetBytesAllocated();
1461   if (large_object_space_ != nullptr) {
1462     total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
1463   }
1464   if (bump_pointer_space_ != nullptr) {
1465     total_alloc_space_allocated -= bump_pointer_space_->Size();
1466   }
1467   if (region_space_ != nullptr) {
1468     total_alloc_space_allocated -= region_space_->GetBytesAllocated();
1469   }
1470   const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
1471       static_cast<float>(total_alloc_space_size);
1472   uint64_t gc_heap_end_ns = NanoTime();
1473   // We never move things in the native heap, so we can finish the GC at this point.
1474   FinishGC(self, collector::kGcTypeNone);
1475 
1476   VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1477       << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of "
1478       << static_cast<int>(100 * managed_utilization) << "%.";
1479 }
1480 
1481 bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1482   // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1483   // taking the lock.
1484   if (obj == nullptr) {
1485     return true;
1486   }
1487   return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
1488 }
1489 
1490 bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1491   return FindContinuousSpaceFromObject(obj, true) != nullptr;
1492 }
1493 
1494 bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1495   if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1496     return false;
1497   }
1498   for (const auto& space : continuous_spaces_) {
1499     if (space->HasAddress(obj)) {
1500       return true;
1501     }
1502   }
1503   return false;
1504 }
1505 
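// Liveness check used by verification: fast paths for the bump pointer, temp and region spaces,
// then the live bitmaps of the continuous/discontinuous spaces, then the allocation and live
// stacks. Unsorted stack searches are retried a few times with short sleeps because the stacks can
// be swapped concurrently; the bitmaps are re-checked at the end to cover the window between
// marking an object and clearing the stack that contained it.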
1506 bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
1507                               bool search_live_stack, bool sorted) {
1508   if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1509     return false;
1510   }
1511   if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
1512     mirror::Class* klass = obj->GetClass<kVerifyNone>();
1513     if (obj == klass) {
1514       // This case happens for java.lang.Class.
1515       return true;
1516     }
1517     return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1518   } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
1519     // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1520     // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
1521     return temp_space_->Contains(obj);
1522   }
1523   if (region_space_ != nullptr && region_space_->HasAddress(obj)) {
1524     return true;
1525   }
1526   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1527   space::DiscontinuousSpace* d_space = nullptr;
1528   if (c_space != nullptr) {
1529     if (c_space->GetLiveBitmap()->Test(obj)) {
1530       return true;
1531     }
1532   } else {
1533     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1534     if (d_space != nullptr) {
1535       if (d_space->GetLiveBitmap()->Test(obj)) {
1536         return true;
1537       }
1538     }
1539   }
1540   // This is covering the allocation/live stack swapping that is done without mutators suspended.
1541   for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1542     if (i > 0) {
1543       NanoSleep(MsToNs(10));
1544     }
1545     if (search_allocation_stack) {
1546       if (sorted) {
1547         if (allocation_stack_->ContainsSorted(obj)) {
1548           return true;
1549         }
1550       } else if (allocation_stack_->Contains(obj)) {
1551         return true;
1552       }
1553     }
1554 
1555     if (search_live_stack) {
1556       if (sorted) {
1557         if (live_stack_->ContainsSorted(obj)) {
1558           return true;
1559         }
1560       } else if (live_stack_->Contains(obj)) {
1561         return true;
1562       }
1563     }
1564   }
1565   // We need to check the bitmaps again since there is a race where we mark something as live and
1566   // then clear the stack containing it.
1567   if (c_space != nullptr) {
1568     if (c_space->GetLiveBitmap()->Test(obj)) {
1569       return true;
1570     }
1571   } else {
1572     d_space = FindDiscontinuousSpaceFromObject(obj, true);
1573     if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
1574       return true;
1575     }
1576   }
1577   return false;
1578 }
1579 
1580 std::string Heap::DumpSpaces() const {
1581   std::ostringstream oss;
1582   DumpSpaces(oss);
1583   return oss.str();
1584 }
1585 
1586 void Heap::DumpSpaces(std::ostream& stream) const {
1587   for (const auto& space : continuous_spaces_) {
1588     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1589     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1590     stream << space << " " << *space << "\n";
1591     if (live_bitmap != nullptr) {
1592       stream << live_bitmap << " " << *live_bitmap << "\n";
1593     }
1594     if (mark_bitmap != nullptr) {
1595       stream << mark_bitmap << " " << *mark_bitmap << "\n";
1596     }
1597   }
1598   for (const auto& space : discontinuous_spaces_) {
1599     stream << space << " " << *space << "\n";
1600   }
1601 }
1602 
1603 void Heap::VerifyObjectBody(mirror::Object* obj) {
1604   if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1605     return;
1606   }
1607 
1608   // Ignore early dawn of the universe verifications.
1609   if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1610     return;
1611   }
1612   CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned";
1613   mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1614   CHECK(c != nullptr) << "Null class in object " << obj;
1615   CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1616   CHECK(VerifyClassClass(c));
1617 
1618   if (verify_object_mode_ > kVerifyObjectModeFast) {
1619     // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1620     CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1621   }
1622 }
1623 
1624 void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
1625   reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
1626 }
1627 
1628 void Heap::VerifyHeap() {
1629   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1630   GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
1631 }
1632 
1633 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1634   // Use signed comparison since freed bytes can be negative when a background compaction to
1635   // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
1636   // free list backed space, which typically increases the memory footprint due to padding and binning.
1637   DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1638   // Note: This relies on 2s complement for handling negative freed_bytes.
1639   num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1640   if (Runtime::Current()->HasStatsEnabled()) {
1641     RuntimeStats* thread_stats = Thread::Current()->GetStats();
1642     thread_stats->freed_objects += freed_objects;
1643     thread_stats->freed_bytes += freed_bytes;
1644     // TODO: Do this concurrently.
1645     RuntimeStats* global_stats = Runtime::Current()->GetStats();
1646     global_stats->freed_objects += freed_objects;
1647     global_stats->freed_bytes += freed_bytes;
1648   }
1649 }
1650 
1651 void Heap::RecordFreeRevoke() {
1652   // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1653   // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1654   // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_ all the
1655   // way to zero, since the remainder will be subtracted at the next GC.
1656   size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1657   CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1658            bytes_freed) << "num_bytes_freed_revoke_ underflow";
1659   CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1660            bytes_freed) << "num_bytes_allocated_ underflow";
1661   GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1662 }
1663 
1664 space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1665   if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1666     return rosalloc_space_;
1667   }
1668   for (const auto& space : continuous_spaces_) {
1669     if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1670       if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1671         return space->AsContinuousSpace()->AsRosAllocSpace();
1672       }
1673     }
1674   }
1675   return nullptr;
1676 }
1677 
1678 static inline bool EntrypointsInstrumented() SHARED_REQUIRES(Locks::mutator_lock_) {
1679   instrumentation::Instrumentation* const instrumentation =
1680       Runtime::Current()->GetInstrumentation();
1681   return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1682 }
1683 
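// Allocation slow path taken after TryToAllocate has failed. The strategy escalates:
//   1. Wait for any in-progress GC, then retry the allocation.
//   2. Run the next planned GC type and retry.
//   3. Walk the rest of gc_plan_ (progressively more thorough collections), retrying after each.
//   4. Retry once more while allowing the heap to grow.
//   5. Run the most thorough GC with SoftReferences cleared (required before throwing OOME) and
//      retry with growth allowed.
//   6. Allocator-specific recovery: homogeneous space compaction for the malloc allocators, or
//      disabling the moving GC when the non-moving space is full.
// Any suspension point may change the current allocator or enable instrumentation, in which case
// we bail out with null so the caller can restart with the correct entrypoint.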
1684 mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1685                                              AllocatorType allocator,
1686                                              bool instrumented,
1687                                              size_t alloc_size,
1688                                              size_t* bytes_allocated,
1689                                              size_t* usable_size,
1690                                              size_t* bytes_tl_bulk_allocated,
1691                                              mirror::Class** klass) {
1692   bool was_default_allocator = allocator == GetCurrentAllocator();
1693   // Make sure there is no pending exception since we may need to throw an OOME.
1694   self->AssertNoPendingException();
1695   DCHECK(klass != nullptr);
1696   StackHandleScope<1> hs(self);
1697   HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1698   klass = nullptr;  // Invalidate for safety.
1699   // The allocation failed. If the GC is running, block until it completes, and then retry the
1700   // allocation.
1701   collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1702   // If we were the default allocator but the allocator changed while we were suspended,
1703   // abort the allocation.
1704   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1705       (!instrumented && EntrypointsInstrumented())) {
1706     return nullptr;
1707   }
1708   if (last_gc != collector::kGcTypeNone) {
1709     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1710     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1711                                                      usable_size, bytes_tl_bulk_allocated);
1712     if (ptr != nullptr) {
1713       return ptr;
1714     }
1715   }
1716 
1717   collector::GcType tried_type = next_gc_type_;
1718   const bool gc_ran =
1719       CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1720   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1721       (!instrumented && EntrypointsInstrumented())) {
1722     return nullptr;
1723   }
1724   if (gc_ran) {
1725     mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1726                                                      usable_size, bytes_tl_bulk_allocated);
1727     if (ptr != nullptr) {
1728       return ptr;
1729     }
1730   }
1731 
1732   // Loop through our different GC types and try to GC until we get enough free memory.
1733   for (collector::GcType gc_type : gc_plan_) {
1734     if (gc_type == tried_type) {
1735       continue;
1736     }
1737     // Attempt to run the collector, if we succeed, re-try the allocation.
1738     const bool plan_gc_ran =
1739         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1740     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1741         (!instrumented && EntrypointsInstrumented())) {
1742       return nullptr;
1743     }
1744     if (plan_gc_ran) {
1745       // Did we free sufficient memory for the allocation to succeed?
1746       mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1747                                                        usable_size, bytes_tl_bulk_allocated);
1748       if (ptr != nullptr) {
1749         return ptr;
1750       }
1751     }
1752   }
1753   // Allocations have failed after GCs; this is an exceptional state.
1754   // Try harder, growing the heap if necessary.
1755   mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1756                                                   usable_size, bytes_tl_bulk_allocated);
1757   if (ptr != nullptr) {
1758     return ptr;
1759   }
1760   // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1761   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1762   // VM spec requires that all SoftReferences have been collected and cleared before throwing
1763   // OOME.
1764   VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1765            << " allocation";
1766   // TODO: Run finalization, but this may cause more allocations to occur.
1767   // We don't need a WaitForGcToComplete here either.
1768   DCHECK(!gc_plan_.empty());
1769   CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1770   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1771       (!instrumented && EntrypointsInstrumented())) {
1772     return nullptr;
1773   }
1774   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
1775                                   bytes_tl_bulk_allocated);
1776   if (ptr == nullptr) {
1777     const uint64_t current_time = NanoTime();
1778     switch (allocator) {
1779       case kAllocatorTypeRosAlloc:
1780         // Fall-through.
1781       case kAllocatorTypeDlMalloc: {
1782         if (use_homogeneous_space_compaction_for_oom_ &&
1783             current_time - last_time_homogeneous_space_compaction_by_oom_ >
1784             min_interval_homogeneous_space_compaction_by_oom_) {
1785           last_time_homogeneous_space_compaction_by_oom_ = current_time;
1786           HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1787           // Thread suspension could have occurred.
1788           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1789               (!instrumented && EntrypointsInstrumented())) {
1790             return nullptr;
1791           }
1792           switch (result) {
1793             case HomogeneousSpaceCompactResult::kSuccess:
1794               // If the allocation succeeded, we delayed an OOM.
1795               ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1796                                               usable_size, bytes_tl_bulk_allocated);
1797               if (ptr != nullptr) {
1798                 count_delayed_oom_++;
1799               }
1800               break;
1801             case HomogeneousSpaceCompactResult::kErrorReject:
1802               // Reject due to disabled moving GC.
1803               break;
1804             case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1805               // Throw OOM by default.
1806               break;
1807             default: {
1808               UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1809                   << static_cast<size_t>(result);
1810               UNREACHABLE();
1811             }
1812           }
1813           // Always print that we ran homogeneous space compaction since this can cause jank.
1814           VLOG(heap) << "Ran heap homogeneous space compaction, "
1815                     << " requested defragmentation "
1816                     << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1817                     << " performed defragmentation "
1818                     << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1819                     << " ignored homogeneous space compaction "
1820                     << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1821                     << " delayed count = "
1822                     << count_delayed_oom_.LoadSequentiallyConsistent();
1823         }
1824         break;
1825       }
1826       case kAllocatorTypeNonMoving: {
1827         // Try to transition the heap if the allocation failure was due to the space being full.
1828         if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
1829           // If we aren't out of memory then the OOM was probably from the non moving space being
1830           // full. Attempt to disable compaction and turn the main space into a non moving space.
1831           DisableMovingGc();
1832           // Thread suspension could have occurred.
1833           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1834               (!instrumented && EntrypointsInstrumented())) {
1835             return nullptr;
1836           }
1837           // If we are still a moving GC then something must have caused the transition to fail.
1838           if (IsMovingGc(collector_type_)) {
1839             MutexLock mu(self, *gc_complete_lock_);
1840             // If we couldn't disable moving GC, just throw OOME and return null.
1841             LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1842                          << disable_moving_gc_count_;
1843           } else {
1844             LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1845             ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1846                                             usable_size, bytes_tl_bulk_allocated);
1847           }
1848         }
1849         break;
1850       }
1851       default: {
1852         // Do nothing for other allocators.
1853       }
1854     }
1855   }
1856   // If the allocation hasn't succeeded by this point, throw an OOM error.
1857   if (ptr == nullptr) {
1858     ThrowOutOfMemoryError(self, alloc_size, allocator);
1859   }
1860   return ptr;
1861 }
1862 
1863 void Heap::SetTargetHeapUtilization(float target) {
1864   DCHECK_GT(target, 0.0f);  // asserted in Java code
1865   DCHECK_LT(target, 1.0f);
1866   target_utilization_ = target;
1867 }
1868 
1869 size_t Heap::GetObjectsAllocated() const {
1870   Thread* const self = Thread::Current();
1871   ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1872   // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1873   ScopedSuspendAll ssa(__FUNCTION__);
1874   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1875   size_t total = 0;
1876   for (space::AllocSpace* space : alloc_spaces_) {
1877     total += space->GetObjectsAllocated();
1878   }
1879   return total;
1880 }
1881 
1882 uint64_t Heap::GetObjectsAllocatedEver() const {
1883   uint64_t total = GetObjectsFreedEver();
1884   // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1885   if (Thread::Current() != nullptr) {
1886     total += GetObjectsAllocated();
1887   }
1888   return total;
1889 }
1890 
1891 uint64_t Heap::GetBytesAllocatedEver() const {
1892   return GetBytesFreedEver() + GetBytesAllocated();
1893 }
1894 
1895 class InstanceCounter {
1896  public:
1897   InstanceCounter(const std::vector<mirror::Class*>& classes,
1898                   bool use_is_assignable_from,
1899                   uint64_t* counts)
1900       SHARED_REQUIRES(Locks::mutator_lock_)
1901       : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
1902 
1903   static void Callback(mirror::Object* obj, void* arg)
1904       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1905     InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1906     mirror::Class* instance_class = obj->GetClass();
1907     CHECK(instance_class != nullptr);
1908     for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1909       mirror::Class* klass = instance_counter->classes_[i];
1910       if (instance_counter->use_is_assignable_from_) {
1911         if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
1912           ++instance_counter->counts_[i];
1913         }
1914       } else if (instance_class == klass) {
1915         ++instance_counter->counts_[i];
1916       }
1917     }
1918   }
1919 
1920  private:
1921   const std::vector<mirror::Class*>& classes_;
1922   bool use_is_assignable_from_;
1923   uint64_t* const counts_;
1924   DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1925 };
1926 
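// Counts, for each class in 'classes', how many heap objects are instances of it (exact class
// match, or any subclass when use_is_assignable_from is true). Illustrative usage sketch with a
// hypothetical caller and class pointer, not code from this file:
//   std::vector<mirror::Class*> classes = { string_class };
//   uint64_t counts[1] = { 0u };
//   heap->CountInstances(classes, /* use_is_assignable_from */ false, counts);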
1927 void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
1928                           uint64_t* counts) {
1929   InstanceCounter counter(classes, use_is_assignable_from, counts);
1930   VisitObjects(InstanceCounter::Callback, &counter);
1931 }
1932 
1933 class InstanceCollector {
1934  public:
1935   InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
1936       SHARED_REQUIRES(Locks::mutator_lock_)
1937       : class_(c), max_count_(max_count), instances_(instances) {
1938   }
1939   static void Callback(mirror::Object* obj, void* arg)
1940       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1941     DCHECK(arg != nullptr);
1942     InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1943     if (obj->GetClass() == instance_collector->class_) {
1944       if (instance_collector->max_count_ == 0 ||
1945           instance_collector->instances_.size() < instance_collector->max_count_) {
1946         instance_collector->instances_.push_back(obj);
1947       }
1948     }
1949   }
1950 
1951  private:
1952   const mirror::Class* const class_;
1953   const uint32_t max_count_;
1954   std::vector<mirror::Object*>& instances_;
1955   DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1956 };
1957 
1958 void Heap::GetInstances(mirror::Class* c,
1959                         int32_t max_count,
1960                         std::vector<mirror::Object*>& instances) {
1961   InstanceCollector collector(c, max_count, instances);
1962   VisitObjects(&InstanceCollector::Callback, &collector);
1963 }
1964 
1965 class ReferringObjectsFinder {
1966  public:
1967   ReferringObjectsFinder(mirror::Object* object,
1968                          int32_t max_count,
1969                          std::vector<mirror::Object*>& referring_objects)
1970       SHARED_REQUIRES(Locks::mutator_lock_)
1971       : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1972   }
1973 
1974   static void Callback(mirror::Object* obj, void* arg)
1975       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1976     reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1977   }
1978 
1979   // For bitmap Visit.
1980   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1981   // annotalysis on visitors.
1982   void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1983     o->VisitReferences(*this, VoidFunctor());
1984   }
1985 
1986   // For Object::VisitReferences.
1987   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
1988       SHARED_REQUIRES(Locks::mutator_lock_) {
1989     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1990     if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1991       referring_objects_.push_back(obj);
1992     }
1993   }
1994 
1995   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
1996       const {}
1997   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
1998 
1999  private:
2000   const mirror::Object* const object_;
2001   const uint32_t max_count_;
2002   std::vector<mirror::Object*>& referring_objects_;
2003   DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
2004 };
2005 
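// Collects up to max_count heap objects that hold a reference to 'o' (max_count == 0 means no
// limit). Illustrative usage sketch with a hypothetical caller, not code from this file:
//   std::vector<mirror::Object*> referrers;
//   heap->GetReferringObjects(target, /* max_count */ 16, referrers);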
2006 void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
2007                                std::vector<mirror::Object*>& referring_objects) {
2008   ReferringObjectsFinder finder(o, max_count, referring_objects);
2009   VisitObjects(&ReferringObjectsFinder::Callback, &finder);
2010 }
2011 
2012 void Heap::CollectGarbage(bool clear_soft_references) {
2013   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
2014   // last GC will not necessarily have been cleared.
2015   CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
2016 }
2017 
2018 bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
2019   return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
2020       foreground_collector_type_ == kCollectorTypeCMS;
2021 }
2022 
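// Compacts the main malloc space into its backup space and swaps the two, defragmenting the heap
// without changing the collector type. Bails out (kErrorReject / kErrorUnsupported /
// kErrorVMShuttingDown) if a moving GC is currently disabled, the configuration does not support
// the compaction, or the runtime is shutting down.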
2023 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
2024   Thread* self = Thread::Current();
2025   // Inc requested homogeneous space compaction.
2026   count_requested_homogeneous_space_compaction_++;
2027   // Store performed homogeneous space compaction at a new request arrival.
2028   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2029   Locks::mutator_lock_->AssertNotHeld(self);
2030   {
2031     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2032     MutexLock mu(self, *gc_complete_lock_);
2033     // Ensure there is only one GC at a time.
2034     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
2035     // Homogeneous space compaction is a copying transition, so it can't run while the moving GC
2036     // disable count is non-zero.
2037     // If the collector type changed to something which doesn't benefit from homogeneous space
2038     // compaction, exit.
2039     if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
2040         !main_space_->CanMoveObjects()) {
2041       return kErrorReject;
2042     }
2043     if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
2044       return kErrorUnsupported;
2045     }
2046     collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
2047   }
2048   if (Runtime::Current()->IsShuttingDown(self)) {
2049     // Don't allow heap transitions to happen if the runtime is shutting down since these can
2050     // cause objects to get finalized.
2051     FinishGC(self, collector::kGcTypeNone);
2052     return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
2053   }
2054   collector::GarbageCollector* collector;
2055   {
2056     ScopedSuspendAll ssa(__FUNCTION__);
2057     uint64_t start_time = NanoTime();
2058     // Launch compaction.
2059     space::MallocSpace* to_space = main_space_backup_.release();
2060     space::MallocSpace* from_space = main_space_;
2061     to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2062     const uint64_t space_size_before_compaction = from_space->Size();
2063     AddSpace(to_space);
2064     // Make sure that we will have enough room to copy.
2065     CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
2066     collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
2067     const uint64_t space_size_after_compaction = to_space->Size();
2068     main_space_ = to_space;
2069     main_space_backup_.reset(from_space);
2070     RemoveSpace(from_space);
2071     SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
2072     // Update performed homogeneous space compaction count.
2073     count_performed_homogeneous_space_compaction_++;
2074     // Print the statistics log and resume all threads.
2075     uint64_t duration = NanoTime() - start_time;
2076     VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
2077                << PrettySize(space_size_before_compaction) << " -> "
2078                << PrettySize(space_size_after_compaction) << " compact-ratio: "
2079                << std::fixed << static_cast<double>(space_size_after_compaction) /
2080                static_cast<double>(space_size_before_compaction);
2081   }
2082   // Finish GC.
2083   reference_processor_->EnqueueClearedReferences(self);
2084   GrowForUtilization(semi_space_collector_);
2085   LogGC(kGcCauseHomogeneousSpaceCompact, collector);
2086   FinishGC(self, collector::kGcTypeFull);
2087   {
2088     ScopedObjectAccess soa(self);
2089     soa.Vm()->UnloadNativeLibraries();
2090   }
2091   return HomogeneousSpaceCompactResult::kSuccess;
2092 }
2093 
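// Switches between collector types at runtime (for example when the process moves between
// foreground and background states). Registers itself as the running collector, then with all
// threads suspended rebuilds the space layout (bump pointer vs. malloc spaces), compacts live
// objects into the new layout, and finally updates collector_type_ via ChangeCollector.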
2094 void Heap::TransitionCollector(CollectorType collector_type) {
2095   if (collector_type == collector_type_) {
2096     return;
2097   }
2098   VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
2099              << " -> " << static_cast<int>(collector_type);
2100   uint64_t start_time = NanoTime();
2101   uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2102   Runtime* const runtime = Runtime::Current();
2103   Thread* const self = Thread::Current();
2104   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2105   Locks::mutator_lock_->AssertNotHeld(self);
2106   // Busy wait until we can GC (StartGC can fail if we have a non-zero
2107   // compacting_gc_disable_count_; this should rarely occur).
2108   for (;;) {
2109     {
2110       ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2111       MutexLock mu(self, *gc_complete_lock_);
2112       // Ensure there is only one GC at a time.
2113       WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
2114       // Currently we only need a heap transition if we switch from a moving collector to a
2115       // non-moving one, or vice versa.
2116       const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
2117       // If someone else beat us to it and changed the collector before we could, exit.
2118       // This is safe to do before the suspend all since we set the collector_type_running_ before
2119       // we exit the loop. If another thread attempts to do the heap transition before we exit,
2120       // then it would get blocked on WaitForGcToCompleteLocked.
2121       if (collector_type == collector_type_) {
2122         return;
2123       }
2124       // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet released it.
2125       if (!copying_transition || disable_moving_gc_count_ == 0) {
2126         // TODO: Not hard code in semi-space collector?
2127         collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
2128         break;
2129       }
2130     }
2131     usleep(1000);
2132   }
2133   if (runtime->IsShuttingDown(self)) {
2134     // Don't allow heap transitions to happen if the runtime is shutting down since these can
2135     // cause objects to get finalized.
2136     FinishGC(self, collector::kGcTypeNone);
2137     return;
2138   }
2139   collector::GarbageCollector* collector = nullptr;
2140   {
2141     ScopedSuspendAll ssa(__FUNCTION__);
2142     switch (collector_type) {
2143       case kCollectorTypeSS: {
2144         if (!IsMovingGc(collector_type_)) {
2145           // Create the bump pointer space from the backup space.
2146           CHECK(main_space_backup_ != nullptr);
2147           std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
2148           // We are transitioning from non moving GC -> moving GC. Since we copied from the bump
2149           // pointer space during the last transition, it will be protected.
2150           CHECK(mem_map != nullptr);
2151           mem_map->Protect(PROT_READ | PROT_WRITE);
2152           bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2153                                                                           mem_map.release());
2154           AddSpace(bump_pointer_space_);
2155           collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2156           // Use the now empty main space mem map for the bump pointer temp space.
2157           mem_map.reset(main_space_->ReleaseMemMap());
2158           // Unset the pointers just in case.
2159           if (dlmalloc_space_ == main_space_) {
2160             dlmalloc_space_ = nullptr;
2161           } else if (rosalloc_space_ == main_space_) {
2162             rosalloc_space_ = nullptr;
2163           }
2164           // Remove the main space so that we don't try to trim it; trimming doesn't work for debug
2165           // builds since RosAlloc attempts to read the magic number from a protected page.
2166           RemoveSpace(main_space_);
2167           RemoveRememberedSet(main_space_);
2168           delete main_space_;  // Delete the space since it has been removed.
2169           main_space_ = nullptr;
2170           RemoveRememberedSet(main_space_backup_.get());
2171           main_space_backup_.reset(nullptr);  // Deletes the space.
2172           temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2173                                                                   mem_map.release());
2174           AddSpace(temp_space_);
2175         }
2176         break;
2177       }
2178       case kCollectorTypeMS:
2179         // Fall through.
2180       case kCollectorTypeCMS: {
2181         if (IsMovingGc(collector_type_)) {
2182           CHECK(temp_space_ != nullptr);
2183           std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
2184           RemoveSpace(temp_space_);
2185           temp_space_ = nullptr;
2186           mem_map->Protect(PROT_READ | PROT_WRITE);
2187           CreateMainMallocSpace(mem_map.get(),
2188                                 kDefaultInitialSize,
2189                                 std::min(mem_map->Size(), growth_limit_),
2190                                 mem_map->Size());
2191           mem_map.release();
2192           // Compact to the main space from the bump pointer space, don't need to swap semispaces.
2193           AddSpace(main_space_);
2194           collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2195           mem_map.reset(bump_pointer_space_->ReleaseMemMap());
2196           RemoveSpace(bump_pointer_space_);
2197           bump_pointer_space_ = nullptr;
2198           const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2199           // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2200           if (kIsDebugBuild && kUseRosAlloc) {
2201             mem_map->Protect(PROT_READ | PROT_WRITE);
2202           }
2203           main_space_backup_.reset(CreateMallocSpaceFromMemMap(
2204               mem_map.get(),
2205               kDefaultInitialSize,
2206               std::min(mem_map->Size(), growth_limit_),
2207               mem_map->Size(),
2208               name,
2209               true));
2210           if (kIsDebugBuild && kUseRosAlloc) {
2211             mem_map->Protect(PROT_NONE);
2212           }
2213           mem_map.release();
2214         }
2215         break;
2216       }
2217       default: {
2218         LOG(FATAL) << "Attempted to transition to invalid collector type "
2219                    << static_cast<size_t>(collector_type);
2220         break;
2221       }
2222     }
2223     ChangeCollector(collector_type);
2224   }
2225   // Can't call into java code with all threads suspended.
2226   reference_processor_->EnqueueClearedReferences(self);
2227   uint64_t duration = NanoTime() - start_time;
2228   GrowForUtilization(semi_space_collector_);
2229   DCHECK(collector != nullptr);
2230   LogGC(kGcCauseCollectorTransition, collector);
2231   FinishGC(self, collector::kGcTypeFull);
2232   {
2233     ScopedObjectAccess soa(self);
2234     soa.Vm()->UnloadNativeLibraries();
2235   }
2236   int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
2237   int32_t delta_allocated = before_allocated - after_allocated;
2238   std::string saved_str;
2239   if (delta_allocated >= 0) {
2240     saved_str = " saved at least " + PrettySize(delta_allocated);
2241   } else {
2242     saved_str = " expanded " + PrettySize(-delta_allocated);
2243   }
2244   VLOG(heap) << "Collector transition to " << collector_type << " took "
2245              << PrettyDuration(duration) << saved_str;
2246 }
2247 
2248 void Heap::ChangeCollector(CollectorType collector_type) {
2249   // TODO: Only do this with all mutators suspended to avoid races.
2250   if (collector_type != collector_type_) {
2251     if (collector_type == kCollectorTypeMC) {
2252       // Don't allow mark compact unless support is compiled in.
2253       CHECK(kMarkCompactSupport);
2254     }
2255     collector_type_ = collector_type;
2256     gc_plan_.clear();
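    // gc_plan_ lists the GC types available for this collector, ordered from cheapest (sticky)
    // to most thorough (full).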
2257     switch (collector_type_) {
2258       case kCollectorTypeCC: {
2259         gc_plan_.push_back(collector::kGcTypeFull);
2260         if (use_tlab_) {
2261           ChangeAllocator(kAllocatorTypeRegionTLAB);
2262         } else {
2263           ChangeAllocator(kAllocatorTypeRegion);
2264         }
2265         break;
2266       }
2267       case kCollectorTypeMC:  // Fall-through.
2268       case kCollectorTypeSS:  // Fall-through.
2269       case kCollectorTypeGSS: {
2270         gc_plan_.push_back(collector::kGcTypeFull);
2271         if (use_tlab_) {
2272           ChangeAllocator(kAllocatorTypeTLAB);
2273         } else {
2274           ChangeAllocator(kAllocatorTypeBumpPointer);
2275         }
2276         break;
2277       }
2278       case kCollectorTypeMS: {
2279         gc_plan_.push_back(collector::kGcTypeSticky);
2280         gc_plan_.push_back(collector::kGcTypePartial);
2281         gc_plan_.push_back(collector::kGcTypeFull);
2282         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2283         break;
2284       }
2285       case kCollectorTypeCMS: {
2286         gc_plan_.push_back(collector::kGcTypeSticky);
2287         gc_plan_.push_back(collector::kGcTypePartial);
2288         gc_plan_.push_back(collector::kGcTypeFull);
2289         ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
2290         break;
2291       }
2292       default: {
2293         UNIMPLEMENTED(FATAL);
2294         UNREACHABLE();
2295       }
2296     }
2297     if (IsGcConcurrent()) {
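      // Start a concurrent GC once allocation comes within kMinConcurrentRemainingBytes of the
      // current footprint limit; the std::max guards against unsigned underflow when the
      // footprint is smaller than the threshold.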
2298       concurrent_start_bytes_ =
2299           std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
2300     } else {
2301       concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2302     }
2303   }
2304 }
2305 
2306 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
2307 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
2308  public:
2309   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
2310       : SemiSpace(heap, false, "zygote collector"),
2311         bin_live_bitmap_(nullptr),
2312         bin_mark_bitmap_(nullptr),
2313         is_running_on_memory_tool_(is_running_on_memory_tool) {}
2314 
2315   void BuildBins(space::ContinuousSpace* space) {
2316     bin_live_bitmap_ = space->GetLiveBitmap();
2317     bin_mark_bitmap_ = space->GetMarkBitmap();
2318     BinContext context;
2319     context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
2320     context.collector_ = this;
2321     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2322     // Note: This requires traversing the space in increasing order of object addresses.
2323     bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
2324     // Add the last bin which spans after the last object to the end of the space.
2325     AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
2326   }
2327 
2328  private:
2329   struct BinContext {
2330     uintptr_t prev_;  // The end of the previous object.
2331     ZygoteCompactingCollector* collector_;
2332   };
2333   // Maps from bin sizes to locations.
2334   std::multimap<size_t, uintptr_t> bins_;
2335   // Live bitmap of the space which contains the bins.
2336   accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
2337   // Mark bitmap of the space which contains the bins.
2338   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
2339   const bool is_running_on_memory_tool_;
2340 
2341   static void Callback(mirror::Object* obj, void* arg)
2342       SHARED_REQUIRES(Locks::mutator_lock_) {
2343     DCHECK(arg != nullptr);
2344     BinContext* context = reinterpret_cast<BinContext*>(arg);
2345     ZygoteCompactingCollector* collector = context->collector_;
2346     uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
2347     size_t bin_size = object_addr - context->prev_;
2348     // Add the bin consisting of the end of the previous object to the start of the current object.
2349     collector->AddBin(bin_size, context->prev_);
2350     context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
2351   }
2352 
2353   void AddBin(size_t size, uintptr_t position) {
2354     if (is_running_on_memory_tool_) {
2355       MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
2356     }
2357     if (size != 0) {
2358       bins_.insert(std::make_pair(size, position));
2359     }
2360   }
2361 
2362   virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
2363     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
2364     // allocator.
2365     return false;
2366   }
2367 
2368   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
2369       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
2370     size_t obj_size = obj->SizeOf();
2371     size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
2372     mirror::Object* forward_address;
2373     // Find the smallest bin which we can move obj in.
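    // lower_bound yields the smallest recorded gap that is at least alloc_size bytes, i.e. a
    // best-fit placement among the holes recorded by BuildBins.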
2374     auto it = bins_.lower_bound(alloc_size);
2375     if (it == bins_.end()) {
2376       // No available space in the bins, place it in the target space instead (grows the zygote
2377       // space).
2378       size_t bytes_allocated, dummy;
2379       forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2380       if (to_space_live_bitmap_ != nullptr) {
2381         to_space_live_bitmap_->Set(forward_address);
2382       } else {
2383         GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2384         GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2385       }
2386     } else {
2387       size_t size = it->first;
2388       uintptr_t pos = it->second;
2389       bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
2390       forward_address = reinterpret_cast<mirror::Object*>(pos);
2391       // Set the live and mark bits so that sweeping system weaks works properly.
2392       bin_live_bitmap_->Set(forward_address);
2393       bin_mark_bitmap_->Set(forward_address);
2394       DCHECK_GE(size, alloc_size);
2395       // Add a new bin with the remaining space.
2396       AddBin(size - alloc_size, pos + alloc_size);
2397     }
2398     // Copy the object over to its new location. Copy obj_size (not alloc_size) so memory tools don't report a read past the end of the source object.
2399     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2400     if (kUseBakerOrBrooksReadBarrier) {
2401       obj->AssertReadBarrierPointer();
2402       if (kUseBrooksReadBarrier) {
2403         DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
2404         forward_address->SetReadBarrierPointer(forward_address);
2405       }
2406       forward_address->AssertReadBarrierPointer();
2407     }
2408     return forward_address;
2409   }
2410 };
2411 
2412 void Heap::UnBindBitmaps() {
2413   TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2414   for (const auto& space : GetContinuousSpaces()) {
2415     if (space->IsContinuousMemMapAllocSpace()) {
2416       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2417       if (alloc_space->HasBoundBitmaps()) {
2418         alloc_space->UnBindBitmaps();
2419       }
2420     }
2421   }
2422 }
2423 
2424 void Heap::PreZygoteFork() {
2425   if (!HasZygoteSpace()) {
2426     // We still want to GC in case there are unreachable non-moving objects that could cause
2427     // suboptimal bin packing when we compact the zygote space.
2428     CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2429     // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2430     // the trim process may require locking the mutator lock.
2431     non_moving_space_->Trim();
2432   }
2433   Thread* self = Thread::Current();
2434   MutexLock mu(self, zygote_creation_lock_);
2435   // Check whether a zygote space has already been created.
2436   if (HasZygoteSpace()) {
2437     return;
2438   }
2439   Runtime::Current()->GetInternTable()->AddNewTable();
2440   Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2441   VLOG(heap) << "Starting PreZygoteFork";
2442   // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
2443   // there.
2444   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2445   const bool same_space = non_moving_space_ == main_space_;
2446   if (kCompactZygote) {
2447     // Temporarily disable rosalloc verification because the zygote
2448     // compaction will mess up the rosalloc internal metadata.
2449     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2450     ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2451     zygote_collector.BuildBins(non_moving_space_);
2452     // Create a new bump pointer space which we will compact into.
2453     space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2454                                          non_moving_space_->Limit());
2455     // Compact the bump pointer space to a new zygote bump pointer space.
2456     bool reset_main_space = false;
2457     if (IsMovingGc(collector_type_)) {
2458       if (collector_type_ == kCollectorTypeCC) {
2459         zygote_collector.SetFromSpace(region_space_);
2460       } else {
2461         zygote_collector.SetFromSpace(bump_pointer_space_);
2462       }
2463     } else {
2464       CHECK(main_space_ != nullptr);
2465       CHECK_NE(main_space_, non_moving_space_)
2466           << "Does not make sense to compact within the same space";
2467       // Copy from the main space.
2468       zygote_collector.SetFromSpace(main_space_);
2469       reset_main_space = true;
2470     }
2471     zygote_collector.SetToSpace(&target_space);
2472     zygote_collector.SetSwapSemiSpaces(false);
2473     zygote_collector.Run(kGcCauseCollectorTransition, false);
2474     if (reset_main_space) {
2475       main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
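      // The main space was the from-space of the compaction above and is now empty, so
      // MADV_DONTNEED returns its physical pages to the kernel before the map is rebuilt as a
      // fresh malloc space.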
2476       madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
2477       MemMap* mem_map = main_space_->ReleaseMemMap();
2478       RemoveSpace(main_space_);
2479       space::Space* old_main_space = main_space_;
2480       CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
2481                             mem_map->Size());
2482       delete old_main_space;
2483       AddSpace(main_space_);
2484     } else {
2485       if (collector_type_ == kCollectorTypeCC) {
2486         region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2487       } else {
2488         bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2489       }
2490     }
2491     if (temp_space_ != nullptr) {
2492       CHECK(temp_space_->IsEmpty());
2493     }
2494     total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2495     total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2496     // Update the end and write out image.
2497     non_moving_space_->SetEnd(target_space.End());
2498     non_moving_space_->SetLimit(target_space.Limit());
2499     VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes";
2500   }
2501   // Change the collector to the post zygote one.
2502   ChangeCollector(foreground_collector_type_);
2503   // Save the old space so that we can remove it after we complete creating the zygote space.
2504   space::MallocSpace* old_alloc_space = non_moving_space_;
2505   // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
2506   // the remaining available space.
2507   // Remove the old space before creating the zygote space since creating the zygote space sets
2508   // the old alloc space's bitmaps to null.
2509   RemoveSpace(old_alloc_space);
2510   if (collector::SemiSpace::kUseRememberedSet) {
2511     // Sanity bound check.
2512     FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
2513     // Remove the remembered set for the now zygote space (the old
2514     // non-moving space). Note now that we have compacted objects into
2515     // the zygote space, the data in the remembered set is no longer
2516     // needed. The zygote space will instead have a mod-union table
2517     // from this point on.
2518     RemoveRememberedSet(old_alloc_space);
2519   }
2520   // Remaining space becomes the new non moving space.
2521   zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
2522                                                      &non_moving_space_);
2523   CHECK(!non_moving_space_->CanMoveObjects());
2524   if (same_space) {
2525     main_space_ = non_moving_space_;
2526     SetSpaceAsDefault(main_space_);
2527   }
2528   delete old_alloc_space;
2529   CHECK(HasZygoteSpace()) << "Failed creating zygote space";
2530   AddSpace(zygote_space_);
2531   non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
2532   AddSpace(non_moving_space_);
2533   // Create the zygote space mod union table.
2534   accounting::ModUnionTable* mod_union_table =
2535       new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
2536                                              zygote_space_);
2537   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
2538   // Set all the cards in the mod-union table since we don't know which objects contain references
2539   // to large objects.
2540   mod_union_table->SetCards();
2541   AddModUnionTable(mod_union_table);
2542   large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
2543   if (collector::SemiSpace::kUseRememberedSet) {
2544     // Add a new remembered set for the post-zygote non-moving space.
2545     accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
2546         new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
2547                                       non_moving_space_);
2548     CHECK(post_zygote_non_moving_space_rem_set != nullptr)
2549         << "Failed to create post-zygote non-moving space remembered set";
2550     AddRememberedSet(post_zygote_non_moving_space_rem_set);
2551   }
2552 }
2553 
2554 void Heap::FlushAllocStack() {
2555   MarkAllocStackAsLive(allocation_stack_.get());
2556   allocation_stack_->Reset();
2557 }
2558 
2559 void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
2560                           accounting::ContinuousSpaceBitmap* bitmap2,
2561                           accounting::LargeObjectBitmap* large_objects,
2562                           accounting::ObjectStack* stack) {
2563   DCHECK(bitmap1 != nullptr);
2564   DCHECK(bitmap2 != nullptr);
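  // Mark each stack entry in whichever bitmap covers its address; anything outside both
  // continuous-space bitmaps is assumed to be a large object.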
2565   const auto* limit = stack->End();
2566   for (auto* it = stack->Begin(); it != limit; ++it) {
2567     const mirror::Object* obj = it->AsMirrorPtr();
2568     if (!kUseThreadLocalAllocationStack || obj != nullptr) {
2569       if (bitmap1->HasAddress(obj)) {
2570         bitmap1->Set(obj);
2571       } else if (bitmap2->HasAddress(obj)) {
2572         bitmap2->Set(obj);
2573       } else {
2574         DCHECK(large_objects != nullptr);
2575         large_objects->Set(obj);
2576       }
2577     }
2578   }
2579 }
2580 
2581 void Heap::SwapSemiSpaces() {
2582   CHECK(bump_pointer_space_ != nullptr);
2583   CHECK(temp_space_ != nullptr);
2584   std::swap(bump_pointer_space_, temp_space_);
2585 }
2586 
2587 collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
2588                                            space::ContinuousMemMapAllocSpace* source_space,
2589                                            GcCause gc_cause) {
2590   CHECK(kMovingCollector);
2591   if (target_space != source_space) {
2592     // Don't swap spaces since this isn't a typical semi space collection.
2593     semi_space_collector_->SetSwapSemiSpaces(false);
2594     semi_space_collector_->SetFromSpace(source_space);
2595     semi_space_collector_->SetToSpace(target_space);
2596     semi_space_collector_->Run(gc_cause, false);
2597     return semi_space_collector_;
2598   } else {
2599     CHECK(target_space->IsBumpPointerSpace())
2600         << "In-place compaction is only supported for bump pointer spaces";
2601     mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
2602     mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
2603     return mark_compact_collector_;
2604   }
2605 }
2606 
2607 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
2608                                                GcCause gc_cause,
2609                                                bool clear_soft_references) {
2610   Thread* self = Thread::Current();
2611   Runtime* runtime = Runtime::Current();
2612   // If the heap can't run the GC, silently fail and return that no GC was run.
2613   switch (gc_type) {
2614     case collector::kGcTypePartial: {
2615       if (!HasZygoteSpace()) {
2616         return collector::kGcTypeNone;
2617       }
2618       break;
2619     }
2620     default: {
2621       // Other GC types don't have any special cases which make them not runnable. The main
2622       // case here is full GC.
2623     }
2624   }
2625   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2626   Locks::mutator_lock_->AssertNotHeld(self);
2627   if (self->IsHandlingStackOverflow()) {
2628     // If we are throwing a stack overflow error we probably don't have enough remaining stack
2629     // space to run the GC.
2630     return collector::kGcTypeNone;
2631   }
2632   bool compacting_gc;
2633   {
2634     gc_complete_lock_->AssertNotHeld(self);
2635     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2636     MutexLock mu(self, *gc_complete_lock_);
2637     // Ensure there is only one GC at a time.
2638     WaitForGcToCompleteLocked(gc_cause, self);
2639     compacting_gc = IsMovingGc(collector_type_);
2640     // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2641     if (compacting_gc && disable_moving_gc_count_ != 0) {
2642       LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2643       return collector::kGcTypeNone;
2644     }
2645     if (gc_disabled_for_shutdown_) {
2646       return collector::kGcTypeNone;
2647     }
2648     collector_type_running_ = collector_type_;
2649   }
2650   if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2651     ++runtime->GetStats()->gc_for_alloc_count;
2652     ++self->GetStats()->gc_for_alloc_count;
2653   }
2654   const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2655   // Approximate heap size.
2656   ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
2657 
2658   DCHECK_LT(gc_type, collector::kGcTypeMax);
2659   DCHECK_NE(gc_type, collector::kGcTypeNone);
2660 
2661   collector::GarbageCollector* collector = nullptr;
2662   // TODO: Clean this up.
2663   if (compacting_gc) {
2664     DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2665            current_allocator_ == kAllocatorTypeTLAB ||
2666            current_allocator_ == kAllocatorTypeRegion ||
2667            current_allocator_ == kAllocatorTypeRegionTLAB);
2668     switch (collector_type_) {
2669       case kCollectorTypeSS:
2670         // Fall-through.
2671       case kCollectorTypeGSS:
2672         semi_space_collector_->SetFromSpace(bump_pointer_space_);
2673         semi_space_collector_->SetToSpace(temp_space_);
2674         semi_space_collector_->SetSwapSemiSpaces(true);
2675         collector = semi_space_collector_;
2676         break;
2677       case kCollectorTypeCC:
2678         concurrent_copying_collector_->SetRegionSpace(region_space_);
2679         collector = concurrent_copying_collector_;
2680         break;
2681       case kCollectorTypeMC:
2682         mark_compact_collector_->SetSpace(bump_pointer_space_);
2683         collector = mark_compact_collector_;
2684         break;
2685       default:
2686         LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2687     }
2688     if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
2689       temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2690       if (kIsDebugBuild) {
2691         // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2692         temp_space_->GetMemMap()->TryReadable();
2693       }
2694       CHECK(temp_space_->IsEmpty());
2695     }
2696     gc_type = collector::kGcTypeFull;  // TODO: Not hard code this in.
2697   } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2698       current_allocator_ == kAllocatorTypeDlMalloc) {
2699     collector = FindCollectorByGcType(gc_type);
2700   } else {
2701     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2702   }
2703   if (IsGcConcurrent()) {
2704     // Disable concurrent GC check so that we don't have spammy JNI requests.
2705     // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2706     // calculated in the same thread so that there aren't any races that can cause it to become
2707     // permanently disabled. b/17942071
2708     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2709   }
2710 
2711   // It's time to clear all inline caches, in case some classes can be unloaded.
2712   if ((gc_type == collector::kGcTypeFull) && (runtime->GetJit() != nullptr)) {
2713     runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
2714   }
2715 
2716   CHECK(collector != nullptr)
2717       << "Could not find garbage collector with collector_type="
2718       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2719   collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2720   total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2721   total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2722   RequestTrim(self);
2723   // Enqueue cleared references.
2724   reference_processor_->EnqueueClearedReferences(self);
2725   // Grow the heap so that we know when to perform the next GC.
2726   GrowForUtilization(collector, bytes_allocated_before_gc);
2727   LogGC(gc_cause, collector);
2728   FinishGC(self, gc_type);
2729   // Inform DDMS that a GC completed.
2730   Dbg::GcDidFinish();
2731   // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2732   // deadlocks in case the JNI_OnUnload function does allocations.
2733   {
2734     ScopedObjectAccess soa(self);
2735     soa.Vm()->UnloadNativeLibraries();
2736   }
2737   return gc_type;
2738 }
2739 
2740 void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2741   const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2742   const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2743   // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2744   // (mutator time blocked >= long_pause_log_threshold_).
2745   bool log_gc = gc_cause == kGcCauseExplicit;
2746   if (!log_gc && CareAboutPauseTimes()) {
2747     // GC for alloc pauses the allocating thread, so consider it as a pause.
2748     log_gc = duration > long_gc_log_threshold_ ||
2749         (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2750     for (uint64_t pause : pause_times) {
2751       log_gc = log_gc || pause >= long_pause_log_threshold_;
2752     }
2753   }
2754   if (log_gc) {
2755     const size_t percent_free = GetPercentFree();
2756     const size_t current_heap_size = GetBytesAllocated();
2757     const size_t total_memory = GetTotalMemory();
2758     std::ostringstream pause_string;
2759     for (size_t i = 0; i < pause_times.size(); ++i) {
2760       pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2761                    << ((i != pause_times.size() - 1) ? "," : "");
2762     }
2763     LOG(INFO) << gc_cause << " " << collector->GetName()
2764               << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
2765               << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2766               << current_gc_iteration_.GetFreedLargeObjects() << "("
2767               << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2768               << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2769               << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2770               << " total " << PrettyDuration((duration / 1000) * 1000);
2771     VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2772   }
2773 }
2774 
2775 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2776   MutexLock mu(self, *gc_complete_lock_);
2777   collector_type_running_ = kCollectorTypeNone;
2778   if (gc_type != collector::kGcTypeNone) {
2779     last_gc_type_ = gc_type;
2780 
2781     // Update stats.
2782     ++gc_count_last_window_;
2783     if (running_collection_is_blocking_) {
2784       // If the currently running collection was a blocking one,
2785       // increment the counters and reset the flag.
2786       ++blocking_gc_count_;
2787       blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
2788       ++blocking_gc_count_last_window_;
2789     }
2790     // Update the gc count rate histograms if due.
2791     UpdateGcCountRateHistograms();
2792   }
2793   // Reset.
2794   running_collection_is_blocking_ = false;
2795   // Wake anyone who may have been waiting for the GC to complete.
2796   gc_complete_cond_->Broadcast(self);
2797 }
2798 
2799 void Heap::UpdateGcCountRateHistograms() {
2800   // Invariant: if the time since the last update includes more than
2801   // one window, all the GC runs (if > 0) must have happened in the first
2802   // window because otherwise the update must have already taken place
2803   // at an earlier GC run. So, we report the non-first windows with
2804   // zero counts to the histograms.
2805   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2806   uint64_t now = NanoTime();
2807   DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
2808   uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
2809   uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
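  // Example (illustrative): if 2.5 windows have elapsed since the last update, num_of_windows is
  // 2; the counts accumulated so far are credited to the first window and the one remaining
  // fully-elapsed window is recorded as zero below.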
2810   if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
2811     // Record the first window.
2812     gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
2813     blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
2814         blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
2815     // Record the other windows (with zero counts).
2816     for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
2817       gc_count_rate_histogram_.AddValue(0);
2818       blocking_gc_count_rate_histogram_.AddValue(0);
2819     }
2820     // Update the last update time and reset the counters.
2821     last_update_time_gc_count_rate_histograms_ =
2822         (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
2823     gc_count_last_window_ = 1;  // Include the current run.
2824     blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
2825   }
2826   DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
2827 }
2828 
2829 class RootMatchesObjectVisitor : public SingleRootVisitor {
2830  public:
2831   explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
2832 
2833   void VisitRoot(mirror::Object* root, const RootInfo& info)
2834       OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2835     if (root == obj_) {
2836       LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
2837     }
2838   }
2839 
2840  private:
2841   const mirror::Object* const obj_;
2842 };
2843 
2844 
2845 class ScanVisitor {
2846  public:
2847   void operator()(const mirror::Object* obj) const {
2848     LOG(ERROR) << "Would have rescanned object " << obj;
2849   }
2850 };
2851 
2852 // Verify a reference from an object.
2853 class VerifyReferenceVisitor : public SingleRootVisitor {
2854  public:
2855   VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2856       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
2857       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2858 
2859   size_t GetFailureCount() const {
2860     return fail_count_->LoadSequentiallyConsistent();
2861   }
2862 
2863   void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
2864       SHARED_REQUIRES(Locks::mutator_lock_) {
2865     if (verify_referent_) {
2866       VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
2867     }
2868   }
2869 
2870   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
2871       SHARED_REQUIRES(Locks::mutator_lock_) {
2872     VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
2873   }
2874 
2875   bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2876     return heap_->IsLiveObjectLocked(obj, true, false, true);
2877   }
2878 
2879   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
2880       SHARED_REQUIRES(Locks::mutator_lock_) {
2881     if (!root->IsNull()) {
2882       VisitRoot(root);
2883     }
2884   }
2885   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
2886       SHARED_REQUIRES(Locks::mutator_lock_) {
2887     const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
2888         root->AsMirrorPtr(), RootInfo(kRootVMInternal));
2889   }
2890 
2891   virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
2892       SHARED_REQUIRES(Locks::mutator_lock_) {
2893     if (root == nullptr) {
2894       LOG(ERROR) << "Root is null with info " << root_info.GetType();
2895     } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
2896       LOG(ERROR) << "Root " << root << " is dead with type " << PrettyTypeOf(root)
2897           << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
2898     }
2899   }
2900 
2901  private:
2902   // TODO: Fix the no thread safety analysis.
2903   // Returns false on failure.
2904   bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2905       NO_THREAD_SAFETY_ANALYSIS {
2906     if (ref == nullptr || IsLive(ref)) {
2907       // Verify that the reference is live.
2908       return true;
2909     }
2910     if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2911       // Print message only on the first failure to prevent spam.
2912       LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2913     }
2914     if (obj != nullptr) {
2915       // Only do this part for non roots.
2916       accounting::CardTable* card_table = heap_->GetCardTable();
2917       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2918       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2919       uint8_t* card_addr = card_table->CardFromAddr(obj);
2920       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2921                  << offset << "\n card value = " << static_cast<int>(*card_addr);
2922       if (heap_->IsValidObjectAddress(obj->GetClass())) {
2923         LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2924       } else {
2925         LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2926       }
2927 
2928       // Attempt to find the class inside of the recently freed objects.
2929       space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2930       if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2931         space::MallocSpace* space = ref_space->AsMallocSpace();
2932         mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2933         if (ref_class != nullptr) {
2934           LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2935                      << PrettyClass(ref_class);
2936         } else {
2937           LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2938         }
2939       }
2940 
2941       if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2942           ref->GetClass()->IsClass()) {
2943         LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2944       } else {
2945         LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2946                    << ") is not a valid heap address";
2947       }
2948 
2949       card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
2950       void* cover_begin = card_table->AddrFromCard(card_addr);
2951       void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2952           accounting::CardTable::kCardSize);
2953       LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2954           << "-" << cover_end;
2955       accounting::ContinuousSpaceBitmap* bitmap =
2956           heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2957 
2958       if (bitmap == nullptr) {
2959         LOG(ERROR) << "Object " << obj << " has no bitmap";
2960         if (!VerifyClassClass(obj->GetClass())) {
2961           LOG(ERROR) << "Object " << obj << " failed class verification!";
2962         }
2963       } else {
2964         // Print out how the object is live.
2965         if (bitmap->Test(obj)) {
2966           LOG(ERROR) << "Object " << obj << " found in live bitmap";
2967         }
2968         if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2969           LOG(ERROR) << "Object " << obj << " found in allocation stack";
2970         }
2971         if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2972           LOG(ERROR) << "Object " << obj << " found in live stack";
2973         }
2974         if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2975           LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2976         }
2977         if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2978           LOG(ERROR) << "Ref " << ref << " found in live stack";
2979         }
2980         // Attempt to see if the card table missed the reference.
2981         ScanVisitor scan_visitor;
2982         uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
2983         card_table->Scan<false>(bitmap, byte_cover_begin,
2984                                 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2985       }
2986 
2987       // Search to see if any of the roots reference our object.
2988       RootMatchesObjectVisitor visitor1(obj);
2989       Runtime::Current()->VisitRoots(&visitor1);
2990       // Search to see if any of the roots reference our reference.
2991       RootMatchesObjectVisitor visitor2(ref);
2992       Runtime::Current()->VisitRoots(&visitor2);
2993     }
2994     return false;
2995   }
2996 
2997   Heap* const heap_;
2998   Atomic<size_t>* const fail_count_;
2999   const bool verify_referent_;
3000 };
3001 
3002 // Verify all references within an object, for use with HeapBitmap::Visit.
3003 class VerifyObjectVisitor {
3004  public:
3005   VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
3006       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
3007 
3008   void operator()(mirror::Object* obj)
3009       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3010     // Note: we verify the references in obj but not obj itself; obj must already be live,
3011     // since we found it in the live bitmap.
3012     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
3013     // The class doesn't count as a reference but we should verify it anyway.
3014     obj->VisitReferences(visitor, visitor);
3015   }
3016 
3017   static void VisitCallback(mirror::Object* obj, void* arg)
3018       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3019     VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
3020     visitor->operator()(obj);
3021   }
3022 
3023   void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
3024     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
3025     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
3026     Runtime::Current()->VisitRoots(&visitor);
3027   }
3028 
3029   size_t GetFailureCount() const {
3030     return fail_count_->LoadSequentiallyConsistent();
3031   }
3032 
3033  private:
3034   Heap* const heap_;
3035   Atomic<size_t>* const fail_count_;
3036   const bool verify_referent_;
3037 };
3038 
3039 void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
3040   // Slow path, the allocation stack push back must have already failed.
3041   DCHECK(!allocation_stack_->AtomicPushBack(*obj));
3042   do {
3043     // TODO: Add handle VerifyObject.
3044     StackHandleScope<1> hs(self);
3045     HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3046     // Push our object into the reserve region of the allocation stack. This is only required due
3047     // to heap verification requiring that roots are live (either in the live bitmap or in the
3048     // allocation stack).
3049     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
3050     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3051   } while (!allocation_stack_->AtomicPushBack(*obj));
3052 }
3053 
3054 void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
3055   // Slow path, the allocation stack push back must have already failed.
3056   DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
3057   StackReference<mirror::Object>* start_address;
3058   StackReference<mirror::Object>* end_address;
3059   while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
3060                                             &end_address)) {
3061     // TODO: Add handle VerifyObject.
3062     StackHandleScope<1> hs(self);
3063     HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3064     // Push our object into the reserve region of the allocation stack. This is only required due
3065     // to heap verification requiring that roots are live (either in the live bitmap or in the
3066     // allocation stack).
3067     CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
3068     // Push into the reserve allocation stack.
3069     CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
3070   }
3071   self->SetThreadLocalAllocationStack(start_address, end_address);
3072   // Retry on the new thread-local allocation stack.
3073   CHECK(self->PushOnThreadLocalAllocationStack(*obj));  // Must succeed.
3074 }
3075 
3076 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
3077 size_t Heap::VerifyHeapReferences(bool verify_referents) {
3078   Thread* self = Thread::Current();
3079   Locks::mutator_lock_->AssertExclusiveHeld(self);
3080   // Lets sort our allocation stacks so that we can efficiently binary search them.
3081   allocation_stack_->Sort();
3082   live_stack_->Sort();
3083   // Since we sorted the allocation stack content, need to revoke all
3084   // thread-local allocation stacks.
3085   RevokeAllThreadLocalAllocationStacks(self);
3086   Atomic<size_t> fail_count_(0);
3087   VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
3088   // Verify objects in the allocation stack since these will be objects which were:
3089   // 1. Allocated prior to the GC (pre GC verification).
3090   // 2. Allocated during the GC (pre sweep GC verification).
3091   // We don't want to verify the objects in the live stack since they themselves may be
3092   // pointing to dead objects if they are not reachable.
3093   VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
3094   // Verify the roots:
3095   visitor.VerifyRoots();
3096   if (visitor.GetFailureCount() > 0) {
3097     // Dump mod-union tables.
3098     for (const auto& table_pair : mod_union_tables_) {
3099       accounting::ModUnionTable* mod_union_table = table_pair.second;
3100       mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
3101     }
3102     // Dump remembered sets.
3103     for (const auto& table_pair : remembered_sets_) {
3104       accounting::RememberedSet* remembered_set = table_pair.second;
3105       remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
3106     }
3107     DumpSpaces(LOG(ERROR));
3108   }
3109   return visitor.GetFailureCount();
3110 }
3111 
3112 class VerifyReferenceCardVisitor {
3113  public:
3114   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
3115       SHARED_REQUIRES(Locks::mutator_lock_,
3116                             Locks::heap_bitmap_lock_)
3117       : heap_(heap), failed_(failed) {
3118   }
3119 
3120   // There are no card marks for native roots on a class.
3121   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
3122       const {}
3123   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
3124 
3125   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
3126   // annotalysis on visitors.
3127   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
3128       NO_THREAD_SAFETY_ANALYSIS {
3129     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
3130     // Filter out class references since changing an object's class does not mark the card as dirty.
3131     // Also handles large objects, since the only reference they hold is a class reference.
3132     if (ref != nullptr && !ref->IsClass()) {
3133       accounting::CardTable* card_table = heap_->GetCardTable();
3134       // If the object is not dirty and it is referencing something in the live stack other than
3135       // class, then it must be on a dirty card.
3136       if (!card_table->AddrIsInCardTable(obj)) {
3137         LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
3138         *failed_ = true;
3139       } else if (!card_table->IsDirty(obj)) {
3140         // TODO: Check mod-union tables.
3141         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
3142         // kCardDirty - 1 if it didn't get touched since we aged it.
3143         accounting::ObjectStack* live_stack = heap_->live_stack_.get();
3144         if (live_stack->ContainsSorted(ref)) {
3145           if (live_stack->ContainsSorted(obj)) {
3146             LOG(ERROR) << "Object " << obj << " found in live stack";
3147           }
3148           if (heap_->GetLiveBitmap()->Test(obj)) {
3149             LOG(ERROR) << "Object " << obj << " found in live bitmap";
3150           }
3151           LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
3152                     << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
3153 
3154           // Print which field of the object is dead.
3155           if (!obj->IsObjectArray()) {
3156             mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
3157             CHECK(klass != nullptr);
3158             for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
3159               if (field.GetOffset().Int32Value() == offset.Int32Value()) {
3160                 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
3161                            << PrettyField(&field);
3162                 break;
3163               }
3164             }
3165           } else {
3166             mirror::ObjectArray<mirror::Object>* object_array =
3167                 obj->AsObjectArray<mirror::Object>();
3168             for (int32_t i = 0; i < object_array->GetLength(); ++i) {
3169               if (object_array->Get(i) == ref) {
3170                 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
3171               }
3172             }
3173           }
3174 
3175           *failed_ = true;
3176         }
3177       }
3178     }
3179   }
3180 
3181  private:
3182   Heap* const heap_;
3183   bool* const failed_;
3184 };
3185 
3186 class VerifyLiveStackReferences {
3187  public:
3188   explicit VerifyLiveStackReferences(Heap* heap)
3189       : heap_(heap),
3190         failed_(false) {}
3191 
3192   void operator()(mirror::Object* obj) const
3193       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3194     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
3195     obj->VisitReferences(visitor, VoidFunctor());
3196   }
3197 
3198   bool Failed() const {
3199     return failed_;
3200   }
3201 
3202  private:
3203   Heap* const heap_;
3204   bool failed_;
3205 };
3206 
3207 bool Heap::VerifyMissingCardMarks() {
3208   Thread* self = Thread::Current();
3209   Locks::mutator_lock_->AssertExclusiveHeld(self);
3210   // We need to sort the live stack since we binary search it.
3211   live_stack_->Sort();
3212   // Since we sorted the allocation stack content, need to revoke all
3213   // thread-local allocation stacks.
3214   RevokeAllThreadLocalAllocationStacks(self);
3215   VerifyLiveStackReferences visitor(this);
3216   GetLiveBitmap()->Visit(visitor);
3217   // We can verify objects in the live stack since none of these should reference dead objects.
3218   for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
3219     if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
3220       visitor(it->AsMirrorPtr());
3221     }
3222   }
3223   return !visitor.Failed();
3224 }
3225 
3226 void Heap::SwapStacks() {
3227   if (kUseThreadLocalAllocationStack) {
3228     live_stack_->AssertAllZero();
3229   }
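  // After the swap, the objects allocated since the last GC become the live stack that this GC
  // will process, and new allocations go onto the previous (now empty) live stack.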
3230   allocation_stack_.swap(live_stack_);
3231 }
3232 
3233 void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
3234   // This must be called only during the pause.
3235   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
3236   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
3237   MutexLock mu2(self, *Locks::thread_list_lock_);
3238   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
3239   for (Thread* t : thread_list) {
3240     t->RevokeThreadLocalAllocationStack();
3241   }
3242 }
3243 
3244 void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
3245   if (kIsDebugBuild) {
3246     if (rosalloc_space_ != nullptr) {
3247       rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
3248     }
3249     if (bump_pointer_space_ != nullptr) {
3250       bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
3251     }
3252   }
3253 }
3254 
3255 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
3256   if (kIsDebugBuild) {
3257     if (bump_pointer_space_ != nullptr) {
3258       bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
3259     }
3260   }
3261 }
3262 
3263 accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
3264   auto it = mod_union_tables_.find(space);
3265   if (it == mod_union_tables_.end()) {
3266     return nullptr;
3267   }
3268   return it->second;
3269 }
3270 
3271 accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
3272   auto it = remembered_sets_.find(space);
3273   if (it == remembered_sets_.end()) {
3274     return nullptr;
3275   }
3276   return it->second;
3277 }
3278 
3279 void Heap::ProcessCards(TimingLogger* timings,
3280                         bool use_rem_sets,
3281                         bool process_alloc_space_cards,
3282                         bool clear_alloc_space_cards) {
3283   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3284   // Clear cards and keep track of cards cleared in the mod-union table.
3285   for (const auto& space : continuous_spaces_) {
3286     accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
3287     accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
3288     if (table != nullptr) {
3289       const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
3290           "ImageModUnionClearCards";
3291       TimingLogger::ScopedTiming t2(name, timings);
3292       table->ClearCards();
3293     } else if (use_rem_sets && rem_set != nullptr) {
3294       DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
3295           << static_cast<int>(collector_type_);
3296       TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
3297       rem_set->ClearCards();
3298     } else if (process_alloc_space_cards) {
3299       TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
3300       if (clear_alloc_space_cards) {
3301         uint8_t* end = space->End();
3302         if (space->IsImageSpace()) {
3303           // Image space end is the end of the mirror objects, it is not necessarily page or card
3304           // aligned. Align up so that the check in ClearCardRange does not fail.
3305           end = AlignUp(end, accounting::CardTable::kCardSize);
3306         }
3307         card_table_->ClearCardRange(space->Begin(), end);
3308       } else {
3309         // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
3310         // cards were dirty before the GC started.
3311         // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
3312         // -> clean(cleaning thread).
3313         // The possible races leave the card either aged or unaged. Since we checkpoint the
3314         // roots and then scan / update mod-union tables afterwards, we will scan either card.
3315         // If we end up with the non-aged card, we scan it in the pause.
3316         card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
3317                                        VoidFunctor());
3318       }
3319     }
3320   }
3321 }
3322 
3323 struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
3324   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
3325     return obj;
3326   }
3327   virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {
3328   }
3329 };
3330 
3331 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
3332   Thread* const self = Thread::Current();
3333   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3334   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3335   if (verify_pre_gc_heap_) {
3336     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
3337     size_t failures = VerifyHeapReferences();
3338     if (failures > 0) {
3339       LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
3340           << " failures";
3341     }
3342   }
3343   // Check that all objects which reference things in the live stack are on dirty cards.
3344   if (verify_missing_card_marks_) {
3345     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
3346     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3347     SwapStacks();
3348     // Sort the live stack so that we can quickly binary search it later.
3349     CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
3350                                     << " missing card mark verification failed\n" << DumpSpaces();
3351     SwapStacks();
3352   }
3353   if (verify_mod_union_table_) {
3354     TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
3355     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
3356     for (const auto& table_pair : mod_union_tables_) {
3357       accounting::ModUnionTable* mod_union_table = table_pair.second;
3358       IdentityMarkHeapReferenceVisitor visitor;
3359       mod_union_table->UpdateAndMarkReferences(&visitor);
3360       mod_union_table->Verify();
3361     }
3362   }
3363 }
3364 
3365 void Heap::PreGcVerification(collector::GarbageCollector* gc) {
3366   if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
3367     collector::GarbageCollector::ScopedPause pause(gc);
3368     PreGcVerificationPaused(gc);
3369   }
3370 }
3371 
3372 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
3373   // TODO: Add a new runtime option for this?
3374   if (verify_pre_gc_rosalloc_) {
3375     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
3376   }
3377 }
3378 
3379 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
3380   Thread* const self = Thread::Current();
3381   TimingLogger* const timings = current_gc_iteration_.GetTimings();
3382   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3383   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
3384   // reachable objects.
3385   if (verify_pre_sweeping_heap_) {
3386     TimingLogger::ScopedTiming t2("(Paused)PreSweepingVerifyHeapReferences", timings);
3387     CHECK_NE(self->GetState(), kRunnable);
3388     {
3389       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3390       // Swapping bound bitmaps does nothing.
3391       gc->SwapBitmaps();
3392     }
3393     // Pass in false since concurrent reference processing can mean that the reference referents
3394   // may point to dead objects at the point at which PreSweepingGcVerification is called.
3395     size_t failures = VerifyHeapReferences(false);
3396     if (failures > 0) {
3397       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
3398           << " failures";
3399     }
3400     {
3401       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
3402       gc->SwapBitmaps();
3403     }
3404   }
3405   if (verify_pre_sweeping_rosalloc_) {
3406     RosAllocVerification(timings, "PreSweepingRosAllocVerification");
3407   }
3408 }
3409 
3410 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
3411   // Only pause if we have to do some verification.
3412   Thread* const self = Thread::Current();
3413   TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
3414   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
3415   if (verify_system_weaks_) {
3416     ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
3417     collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
3418     mark_sweep->VerifySystemWeaks();
3419   }
3420   if (verify_post_gc_rosalloc_) {
3421     RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
3422   }
3423   if (verify_post_gc_heap_) {
3424     TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
3425     size_t failures = VerifyHeapReferences();
3426     if (failures > 0) {
3427       LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
3428           << " failures";
3429     }
3430   }
3431 }
3432 
3433 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
3434   if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
3435     collector::GarbageCollector::ScopedPause pause(gc);
3436     PostGcVerificationPaused(gc);
3437   }
3438 }
3439 
3440 void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
3441   TimingLogger::ScopedTiming t(name, timings);
3442   for (const auto& space : continuous_spaces_) {
3443     if (space->IsRosAllocSpace()) {
3444       VLOG(heap) << name << " : " << space->GetName();
3445       space->AsRosAllocSpace()->Verify();
3446     }
3447   }
3448 }
3449 
3450 collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
3451   ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
3452   MutexLock mu(self, *gc_complete_lock_);
3453   return WaitForGcToCompleteLocked(cause, self);
3454 }
3455 
3456 collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
3457   collector::GcType last_gc_type = collector::kGcTypeNone;
3458   uint64_t wait_start = NanoTime();
3459   while (collector_type_running_ != kCollectorTypeNone) {
3460     if (self != task_processor_->GetRunningThread()) {
3461       // The current thread is about to wait for a currently running
3462       // collection to finish. If the waiting thread is not the heap
3463       // task daemon thread, the currently running collection is
3464       // considered as a blocking GC.
3465       running_collection_is_blocking_ = true;
3466       VLOG(gc) << "Waiting for a blocking GC " << cause;
3467     }
3468     ScopedTrace trace("GC: Wait For Completion");
3469     // We must wait, change thread state then sleep on gc_complete_cond_;
3470     gc_complete_cond_->Wait(self);
3471     last_gc_type = last_gc_type_;
3472   }
3473   uint64_t wait_time = NanoTime() - wait_start;
3474   total_wait_time_ += wait_time;
3475   if (wait_time > long_pause_log_threshold_) {
3476     LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
3477         << " for cause " << cause;
3478   }
3479   if (self != task_processor_->GetRunningThread()) {
3480     // The current thread is about to run a collection. If the thread
3481     // is not the heap task daemon thread, it's considered as a
3482     // blocking GC (i.e., blocking itself).
3483     running_collection_is_blocking_ = true;
3484     VLOG(gc) << "Starting a blocking GC " << cause;
3485   }
3486   return last_gc_type;
3487 }
3488 
3489 void Heap::DumpForSigQuit(std::ostream& os) {
3490   os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
3491      << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
3492   DumpGcPerformanceInfo(os);
3493 }
3494 
3495 size_t Heap::GetPercentFree() {
3496   return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
3497 }
3498 
3499 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
3500   if (max_allowed_footprint > GetMaxMemory()) {
3501     VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
3502              << PrettySize(GetMaxMemory());
3503     max_allowed_footprint = GetMaxMemory();
3504   }
3505   max_allowed_footprint_ = max_allowed_footprint;
3506 }
3507 
3508 bool Heap::IsMovableObject(const mirror::Object* obj) const {
3509   if (kMovingCollector) {
3510     space::Space* space = FindContinuousSpaceFromObject(obj, true);
3511     if (space != nullptr) {
3512       // TODO: Check large object?
3513       return space->CanMoveObjects();
3514     }
3515   }
3516   return false;
3517 }
3518 
3519 void Heap::UpdateMaxNativeFootprint() {
3520   size_t native_size = native_bytes_allocated_.LoadRelaxed();
3521   // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
3522   size_t target_size = native_size / GetTargetHeapUtilization();
3523   if (target_size > native_size + max_free_) {
3524     target_size = native_size + max_free_;
3525   } else if (target_size < native_size + min_free_) {
3526     target_size = native_size + min_free_;
3527   }
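  // Example (hypothetical numbers): with 30 MB of native allocations and a target utilization of
  // 0.75, target_size is 40 MB, which is then clamped to stay within [native_size + min_free_,
  // native_size + max_free_] before being capped at growth_limit_ below.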
3528   native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
3529 }
3530 
3531 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
3532   for (const auto& collector : garbage_collectors_) {
3533     if (collector->GetCollectorType() == collector_type_ &&
3534         collector->GetGcType() == gc_type) {
3535       return collector;
3536     }
3537   }
3538   return nullptr;
3539 }
3540 
3541 double Heap::HeapGrowthMultiplier() const {
3542   // If we don't care about pause times (background) or are in low memory mode, return 1.0.
3543   if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
3544     return 1.0;
3545   }
3546   return foreground_heap_growth_multiplier_;
3547 }
3548 
3549 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
3550                               uint64_t bytes_allocated_before_gc) {
3551   // We know what our utilization is at this moment.
3552   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
3553   const uint64_t bytes_allocated = GetBytesAllocated();
3554   uint64_t target_size;
3555   collector::GcType gc_type = collector_ran->GetGcType();
3556   const double multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for
3557   // foreground.
3558   const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
3559   const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
3560   if (gc_type != collector::kGcTypeSticky) {
3561     // Grow the heap for non sticky GC.
3562     ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
3563     CHECK_GE(delta, 0);
3564     target_size = bytes_allocated + delta * multiplier;
3565     target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
3566     target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
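    // Example (hypothetical numbers): with 100 MB allocated, a 0.75 target utilization and a 2x
    // foreground multiplier, delta is ~33 MB and target_size is ~166 MB before being clamped into
    // [bytes_allocated + adjusted_min_free, bytes_allocated + adjusted_max_free].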
3567     native_need_to_run_finalization_ = true;
3568     next_gc_type_ = collector::kGcTypeSticky;
3569   } else {
3570     collector::GcType non_sticky_gc_type =
3571         HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3572     // Find what the next non sticky collector will be.
3573     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
3574     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
3575     // do another sticky collection next.
3576     // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
3577     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
3578     // if the sticky GC throughput always remained >= the full/partial throughput.
3579     if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
3580         non_sticky_collector->GetEstimatedMeanThroughput() &&
3581         non_sticky_collector->NumberOfIterations() > 0 &&
3582         bytes_allocated <= max_allowed_footprint_) {
3583       next_gc_type_ = collector::kGcTypeSticky;
3584     } else {
3585       next_gc_type_ = non_sticky_gc_type;
3586     }
3587     // If we have freed enough memory, shrink the heap back down.
3588     if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
3589       target_size = bytes_allocated + adjusted_max_free;
3590     } else {
3591       target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
3592     }
3593   }
3594   if (!ignore_max_footprint_) {
3595     SetIdealFootprint(target_size);
3596     if (IsGcConcurrent()) {
3597       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
3598           current_gc_iteration_.GetFreedLargeObjectBytes() +
3599           current_gc_iteration_.GetFreedRevokeBytes();
3600       // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
3601       // how many bytes were allocated during the GC we need to add freed_bytes back on.
3602       CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
3603       const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
3604           bytes_allocated_before_gc;
3605       // Calculate when to perform the next ConcurrentGC.
3606       // Calculate the estimated GC duration.
3607       const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
3608       // Estimate how many remaining bytes we will have when we need to start the next GC.
3609       size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
3610       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
3611       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
3612       if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
3613         // A situation that should never happen: the estimated allocation rate implies that we
3614         // would exceed the application's entire footprint. Schedule another GC nearly straight
3615         // away.
3616         remaining_bytes = kMinConcurrentRemainingBytes;
3617       }
3618       DCHECK_LE(remaining_bytes, max_allowed_footprint_);
3619       DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
3620       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
3621       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
3622       // right away.
3623       concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
3624                                          static_cast<size_t>(bytes_allocated));
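      // Allocations that push the total allocated bytes past concurrent_start_bytes_ will request
      // a background concurrent GC (see RequestConcurrentGCAndSaveObject).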
3625     }
3626   }
3627 }
3628 
3629 void Heap::ClampGrowthLimit() {
3630   // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
3631   ScopedObjectAccess soa(Thread::Current());
3632   WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
3633   capacity_ = growth_limit_;
3634   for (const auto& space : continuous_spaces_) {
3635     if (space->IsMallocSpace()) {
3636       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3637       malloc_space->ClampGrowthLimit();
3638     }
3639   }
3640   // This space isn't added for performance reasons.
3641   if (main_space_backup_.get() != nullptr) {
3642     main_space_backup_->ClampGrowthLimit();
3643   }
3644 }
3645 
3646 void Heap::ClearGrowthLimit() {
3647   growth_limit_ = capacity_;
3648   ScopedObjectAccess soa(Thread::Current());
3649   for (const auto& space : continuous_spaces_) {
3650     if (space->IsMallocSpace()) {
3651       gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
3652       malloc_space->ClearGrowthLimit();
3653       malloc_space->SetFootprintLimit(malloc_space->Capacity());
3654     }
3655   }
3656   // This space isn't added for performance reasons.
3657   if (main_space_backup_.get() != nullptr) {
3658     main_space_backup_->ClearGrowthLimit();
3659     main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
3660   }
3661 }
3662 
3663 void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
3664   ScopedObjectAccess soa(self);
3665   ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
3666   jvalue args[1];
3667   args[0].l = arg.get();
3668   InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
3669   // Restore object in case it gets moved.
3670   *object = soa.Decode<mirror::Object*>(arg.get());
3671 }
3672 
3673 void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
3674   StackHandleScope<1> hs(self);
3675   HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
3676   RequestConcurrentGC(self, force_full);
3677 }
3678 
3679 class Heap::ConcurrentGCTask : public HeapTask {
3680  public:
3681   ConcurrentGCTask(uint64_t target_time, bool force_full)
3682       : HeapTask(target_time), force_full_(force_full) { }
3683   virtual void Run(Thread* self) OVERRIDE {
3684     gc::Heap* heap = Runtime::Current()->GetHeap();
3685     heap->ConcurrentGC(self, force_full_);
3686     heap->ClearConcurrentGCRequest();
3687   }
3688 
3689  private:
3690   const bool force_full_;  // If true, force full (or partial) collection.
3691 };
3692 
3693 static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
3694   Runtime* runtime = Runtime::Current();
3695   return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
3696       !self->IsHandlingStackOverflow();
3697 }
3698 
3699 void Heap::ClearConcurrentGCRequest() {
3700   concurrent_gc_pending_.StoreRelaxed(false);
3701 }
3702 
3703 void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
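  // The compare-exchange ensures at most one ConcurrentGCTask is queued at a time; the flag is
  // reset by ClearConcurrentGCRequest() when the task runs.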
3704   if (CanAddHeapTask(self) &&
3705       concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
3706     task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
3707                                                         force_full));
3708   }
3709 }
3710 
3711 void Heap::ConcurrentGC(Thread* self, bool force_full) {
3712   if (!Runtime::Current()->IsShuttingDown(self)) {
3713     // Wait for any GCs currently running to finish.
3714     if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
3715       // If we can't run the GC type we wanted to run, find the next appropriate one and try that
3716       // instead. E.g. can't do partial, so do full instead.
3717       collector::GcType next_gc_type = next_gc_type_;
3718       // If forcing full and next gc type is sticky, override with a non-sticky type.
3719       if (force_full && next_gc_type == collector::kGcTypeSticky) {
3720         next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
3721       }
3722       if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
3723           collector::kGcTypeNone) {
3724         for (collector::GcType gc_type : gc_plan_) {
3725           // Attempt to run the collector; if we succeed, we are done.
3726           if (gc_type > next_gc_type &&
3727               CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
3728                   collector::kGcTypeNone) {
3729             break;
3730           }
3731         }
3732       }
3733     }
3734   }
3735 }
3736 
3737 class Heap::CollectorTransitionTask : public HeapTask {
3738  public:
3739   explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}
3740 
3741   virtual void Run(Thread* self) OVERRIDE {
3742     gc::Heap* heap = Runtime::Current()->GetHeap();
3743     heap->DoPendingCollectorTransition();
3744     heap->ClearPendingCollectorTransition(self);
3745   }
3746 };
3747 
3748 void Heap::ClearPendingCollectorTransition(Thread* self) {
3749   MutexLock mu(self, *pending_task_lock_);
3750   pending_collector_transition_ = nullptr;
3751 }
3752 
3753 void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
3754   Thread* self = Thread::Current();
3755   desired_collector_type_ = desired_collector_type;
3756   if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
3757     return;
3758   }
3759   CollectorTransitionTask* added_task = nullptr;
3760   const uint64_t target_time = NanoTime() + delta_time;
3761   {
3762     MutexLock mu(self, *pending_task_lock_);
3763     // If we have an existing collector transition, update the target time to be the new target.
3764     if (pending_collector_transition_ != nullptr) {
3765       task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
3766       return;
3767     }
3768     added_task = new CollectorTransitionTask(target_time);
3769     pending_collector_transition_ = added_task;
3770   }
3771   task_processor_->AddTask(self, added_task);
3772 }
3773 
3774 class Heap::HeapTrimTask : public HeapTask {
3775  public:
3776   explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
3777   virtual void Run(Thread* self) OVERRIDE {
3778     gc::Heap* heap = Runtime::Current()->GetHeap();
3779     heap->Trim(self);
3780     heap->ClearPendingTrim(self);
3781   }
3782 };
3783 
3784 void Heap::ClearPendingTrim(Thread* self) {
3785   MutexLock mu(self, *pending_task_lock_);
3786   pending_heap_trim_ = nullptr;
3787 }
3788 
3789 void Heap::RequestTrim(Thread* self) {
3790   if (!CanAddHeapTask(self)) {
3791     return;
3792   }
3793   // GC completed and now we must decide whether to request a heap trim (advising pages back to the
3794   // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
3795   // a space it will hold its lock and can become a cause of jank.
3796   // Note, the large object space self trims and the Zygote space was already trimmed and has
3797   // been unchanging since forking.
3798 
3799   // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
3800   // because that only marks object heads, so a large array looks like lots of empty space. We
3801   // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
3802   // to utilization (which is probably inversely proportional to how much benefit we can expect).
3803   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
3804   // not how much use we're making of those pages.
3805   HeapTrimTask* added_task = nullptr;
3806   {
3807     MutexLock mu(self, *pending_task_lock_);
3808     if (pending_heap_trim_ != nullptr) {
3809       // Already have a heap trim request in task processor, ignore this request.
3810       return;
3811     }
3812     added_task = new HeapTrimTask(kHeapTrimWait);
3813     pending_heap_trim_ = added_task;
3814   }
3815   task_processor_->AddTask(self, added_task);
3816 }
3817 
3818 void Heap::RevokeThreadLocalBuffers(Thread* thread) {
3819   if (rosalloc_space_ != nullptr) {
3820     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3821     if (freed_bytes_revoke > 0U) {
3822       num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3823       CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3824     }
3825   }
3826   if (bump_pointer_space_ != nullptr) {
3827     CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
3828   }
3829   if (region_space_ != nullptr) {
3830     CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
3831   }
3832 }
3833 
3834 void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
3835   if (rosalloc_space_ != nullptr) {
3836     size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
3837     if (freed_bytes_revoke > 0U) {
3838       num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3839       CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3840     }
3841   }
3842 }
3843 
3844 void Heap::RevokeAllThreadLocalBuffers() {
3845   if (rosalloc_space_ != nullptr) {
3846     size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
3847     if (freed_bytes_revoke > 0U) {
3848       num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
3849       CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
3850     }
3851   }
3852   if (bump_pointer_space_ != nullptr) {
3853     CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
3854   }
3855   if (region_space_ != nullptr) {
3856     CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
3857   }
3858 }
3859 
3860 bool Heap::IsGCRequestPending() const {
3861   return concurrent_gc_pending_.LoadRelaxed();
3862 }
3863 
3864 void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
3865   env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
3866                             WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
3867                             static_cast<jlong>(timeout));
3868 }
3869 
3870 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
3871   Thread* self = ThreadForEnv(env);
3872   {
3873     MutexLock mu(self, native_histogram_lock_);
3874     native_allocation_histogram_.AddValue(bytes);
3875   }
3876   if (native_need_to_run_finalization_) {
3877     RunFinalization(env, kNativeAllocationFinalizeTimeout);
3878     UpdateMaxNativeFootprint();
3879     native_need_to_run_finalization_ = false;
3880   }
3881   // Total number of native bytes allocated.
3882   size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
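  // FetchAndAdd returns the value before the addition, so add bytes again to get the new total.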
3883   new_native_bytes_allocated += bytes;
3884   if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
3885     collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
3886         collector::kGcTypeFull;
3887 
3888     // The second watermark is higher than the gc watermark. If you hit this it means you are
3889     // allocating native objects faster than the GC can keep up with.
3890     if (new_native_bytes_allocated > growth_limit_) {
3891       if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
3892         // Just finished a GC, attempt to run finalizers.
3893         RunFinalization(env, kNativeAllocationFinalizeTimeout);
3894         CHECK(!env->ExceptionCheck());
3895         // Native bytes allocated may be updated by finalization, refresh it.
3896         new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
3897       }
3898       // If we still are over the watermark, attempt a GC for alloc and run finalizers.
3899       if (new_native_bytes_allocated > growth_limit_) {
3900         CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3901         RunFinalization(env, kNativeAllocationFinalizeTimeout);
3902         native_need_to_run_finalization_ = false;
3903         CHECK(!env->ExceptionCheck());
3904       }
3905       // We have just run finalizers, update the native watermark since it is very likely that
3906       // finalizers released native managed allocations.
3907       UpdateMaxNativeFootprint();
3908     } else if (!IsGCRequestPending()) {
3909       if (IsGcConcurrent()) {
3910         RequestConcurrentGC(self, true);  // Request non-sticky type.
3911       } else {
3912         CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3913       }
3914     }
3915   }
3916 }
3917 
3918 void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
3919   size_t expected_size;
3920   {
3921     MutexLock mu(Thread::Current(), native_histogram_lock_);
3922     native_free_histogram_.AddValue(bytes);
3923   }
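  // Decrement native_bytes_allocated_ with a CAS loop; the weak compare-exchange can fail
  // spuriously or due to concurrent updates, in which case we re-read and retry.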
3924   do {
3925     expected_size = native_bytes_allocated_.LoadRelaxed();
3926     if (UNLIKELY(bytes > expected_size)) {
3927       ScopedObjectAccess soa(env);
3928       env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
3929                     StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
3930                                  "registered as allocated", bytes, expected_size).c_str());
3931       break;
3932     }
3933   } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
3934                                                                expected_size - bytes));
3935 }
3936 
3937 size_t Heap::GetTotalMemory() const {
3938   return std::max(max_allowed_footprint_, GetBytesAllocated());
3939 }
3940 
3941 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3942   DCHECK(mod_union_table != nullptr);
3943   mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3944 }
3945 
3946 void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
3947   CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3948         (c->IsVariableSize() || c->GetObjectSize() == byte_count)) << c->GetClassFlags();
3949   CHECK_GE(byte_count, sizeof(mirror::Object));
3950 }
3951 
3952 void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3953   CHECK(remembered_set != nullptr);
3954   space::Space* space = remembered_set->GetSpace();
3955   CHECK(space != nullptr);
3956   CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3957   remembered_sets_.Put(space, remembered_set);
3958   CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3959 }
3960 
3961 void Heap::RemoveRememberedSet(space::Space* space) {
3962   CHECK(space != nullptr);
3963   auto it = remembered_sets_.find(space);
3964   CHECK(it != remembered_sets_.end());
3965   delete it->second;
3966   remembered_sets_.erase(it);
3967   CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3968 }
3969 
3970 void Heap::ClearMarkedObjects() {
3971   // Clear all of the spaces' mark bitmaps.
3972   for (const auto& space : GetContinuousSpaces()) {
3973     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
3974     if (space->GetLiveBitmap() != mark_bitmap) {
3975       mark_bitmap->Clear();
3976     }
3977   }
3978   // Clear the marked objects in the discontinuous space object sets.
3979   for (const auto& space : GetDiscontinuousSpaces()) {
3980     space->GetMarkBitmap()->Clear();
3981   }
3982 }
3983 
3984 void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
3985   allocation_records_.reset(records);
3986 }
3987 
3988 void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
3989   if (IsAllocTrackingEnabled()) {
3990     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
3991     if (IsAllocTrackingEnabled()) {
3992       GetAllocationRecords()->VisitRoots(visitor);
3993     }
3994   }
3995 }
3996 
3997 void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
3998   if (IsAllocTrackingEnabled()) {
3999     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4000     if (IsAllocTrackingEnabled()) {
4001       GetAllocationRecords()->SweepAllocationRecords(visitor);
4002     }
4003   }
4004 }
4005 
4006 void Heap::AllowNewAllocationRecords() const {
4007   CHECK(!kUseReadBarrier);
4008   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4009   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4010   if (allocation_records != nullptr) {
4011     allocation_records->AllowNewAllocationRecords();
4012   }
4013 }
4014 
4015 void Heap::DisallowNewAllocationRecords() const {
4016   CHECK(!kUseReadBarrier);
4017   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4018   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4019   if (allocation_records != nullptr) {
4020     allocation_records->DisallowNewAllocationRecords();
4021   }
4022 }
4023 
4024 void Heap::BroadcastForNewAllocationRecords() const {
4025   CHECK(kUseReadBarrier);
4026   // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
4027   // be set to false while some threads are waiting for system weak access in
4028   // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
4029   MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
4030   AllocRecordObjectMap* allocation_records = GetAllocationRecords();
4031   if (allocation_records != nullptr) {
4032     allocation_records->BroadcastForNewAllocationRecords();
4033   }
4034 }
4035 
4036 // Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
4037 class StackCrawlState {
4038  public:
4039   StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
4040       : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
4041   }
4042   size_t GetFrameCount() const {
4043     return frame_count_;
4044   }
4045   static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
4046     auto* const state = reinterpret_cast<StackCrawlState*>(arg);
4047     const uintptr_t ip = _Unwind_GetIP(context);
4048     // The first stack frame is get_backtrace itself. Skip it.
4049     if (ip != 0 && state->skip_count_ > 0) {
4050       --state->skip_count_;
4051       return _URC_NO_REASON;
4052     }
4053     // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
4054     state->frames_[state->frame_count_] = ip;
4055     state->frame_count_++;
4056     return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
4057   }
4058 
4059  private:
4060   uintptr_t* const frames_;
4061   size_t frame_count_;
4062   const size_t max_depth_;
4063   size_t skip_count_;
4064 };
4065 
4066 static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
4067   StackCrawlState state(frames, max_depth, 0u);
4068   _Unwind_Backtrace(&StackCrawlState::Callback, &state);
4069   return state.GetFrameCount();
4070 }
4071 
4072 void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
4073   auto* const runtime = Runtime::Current();
4074   if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
4075       !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
4076     // Check if we should GC.
4077     bool new_backtrace = false;
4078     {
4079       static constexpr size_t kMaxFrames = 16u;
4080       uintptr_t backtrace[kMaxFrames];
4081       const size_t frames = get_backtrace(backtrace, kMaxFrames);
4082       uint64_t hash = 0;
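      // Mix the return addresses into a single hash. 2654435761 is a prime close to
      // 2^32 / golden ratio, commonly used for Knuth-style multiplicative hashing.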
4083       for (size_t i = 0; i < frames; ++i) {
4084         hash = hash * 2654435761 + backtrace[i];
4085         hash += (hash >> 13) ^ (hash << 6);
4086       }
4087       MutexLock mu(self, *backtrace_lock_);
4088       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
4089       if (new_backtrace) {
4090         seen_backtraces_.insert(hash);
4091       }
4092     }
4093     if (new_backtrace) {
4094       StackHandleScope<1> hs(self);
4095       auto h = hs.NewHandleWrapper(obj);
4096       CollectGarbage(false);
4097       unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4098     } else {
4099       seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
4100     }
4101   }
4102 }
4103 
4104 void Heap::DisableGCForShutdown() {
4105   Thread* const self = Thread::Current();
4106   CHECK(Runtime::Current()->IsShuttingDown(self));
4107   MutexLock mu(self, *gc_complete_lock_);
4108   gc_disabled_for_shutdown_ = true;
4109 }
4110 
4111 bool Heap::ObjectIsInBootImageSpace(mirror::Object* obj) const {
4112   for (gc::space::ImageSpace* space : boot_image_spaces_) {
4113     if (space->HasAddress(obj)) {
4114       return true;
4115     }
4116   }
4117   return false;
4118 }
4119 
4120 bool Heap::IsInBootImageOatFile(const void* p) const {
4121   for (gc::space::ImageSpace* space : boot_image_spaces_) {
4122     if (space->GetOatFile()->Contains(p)) {
4123       return true;
4124     }
4125   }
4126   return false;
4127 }
4128 
4129 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
4130                              uint32_t* boot_image_end,
4131                              uint32_t* boot_oat_begin,
4132                              uint32_t* boot_oat_end) {
4133   DCHECK(boot_image_begin != nullptr);
4134   DCHECK(boot_image_end != nullptr);
4135   DCHECK(boot_oat_begin != nullptr);
4136   DCHECK(boot_oat_end != nullptr);
4137   *boot_image_begin = 0u;
4138   *boot_image_end = 0u;
4139   *boot_oat_begin = 0u;
4140   *boot_oat_end = 0u;
4141   for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
4142     const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
4143     const uint32_t image_size = space_->GetImageHeader().GetImageSize();
4144     if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
4145       *boot_image_begin = image_begin;
4146     }
4147     *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
4148     const OatFile* boot_oat_file = space_->GetOatFile();
4149     const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
4150     const uint32_t oat_size = boot_oat_file->Size();
4151     if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
4152       *boot_oat_begin = oat_begin;
4153     }
4154     *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
4155   }
4156 }
4157 
4158 }  // namespace gc
4159 }  // namespace art
4160