/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <unordered_set>
#include <vector>

#include <android-base/logging.h>

#include "allocator_type.h"
#include "base/atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/runtime_debug.h"
#include "base/safe_map.h"
#include "base/time_utils.h"
#include "gc/collector/gc_type.h"
#include "gc/collector/iteration.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc/space/image_space_loading_order.h"
#include "gc/space/large_object_space.h"
#include "handle.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "read_barrier_config.h"
#include "runtime_globals.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
enum class InstructionSet;
class IsMarkedVisitor;
class Mutex;
class ReflectiveValueVisitor;
class RootVisitor;
class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;
class VariableSizedHandleScope;

namespace mirror {
class Class;
class Object;
}  // namespace mirror

namespace gc {

class AllocationListener;
class AllocRecordObjectMap;
class GcPauseListener;
class HeapTask;
class ReferenceProcessor;
class TaskProcessor;
class Verification;

namespace accounting {
template <typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
class CardTable;
class HeapBitmap;
class ModUnionTable;
class ReadBarrierTable;
class RememberedSet;
}  // namespace accounting

namespace collector {
class ConcurrentCopying;
class GarbageCollector;
class MarkSweep;
class SemiSpace;
}  // namespace collector

namespace allocator {
class RosAlloc;
}  // namespace allocator

namespace space {
class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class DiscontinuousSpace;
class DlMallocSpace;
class ImageSpace;
class LargeObjectSpace;
class MallocSpace;
class RegionSpace;
class RosAllocSpace;
class Space;
class ZygoteSpace;
}  // namespace space

enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // Unsupported due to the current configuration.
  kErrorUnsupported,
  // System is shutting down.
  kErrorVMShuttingDown,
};

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

class Heap {
 public:
  // How much we grow the TLAB if we can do it.
  static constexpr size_t kPartialTlabSize = 16 * KB;
  static constexpr bool kUsePartialTlabs = true;

  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 32 * KB;
  static constexpr double kDefaultTargetUtilization = 0.75;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
  static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
  // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
  static constexpr bool kDefaultEnableParallelGC = false;
  static uint8_t* const kPreferredAllocSpaceBegin;

  // Whether or not we use the free list large object space. Only used when
  // USE_ART_LOW_4G_ALLOCATOR is set, since that configuration forces the slow msync loop in
  // MemMap::MapAnonymous and makes the map-based large object space expensive.
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      USE_ART_LOW_4G_ALLOCATOR ?
          space::LargeObjectSpaceType::kFreeList
        : space::LargeObjectSpaceType::kMap;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // Client should call NotifyNativeAllocation every kNotifyNativeInterval allocations.
  // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
  // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec.
#ifdef __ANDROID__
  static constexpr uint32_t kNotifyNativeInterval = 32;
#else
  // Some host mallinfo() implementations are slow. And memory is less scarce.
  static constexpr uint32_t kNotifyNativeInterval = 512;
#endif

  // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
  // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
  // make it safe to allocate that many bytes between checks.
  static constexpr size_t kCheckImmediatelyThreshold = 300000;
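  // With the Android value of kNotifyNativeInterval (32), this bounds the native bytes that can
  // be allocated between checks to roughly 300000 * 32 bytes, i.e. about 9.6 MB.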

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
  // Whether the transition-wait applies or not. Zero wait will stress the
  // transition code and collector, but increases jank probability.
  DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);

  // Create a heap with the requested sizes. The possibly empty image_file_name specifies
  // boot image Spaces to load based on ImageWriter output.
  Heap(size_t initial_size,
       size_t growth_limit,
       size_t min_free,
       size_t max_free,
       double target_utilization,
       double foreground_heap_growth_multiplier,
       size_t stop_for_native_allocs,
       size_t capacity,
       size_t non_moving_space_capacity,
       const std::vector<std::string>& boot_class_path,
       const std::vector<std::string>& boot_class_path_locations,
       const std::string& image_file_name,
       InstructionSet image_instruction_set,
       CollectorType foreground_collector_type,
       CollectorType background_collector_type,
       space::LargeObjectSpaceType large_object_space_type,
       size_t large_object_threshold,
       size_t parallel_gc_threads,
       size_t conc_gc_threads,
       bool low_memory_mode,
       size_t long_pause_threshold,
       size_t long_gc_threshold,
       bool ignore_target_footprint,
       bool use_tlab,
       bool verify_pre_gc_heap,
       bool verify_pre_sweeping_heap,
       bool verify_post_gc_heap,
       bool verify_pre_gc_rosalloc,
       bool verify_pre_sweeping_rosalloc,
       bool verify_post_gc_rosalloc,
       bool gc_stress_mode,
       bool measure_gc_performance,
       bool use_homogeneous_space_compaction,
       bool use_generational_cc,
       uint64_t min_interval_homogeneous_space_compaction_by_oom,
       bool dump_region_info_before_gc,
       bool dump_region_info_after_gc,
       space::ImageSpaceLoadingOrder image_space_loading_order);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented = true, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self,
                              ObjPtr<mirror::Class> klass,
                              size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !process_state_update_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented>(self,
                                                   klass,
                                                   num_bytes,
                                                   GetCurrentAllocator(),
                                                   pre_fence_visitor);
  }
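
  // Illustrative usage sketch (not ART code; the names below are assumptions). A caller that
  // already holds the mutator lock allocates an instance of `klass` with a no-op pre-fence
  // visitor:
  //
  //   gc::Heap* heap = Runtime::Current()->GetHeap();
  //   auto no_op = [](ObjPtr<mirror::Object>, size_t /* usable_size */) {};
  //   mirror::Object* obj = heap->AllocObject(self, klass, klass->GetObjectSize(), no_op);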

  template <bool kInstrumented = true, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self,
                                        ObjPtr<mirror::Class> klass,
                                        size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !process_state_update_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented>(self,
                                                   klass,
                                                   num_bytes,
                                                   GetCurrentNonMovingAllocator(),
                                                   pre_fence_visitor);
  }

  template <bool kInstrumented = true, bool kCheckLargeObject = true, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
                                                         ObjPtr<mirror::Class> klass,
                                                         size_t byte_count,
                                                         AllocatorType allocator,
                                                         const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !process_state_update_lock_,
               !Roles::uninterruptible_);

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform the garbage collector of non-malloc-allocated native memory that might become
  // reclaimable in the future as a result of Java garbage collection.
  void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
  void RegisterNativeFree(JNIEnv* env, size_t bytes);

  // Notify the garbage collector of malloc allocations that might be reclaimable
  // as a result of Java garbage collection. Each such call represents approximately
  // kNotifyNativeInterval such allocations.
  void NotifyNativeAllocations(JNIEnv* env)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);

  uint32_t GetNotifyNativeInterval() {
    return kNotifyNativeInterval;
  }
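
  // Sketch of the intended client pattern (the running counter below is an assumption, not part
  // of this class): callers count their native allocations and notify the heap once per
  // interval, so the cost of the underlying mallinfo()-style check is amortized.
  //
  //   if (++native_allocation_count % heap->GetNotifyNativeInterval() == 0) {
  //     heap->NotifyNativeAllocations(env);
  //   }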

  // Change the allocator, updates entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      REQUIRES(Locks::mutator_lock_);

  // The given reference is believed to point to an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  bool VerifyMissingCardMarks()
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
                          bool search_allocation_stack = true,
                          bool search_live_stack = true,
                          bool sorted = false)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Disables moving (compacting) GC while the count is non-zero; moving GC is re-enabled once
  // every Increment has been matched by a Decrement.
  void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
  void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Temporarily disable thread flip for JNI critical calls.
  void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);

  // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
  // Mutator lock is required for GetContinuousSpaces.
  void ClearMarkedObjects()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);

  // Does a concurrent GC, should only be called by the GC daemon thread
  // through runtime.
  void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_,
               !*pending_task_lock_, !process_state_update_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                      bool use_is_assignable_from,
                      uint64_t* counts)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Implements VMDebug.getInstancesOfClasses and JDWP RT_Instances.
  void GetInstances(VariableSizedHandleScope& scope,
                    Handle<mirror::Class> c,
                    bool use_is_assignable_from,
                    int32_t max_count,
                    std::vector<Handle<mirror::Object>>& instances)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(VariableSizedHandleScope& scope,
                           Handle<mirror::Object> o,
                           int32_t max_count,
                           std::vector<Handle<mirror::Object>>& referring_objects)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
  // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
  void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      REQUIRES(!Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);
  void RemoveSpace(space::Space* space)
    REQUIRES(!Locks::heap_bitmap_lock_)
    REQUIRES(Locks::mutator_lock_);

  double GetPreGcWeightedAllocatedBytes() const {
    return pre_gc_weighted_allocated_bytes_;
  }

  double GetPostGcWeightedAllocatedBytes() const {
    return post_gc_weighted_allocated_bytes_;
  }

  void CalculatePreGcWeightedAllocatedBytes();
  void CalculatePostGcWeightedAllocatedBytes();
  uint64_t GetTotalGcCpuTime();

  uint64_t GetProcessCpuStartTime() const {
    return process_cpu_start_time_ns_;
  }

  uint64_t GetPostGCLastProcessCpuTime() const {
    return post_gc_last_process_cpu_time_ns_;
  }

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);

  bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
    // No lock since vector empty is thread safe.
    return !continuous_spaces_.empty();
  }

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Record the bytes freed by thread-local buffer revoke.
  void RecordFreeRevoke();

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  accounting::ReadBarrierTable* GetReadBarrierTable() const {
    return rb_table_.get();
  }

  void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);

  // Returns the number of bytes currently allocated.
  // The result should be treated as an approximation, if it is being concurrently updated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.load(std::memory_order_relaxed);
  }

  bool GetUseGenerationalCC() const {
    return use_generational_cc_;
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const
      REQUIRES(!Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  uint64_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  // With default memory order, this should be viewed only as a hint.
  uint64_t GetObjectsFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
    return total_objects_freed_ever_.load(mo);
  }

  // Returns the total number of bytes freed since the heap was created.
  // With default memory order, this should be viewed only as a hint.
  uint64_t GetBytesFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
    return total_bytes_freed_ever_.load(mo);
  }

  space::RegionSpace* GetRegionSpace() const {
    return region_space_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are some race conditions in the allocation code that can cause bytes allocated to
    // become larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
  // consumed by an application.
  size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
                              GetBytesAllocated());
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return UnsignedDifference(growth_limit_, GetBytesAllocated());
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    return UnsignedDifference(GetTotalMemory(),
                              num_bytes_allocated_.load(std::memory_order_relaxed));
  }
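
  // Worked example with illustrative numbers: if target_footprint_ is 96 MB, growth_limit_ is
  // 256 MB, and 64 MB are currently allocated, then GetFreeMemoryUntilGC() returns 32 MB and
  // GetFreeMemoryUntilOOME() returns 192 MB. UnsignedDifference clamps at zero, so a transient
  // overshoot past target_footprint_ reports 0 rather than underflowing.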

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
                                                              bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromAddress(const void* ptr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  std::string DumpSpaceNameFromAddress(const void* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);

  // Do a pending collector transition.
  void DoPendingCollectorTransition()
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);

  // Deflate monitors, ... and trim the spaces.
  void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      REQUIRES(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty stack.
  void FlushAllocStack()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the boot image spaces. There may be multiple boot image spaces.
  const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
    return boot_image_spaces_;
  }

  bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInBootImageOatFile(const void* p) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the start address of the boot images if any; otherwise returns 0.
  uint32_t GetBootImagesStartAddress() const {
    return boot_images_start_address_;
  }

  // Get the size of all boot images, including the heap and oat areas.
  uint32_t GetBootImagesSize() const {
    return boot_images_size_;
  }

  // Check if a pointer points to a boot image.
  bool IsBootImageAddress(const void* p) const {
    return reinterpret_cast<uintptr_t>(p) - boot_images_start_address_ < boot_images_size_;
  }
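
  // Note: the unsigned arithmetic above makes the single comparison equivalent to
  // (boot_images_start_address_ <= p && p < boot_images_start_address_ + boot_images_size_),
  // because the subtraction wraps around for pointers below the start address.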

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
  std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);

  // GC performance measuring
  void DumpGcPerformanceInfo(std::ostream& os)
      REQUIRES(!*gc_complete_lock_);
  void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool HasBootImageSpace() const {
    return !boot_image_spaces_.empty();
  }

  ReferenceProcessor* GetReferenceProcessor() {
    return reference_processor_.get();
  }
  TaskProcessor* GetTaskProcessor() {
    return task_processor_.get();
  }

  bool HasZygoteSpace() const {
    return zygote_space_ != nullptr;
  }

  // Returns the active concurrent copying collector.
  collector::ConcurrentCopying* ConcurrentCopyingCollector() {
    if (use_generational_cc_) {
      DCHECK((active_concurrent_copying_collector_ == concurrent_copying_collector_) ||
             (active_concurrent_copying_collector_ == young_concurrent_copying_collector_));
    } else {
      DCHECK_EQ(active_concurrent_copying_collector_, concurrent_copying_collector_);
    }
    return active_concurrent_copying_collector_;
  }

  CollectorType CurrentCollectorType() {
    return collector_type_;
  }

  bool IsGcConcurrentAndMoving() const {
    if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
      // Assume no transition when a concurrent moving collector is used.
      DCHECK_EQ(collector_type_, foreground_collector_type_);
      return true;
    }
    return false;
  }

  bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
    MutexLock mu(self, *gc_complete_lock_);
    return disable_moving_gc_count_ > 0;
  }

  // Request an asynchronous trim.
  void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);

  // Request asynchronous GC.
  void RequestConcurrentGC(Thread* self, GcCause cause, bool force_full)
      REQUIRES(!*pending_task_lock_);

  // Whether or not we may use a garbage collector, used so that we only create collectors we need.
  bool MayUseCollector(CollectorType type) const;

  // Used by tests to reduce timing-dependent flakiness in OOME behavior.
  void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
    min_interval_homogeneous_space_compaction_by_oom_ = interval;
  }

  // Helpers for android.os.Debug.getRuntimeStat().
  uint64_t GetGcCount() const;
  uint64_t GetGcTime() const;
  uint64_t GetBlockingGcCount() const;
  uint64_t GetBlockingGcTime() const;
  void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
  void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);

  // Allocation tracking support
  // Callers to this function use double-checked locking to ensure safety on allocation_records_
  bool IsAllocTrackingEnabled() const {
    return alloc_tracking_enabled_.load(std::memory_order_relaxed);
  }

  void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
    alloc_tracking_enabled_.store(enabled, std::memory_order_relaxed);
  }

  // Return the current stack depth of allocation records.
  size_t GetAllocTrackerStackDepth() const {
    return alloc_record_depth_;
  }

  // Set the stack depth recorded for new allocation records.
  void SetAllocTrackerStackDepth(size_t alloc_record_depth) {
    alloc_record_depth_ = alloc_record_depth;
  }

  AllocRecordObjectMap* GetAllocationRecords() const REQUIRES(Locks::alloc_tracker_lock_) {
    return allocation_records_.get();
  }

  void SetAllocationRecords(AllocRecordObjectMap* records)
      REQUIRES(Locks::alloc_tracker_lock_);

  void VisitAllocationRecords(RootVisitor* visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisallowNewAllocationRecords() const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void AllowNewAllocationRecords() const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void BroadcastForNewAllocationRecords() const
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);

  // Create a new alloc space and compact default alloc space to it.
  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
      REQUIRES(!*gc_complete_lock_, !process_state_update_lock_);
  bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;

  // Install an allocation listener.
  void SetAllocationListener(AllocationListener* l);
  // Remove an allocation listener. Note: the listener must not be deleted, as for performance
  // reasons, we assume it stays valid when we read it (so that we don't require a lock).
  void RemoveAllocationListener();

  // Install a gc pause listener.
  void SetGcPauseListener(GcPauseListener* l);
  // Get the currently installed gc pause listener, or null.
  GcPauseListener* GetGcPauseListener() {
    return gc_pause_listener_.load(std::memory_order_acquire);
  }
  // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
  // reasons, we assume it stays valid when we read it (so that we don't require a lock).
  void RemoveGcPauseListener();

  const Verification* GetVerification() const;

  void PostForkChildAction(Thread* self);

  void TraceHeapSize(size_t heap_size);

  bool AddHeapTask(gc::HeapTask* task);

 private:
  class ConcurrentGCTask;
  class CollectorTransitionTask;
  class HeapTrimTask;
  class TriggerPostForkCCGcTask;

  // Compact source space to target space. Returns the collector used.
  collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
                                       space::ContinuousMemMapAllocSpace* source_space,
                                       GcCause gc_cause)
      REQUIRES(Locks::mutator_lock_);

  void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
  void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
      REQUIRES(!*gc_complete_lock_);
  void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);

  double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
                                           uint64_t current_process_cpu_time) const;

  // Create a mem map with a preferred base address.
  static MemMap MapAnonymousPreferredAddress(const char* name,
                                             uint8_t* request_begin,
                                             size_t capacity,
                                             std::string* out_error_str);

  bool SupportHSpaceCompaction() const {
    // Returns true if we can do hspace compaction
    return main_space_backup_ != nullptr;
  }

  // Size_t saturating arithmetic
  static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
    return x > y ? x - y : 0;
  }
  static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
    return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
  }
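  // For example, UnsignedDifference(3, 5) == 0, and UnsignedSum(SIZE_MAX, 1) saturates to
  // SIZE_MAX instead of wrapping around to 0.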

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeRegionTLAB &&
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB &&
        allocator_type != kAllocatorTypeRegion;
  }
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    if (kUseReadBarrier) {
      // Read barrier may have the TLAB allocator but is always concurrent. TODO: clean this up.
      return true;
    }
    return
        allocator_type != kAllocatorTypeTLAB &&
        allocator_type != kAllocatorTypeBumpPointer;
  }
  static bool IsMovingGc(CollectorType collector_type) {
    return
        collector_type == kCollectorTypeCC ||
        collector_type == kCollectorTypeSS ||
        collector_type == kCollectorTypeCCBackground ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }
  bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Checks whether we should garbage collect:
  ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
  float NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent);
  ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
                                              size_t new_num_bytes_allocated,
                                              ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
  void CheckGCForNative(Thread* self)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);

  accounting::ObjectStack* GetMarkStack() {
    return mark_stack_.get();
  }

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self,
                                   ObjPtr<mirror::Class>* klass,
                                   size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
               !*backtrace_lock_, !process_state_update_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after
  // an initial allocation attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self,
                                         AllocatorType allocator,
                                         bool instrumented,
                                         size_t num_bytes,
                                         size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated,
                                         ObjPtr<mirror::Class>* klass)
      REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
      REQUIRES(Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self,
                               space::AllocSpace* space,
                               ObjPtr<mirror::Class> c,
                               size_t bytes)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
                                              AllocatorType allocator_type,
                                              size_t alloc_size,
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* AllocWithNewTLAB(Thread* self,
                                   size_t alloc_size,
                                   bool grow,
                                   size_t* bytes_allocated,
                                   size_t* usable_size,
                                   size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Are we out of memory, and thus should force a GC or fail?
  // For concurrent collectors, out of memory is defined by growth_limit_.
  // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
  // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
  // to accommodate the allocation.
  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                               size_t alloc_size,
                                               bool grow);

  // Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
  void RunFinalization(JNIEnv* env, uint64_t timeout);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      REQUIRES(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      REQUIRES(!*pending_task_lock_);

  void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_);
  bool IsGCRequestPending() const;

  // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
  // which type of Gc was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
                                           GcCause gc_cause,
                                           bool clear_soft_references)
      REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
               !*pending_task_lock_, !process_state_update_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap&& mem_map,
                             size_t initial_size,
                             size_t growth_limit,
                             size_t capacity);

  // Create a malloc space based on a mem map. Does not set the space as default.
  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                  size_t initial_size,
                                                  size_t growth_limit,
                                                  size_t capacity,
                                                  const char* name,
                                                  bool can_move_objects);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio.  This should only be called immediately after a full garbage
  // collection. bytes_allocated_before_gc is used to measure bytes / second for the period during
  // which the GC ran.
  void GrowForUtilization(collector::GarbageCollector* collector_ran,
                          size_t bytes_allocated_before_gc = 0)
      REQUIRES(!process_state_update_lock_);

  size_t GetPercentFree();

  // Swap the allocation stack with the live stack.
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear cards and update the mod union table. When process_alloc_space_cards is true,
  // if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
  // not process the alloc space if process_alloc_space_cards is false.
  void ProcessCards(TimingLogger* timings,
                    bool use_rem_sets,
                    bool process_alloc_space_cards,
                    bool clear_alloc_space_cards)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);

  void ClearConcurrentGCRequest();
  void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
  void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);

  // Returns whether the chosen collector runs concurrently with the mutators: true for the
  // concurrent copying and concurrent mark sweep collectors, false for other GC types.
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCC ||
        collector_type_ == kCollectorTypeCMS ||
        collector_type_ == kCollectorTypeCCBackground;
  }

  // Trim the managed and native spaces by releasing unused memory back to the OS.
  void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Trim zeroed (unused) pages at the end of reference tables.
  void TrimIndirectReferenceTables(Thread* self);

  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);

  // GC stress mode attempts to do one GC per unique backtrace.
  void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
               !*backtrace_lock_, !process_state_update_lock_);

  collector::GcType NonStickyGcType() const {
    return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
  }

  // Return the amount of space we allow for native memory when deciding whether to
  // collect. We collect when a weighted sum of Java memory plus native memory exceeds
  // the similarly weighted sum of the Java heap size target and this value.
  ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
    // We keep the traditional limit of max_free_ in place for small heaps,
    // but allow it to be adjusted upward for large heaps to limit GC overhead.
    return target_footprint_.load(std::memory_order_relaxed) / 8 + max_free_;
  }
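
  // Worked example with illustrative numbers: with a 64 MB target footprint and max_free_ at the
  // kDefaultMaxFree value of 2 MB, the watermark is 64 / 8 + 2 = 10 MB of native memory.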

  ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);

  // On switching app from background to foreground, grow the heap size
  // to incorporate foreground heap growth multiplier.
  void GrowHeapOnJankPerceptibleSwitch() REQUIRES(!process_state_update_lock_);

  // Update *_freed_ever_ counters to reflect current GC values.
  void IncrementFreedEver();

  // Remove a vlog code from heap-inl.h which is transitively included in half the world.
  static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);

  // Return our best approximation of the number of bytes of native memory that
  // are currently in use, and could possibly be reclaimed as an indirect result
  // of a garbage collection.
  size_t GetNativeBytes();

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

1232   // A space where non-movable objects are allocated; when compaction is enabled it contains
1233   // Classes, ArtMethods, ArtFields, and other non-moving objects.
1234   space::MallocSpace* non_moving_space_;
1235 
1236   // Space which we use for the kAllocatorTypeROSAlloc.
1237   space::RosAllocSpace* rosalloc_space_;
1238 
1239   // Space which we use for the kAllocatorTypeDlMalloc.
1240   space::DlMallocSpace* dlmalloc_space_;
1241 
1242   // The main space is the space which the GC copies to and from on process state updates. This
1243   // space is typically either the dlmalloc_space_ or the rosalloc_space_.
1244   space::MallocSpace* main_space_;
1245 
1246   // The large object space we are currently allocating into.
1247   space::LargeObjectSpace* large_object_space_;
1248 
1249   // The card table, dirtied by the write barrier.
1250   std::unique_ptr<accounting::CardTable> card_table_;
1251 
1252   std::unique_ptr<accounting::ReadBarrierTable> rb_table_;
1253 
1254   // A mod-union table remembers all of the references from its space to other spaces.
1255   AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
1256       mod_union_tables_;
1257 
1258   // A remembered set remembers all of the references from its space to the target space.
1259   AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
1260       remembered_sets_;
1261 
1262   // The current collector type.
1263   CollectorType collector_type_;
1264   // Which collector we use when the app is in the foreground.
1265   CollectorType foreground_collector_type_;
1266   // Which collector we will use when the app is notified of a transition to background.
1267   CollectorType background_collector_type_;
1268   // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
1269   CollectorType desired_collector_type_;
1270 
1271   // Lock which guards pending tasks.
1272   Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1273 
1274   // How many GC threads we may use for paused parts of garbage collection.
1275   const size_t parallel_gc_threads_;
1276 
1277   // How many GC threads we may use for unpaused parts of garbage collection.
1278   const size_t conc_gc_threads_;
1279 
1280   // Whether we are in low memory mode.
1281   const bool low_memory_mode_;
1282 
1283   // If we get a pause longer than the long pause log threshold, then we print out the GC after it
1284   // finishes.
1285   const size_t long_pause_log_threshold_;
1286 
1287   // If we get a GC longer than the long GC log threshold, then we print out the GC after it finishes.
1288   const size_t long_gc_log_threshold_;
1289 
1290   // Starting time of the new process; meant to be used for measuring total process CPU time.
1291   uint64_t process_cpu_start_time_ns_;
1292 
1293   // Last time (before and after) GC started; meant to be used to measure the
1294   // duration between two GCs.
1295   uint64_t pre_gc_last_process_cpu_time_ns_;
1296   uint64_t post_gc_last_process_cpu_time_ns_;
1297 
1298   // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
1299   double pre_gc_weighted_allocated_bytes_;
1300   double post_gc_weighted_allocated_bytes_;
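
  // For intuition (hypothetical numbers): with 10 MiB allocated and 2 s of process CPU time
  // elapsed since the last GC, the weighted value is 10 MiB * 2e9 ns, i.e. allocated bytes
  // scaled by the CPU time over which they accumulated.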
1301 
1302   // If we ignore the target footprint, the heap grows until it hits the heap capacity; this
1303   // is useful for benchmarking since it reduces time spent in GC to a low percentage.
1304   const bool ignore_target_footprint_;
1305 
1306   // Lock which guards zygote space creation.
1307   Mutex zygote_creation_lock_;
1308 
1309   // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
1310   // zygote space creation.
1311   space::ZygoteSpace* zygote_space_;
1312 
1313   // Minimum allocation size for an object to be allocated in the large object space.
1314   size_t large_object_threshold_;
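
  // Illustrative (a typical configuration, not a guarantee): with a 12 KiB threshold, a
  // 16 KiB primitive array allocation would be placed in the large object space.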
1315 
1316   // Guards access to the state of GC; the associated condition variable is used to signal
1317   // when a GC completes.
1318   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1319   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
1320 
1321   // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
1322   Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1323   std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
1324   // This counter keeps track of how many threads are currently in a JNI critical section. This is
1325   // incremented once per thread even with nested enters.
1326   size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
1327   bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
1328 
1329   // Reference processor.
1330   std::unique_ptr<ReferenceProcessor> reference_processor_;
1331 
1332   // Task processor, proxies heap trim requests to the daemon threads.
1333   std::unique_ptr<TaskProcessor> task_processor_;
1334 
1335   // Collector type of the running GC.
1336   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
1337 
1338   // Cause of the last running GC.
1339   volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);
1340 
1341   // The thread currently running the GC.
1342   volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);
1343 
1344   // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
1345   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
1346   collector::GcType next_gc_type_;
1347 
1348   // Maximum size that the heap can reach.
1349   size_t capacity_;
1350 
1351   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
1352   // programs it is "cleared", making it the same as capacity.
1353   // Only weakly enforced for simultaneous allocations.
1354   size_t growth_limit_;
1355 
1356   // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
1357   // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
1358   // concurrent GC case.
1359   Atomic<size_t> target_footprint_;
1360 
1361   // Computed with foreground-multiplier in GrowForUtilization() when run in
1362   // jank non-perceptible state. On update to process state from background to
1363   // foreground we set target_footprint_ to this value.
1364   Mutex process_state_update_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1365   size_t min_foreground_target_footprint_ GUARDED_BY(process_state_update_lock_);
1366 
1367   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
1368   // it completes ahead of an allocation failing.
1369   // A multiple of this is also used to determine when to trigger a GC in response to native
1370   // allocation.
1371   size_t concurrent_start_bytes_;
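
  // For example (illustrative only): with a 64 MiB target_footprint_, concurrent_start_bytes_
  // sits somewhat below it, so a concurrent GC requested when num_bytes_allocated_ crosses the
  // threshold can finish before the target footprint itself is exhausted.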
1372 
1373   // Since the heap was created, how many bytes have been freed.
1374   std::atomic<uint64_t> total_bytes_freed_ever_;
1375 
1376   // Since the heap was created, how many objects have been freed.
1377   std::atomic<uint64_t> total_objects_freed_ever_;
1378 
1379   // Number of bytes currently allocated and not yet reclaimed. Includes active
1380   // TLABs in their entirety, even if they have not yet been parceled out.
1381   Atomic<size_t> num_bytes_allocated_;
1382 
1383   // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
1384   // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
1385   // not include bytes allocated through the system malloc, since those are implicitly included.
1386   Atomic<size_t> native_bytes_registered_;
1387 
1388   // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
1389   Atomic<size_t> old_native_bytes_allocated_;
1390 
1391   // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
1392   // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
1393   Atomic<uint32_t> native_objects_notified_;
1394 
1395   // Number of bytes freed by thread local buffer revokes. This will
1396   // cancel out the ahead-of-time bulk counting of bytes allocated in
1397   // rosalloc thread-local buffers.  It is temporarily accumulated
1398   // here to be subtracted from num_bytes_allocated_ later at the next
1399   // GC.
1400   Atomic<size_t> num_bytes_freed_revoke_;
1401 
1402   // Info related to the current or previous GC iteration.
1403   collector::Iteration current_gc_iteration_;
1404 
1405   // Heap verification flags.
1406   const bool verify_missing_card_marks_;
1407   const bool verify_system_weaks_;
1408   const bool verify_pre_gc_heap_;
1409   const bool verify_pre_sweeping_heap_;
1410   const bool verify_post_gc_heap_;
1411   const bool verify_mod_union_table_;
1412   bool verify_pre_gc_rosalloc_;
1413   bool verify_pre_sweeping_rosalloc_;
1414   bool verify_post_gc_rosalloc_;
1415   const bool gc_stress_mode_;
1416 
1417   // RAII that temporarily disables the rosalloc verification during
1418   // the zygote fork.
1419   class ScopedDisableRosAllocVerification {
1420    private:
1421     Heap* const heap_;
1422     const bool orig_verify_pre_gc_;
1423     const bool orig_verify_pre_sweeping_;
1424     const bool orig_verify_post_gc_;
1425 
1426    public:
1427     explicit ScopedDisableRosAllocVerification(Heap* heap)
1428         : heap_(heap),
1429           orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
1430           orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
1431           orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
1432       heap_->verify_pre_gc_rosalloc_ = false;
1433       heap_->verify_pre_sweeping_rosalloc_ = false;
1434       heap_->verify_post_gc_rosalloc_ = false;
1435     }
1436     ~ScopedDisableRosAllocVerification() {
1437       heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
1438       heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
1439       heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
1440     }
1441   };
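
  // A minimal usage sketch (illustrative; the zygote fork path is the intended user):
  //   {
  //     ScopedDisableRosAllocVerification disable_verification(this);
  //     // ... fork the zygote while rosalloc verification is off ...
  //   }  // The destructor restores the saved verification flags.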
1442 
1443   // Parallel GC data structures.
1444   std::unique_ptr<ThreadPool> thread_pool_;
1445 
1446   // A bitmap that is set corresponding to the known live objects since the last GC cycle.
1447   std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1448   // A bitmap that is set corresponding to the marked objects in the current GC cycle.
1449   std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1450 
1451   // Mark stack that we reuse to avoid re-allocating the mark stack.
1452   std::unique_ptr<accounting::ObjectStack> mark_stack_;
1453 
1454   // Allocation stack: new allocations go here so that we can do sticky mark bits. This enables
1455   // us to use the live bitmap as the old mark bitmap.
1456   const size_t max_allocation_stack_size_;
1457   std::unique_ptr<accounting::ObjectStack> allocation_stack_;
1458 
1459   // Second allocation stack so that we can process allocations with the heap unlocked.
1460   std::unique_ptr<accounting::ObjectStack> live_stack_;
1461 
1462   // Allocator type.
1463   AllocatorType current_allocator_;
1464   const AllocatorType current_non_moving_allocator_;
1465 
1466   // Which GCs we run in order when an allocation fails.
1467   std::vector<collector::GcType> gc_plan_;
1468 
1469   // Bump pointer spaces.
1470   space::BumpPointerSpace* bump_pointer_space_;
1471   // Temp space is the space which the semispace collector copies to.
1472   space::BumpPointerSpace* temp_space_;
1473 
1474   // Region space, used by the concurrent collector.
1475   space::RegionSpace* region_space_;
1476 
1477   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
1478   // utilization, regardless of target utilization ratio.
1479   const size_t min_free_;
1480 
1481   // The ideal maximum free size, when we grow the heap for utilization.
1482   const size_t max_free_;
1483 
1484   // Target ideal heap utilization ratio.
1485   double target_utilization_;
1486 
1487   // How much more we grow the heap when we are a foreground app instead of background.
1488   double foreground_heap_growth_multiplier_;
1489 
1490   // The amount of native memory allocation since the last GC required to cause us to wait for a
1491   // collection as a result of native allocation. Very large values can cause the device to run
1492   // out of memory, due to lack of finalization to reclaim native memory.  Making it too small can
1493   // cause jank in apps like launcher that intentionally allocate large amounts of memory in rapid
1494   // succession. (b/122099093) 1/4 to 1/3 of physical memory seems to be a good number.
1495   const size_t stop_for_native_allocs_;
1496 
1497   // Total time which mutators are paused or waiting for GC to complete.
1498   uint64_t total_wait_time_;
1499 
1500   // The current state of heap verification, may be enabled or disabled.
1501   VerifyObjectMode verify_object_mode_;
1502 
1503   // Compacting GC disable count, prevents compacting GC from running iff > 0.
1504   size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
1505 
1506   std::vector<collector::GarbageCollector*> garbage_collectors_;
1507   collector::SemiSpace* semi_space_collector_;
1508   collector::ConcurrentCopying* active_concurrent_copying_collector_;
1509   collector::ConcurrentCopying* young_concurrent_copying_collector_;
1510   collector::ConcurrentCopying* concurrent_copying_collector_;
1511 
1512   const bool is_running_on_memory_tool_;
1513   const bool use_tlab_;
1514 
1515   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
1516   // Use unique_ptr since the space is only added during the homogeneous compaction phase.
1517   std::unique_ptr<space::MallocSpace> main_space_backup_;
1518 
1519   // Minimal interval allowed between two homogeneous space compactions caused by OOM.
1520   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
1521 
1522   // Time of the last homogeneous space compaction caused by OOM.
1523   uint64_t last_time_homogeneous_space_compaction_by_oom_;
1524 
1525   // Number of OOMs avoided by homogeneous space compaction.
1526   Atomic<size_t> count_delayed_oom_;
1527 
1528   // Count for requested homogeneous space compaction.
1529   Atomic<size_t> count_requested_homogeneous_space_compaction_;
1530 
1531   // Count for ignored homogeneous space compaction.
1532   Atomic<size_t> count_ignored_homogeneous_space_compaction_;
1533 
1534   // Count for performed homogeneous space compaction.
1535   Atomic<size_t> count_performed_homogeneous_space_compaction_;
1536 
1537   // Whether or not a concurrent GC is pending.
1538   Atomic<bool> concurrent_gc_pending_;
1539 
1540   // Active tasks which we can modify (change target time, desired collector type, etc.).
1541   CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
1542   HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);
1543 
1544   // Whether or not we use homogeneous space compaction to avoid OOM errors.
1545   bool use_homogeneous_space_compaction_for_oom_;
1546 
1547   // If true, enable generational collection when using the Concurrent Copying
1548   // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
1549   // for major collections. Set in Heap constructor.
1550   const bool use_generational_cc_;
1551 
1552   // True if the currently running collection has made some thread wait.
1553   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
1554   // The number of blocking GC runs.
1555   uint64_t blocking_gc_count_;
1556   // The total duration of blocking GC runs.
1557   uint64_t blocking_gc_time_;
1558   // The duration of the window for the GC count rate histograms.
1559   static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
1560   // Maximum number of missed histogram windows for which statistics will be collected.
1561   static constexpr uint64_t kGcCountRateHistogramMaxNumMissedWindows = 100;
1562   // The last time when the GC count rate histograms were updated.
1563   // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
1564   uint64_t last_update_time_gc_count_rate_histograms_;
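
  // Illustrative rounding (hypothetical timestamp): with the 10 s window above, an update at
  // t = 37 s is attributed to the window beginning at t = 30 s.
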
1565   // The running count of GC runs in the last window.
1566   uint64_t gc_count_last_window_;
1567   // The running count of blocking GC runs in the last window.
1568   uint64_t blocking_gc_count_last_window_;
1569   // The maximum number of buckets in the GC count rate histograms.
1570   static constexpr size_t kGcCountRateMaxBucketCount = 200;
1571   // The histogram of the number of GC invocations per window duration.
1572   Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1573   // The histogram of the number of blocking GC invocations per window duration.
1574   Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1575 
1576   // Allocation tracking support
1577   Atomic<bool> alloc_tracking_enabled_;
1578   std::unique_ptr<AllocRecordObjectMap> allocation_records_;
1579   size_t alloc_record_depth_;
1580 
1581   // GC stress related data structures.
1582   Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1583   // Debugging variables: seen backtraces vs. unique backtraces.
1584   Atomic<uint64_t> seen_backtrace_count_;
1585   Atomic<uint64_t> unique_backtrace_count_;
1586   // Stack trace hashes that we already saw.
1587   std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
1588 
1589   // We disable GC when we are shutting down the runtime in case there are daemon threads still
1590   // allocating.
1591   bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
1592 
1593   // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to
1594   // emit region info before and after each GC cycle.
1595   bool dump_region_info_before_gc_;
1596   bool dump_region_info_after_gc_;
1597 
1598   // Boot image spaces.
1599   std::vector<space::ImageSpace*> boot_image_spaces_;
1600 
1601   // Boot image address range. Includes images and oat files.
1602   uint32_t boot_images_start_address_;
1603   uint32_t boot_images_size_;
1604 
1605   // An installed allocation listener.
1606   Atomic<AllocationListener*> alloc_listener_;
1607   // An installed GC Pause listener.
1608   Atomic<GcPauseListener*> gc_pause_listener_;
1609 
1610   std::unique_ptr<Verification> verification_;
1611 
1612   friend class CollectorTransitionTask;
1613   friend class collector::GarbageCollector;
1614   friend class collector::ConcurrentCopying;
1615   friend class collector::MarkSweep;
1616   friend class collector::SemiSpace;
1617   friend class GCCriticalSection;
1618   friend class ReferenceQueue;
1619   friend class ScopedGCCriticalSection;
1620   friend class ScopedInterruptibleGCCriticalSection;
1621   friend class VerifyReferenceCardVisitor;
1622   friend class VerifyReferenceVisitor;
1623   friend class VerifyObjectVisitor;
1624 
1625   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
1626 };
1627 
1628 }  // namespace gc
1629 }  // namespace art
1630 
1631 #endif  // ART_RUNTIME_GC_HEAP_H_
1632