1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_GC_HEAP_H_
18 #define ART_RUNTIME_GC_HEAP_H_
19 
20 #include <iosfwd>
21 #include <string>
22 #include <unordered_set>
23 #include <vector>
24 
25 #include "allocator_type.h"
26 #include "arch/instruction_set.h"
27 #include "atomic.h"
28 #include "base/time_utils.h"
29 #include "base/timing_logger.h"
30 #include "gc/accounting/atomic_stack.h"
31 #include "gc/accounting/card_table.h"
32 #include "gc/accounting/read_barrier_table.h"
33 #include "gc/gc_cause.h"
34 #include "gc/collector/garbage_collector.h"
35 #include "gc/collector/gc_type.h"
36 #include "gc/collector_type.h"
37 #include "gc/space/large_object_space.h"
38 #include "globals.h"
39 #include "jni.h"
40 #include "object_callbacks.h"
41 #include "offsets.h"
42 #include "reference_processor.h"
43 #include "safe_map.h"
44 #include "thread_pool.h"
45 #include "verify_object.h"
46 
47 namespace art {
48 
49 class ConditionVariable;
50 class Mutex;
51 class StackVisitor;
52 class Thread;
53 class TimingLogger;
54 
55 namespace mirror {
56   class Class;
57   class Object;
58 }  // namespace mirror
59 
60 namespace gc {
61 
62 class ReferenceProcessor;
63 class TaskProcessor;
64 
65 namespace accounting {
66   class HeapBitmap;
67   class ModUnionTable;
68   class RememberedSet;
69 }  // namespace accounting
70 
71 namespace collector {
72   class ConcurrentCopying;
73   class GarbageCollector;
74   class MarkCompact;
75   class MarkSweep;
76   class SemiSpace;
77 }  // namespace collector
78 
79 namespace allocator {
80   class RosAlloc;
81 }  // namespace allocator
82 
83 namespace space {
84   class AllocSpace;
85   class BumpPointerSpace;
86   class ContinuousMemMapAllocSpace;
87   class DiscontinuousSpace;
88   class DlMallocSpace;
89   class ImageSpace;
90   class LargeObjectSpace;
91   class MallocSpace;
92   class RegionSpace;
93   class RosAllocSpace;
94   class Space;
95   class SpaceTest;
96   class ZygoteSpace;
97 }  // namespace space
98 
99 class AgeCardVisitor {
100  public:
101   uint8_t operator()(uint8_t card) const {
102     if (card == accounting::CardTable::kCardDirty) {
103       return card - 1;
104     } else {
105       return 0;
106     }
107   }
108 };
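// A minimal usage sketch for the visitor above (illustrative, not part of the original
// header): applied during card "ageing", a dirty card steps down by one and any other
// value is cleared.
//
//   AgeCardVisitor age_card;
//   uint8_t once = age_card(accounting::CardTable::kCardDirty);  // kCardDirty - 1
//   uint8_t twice = age_card(once);                              // 0, no longer tracked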
109 
110 enum HomogeneousSpaceCompactResult {
111   // Success.
112   kSuccess,
113   // Reject due to disabled moving GC.
114   kErrorReject,
115   // System is shutting down.
116   kErrorVMShuttingDown,
117 };
118 
119 // If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
120 static constexpr bool kUseRosAlloc = true;
121 
122 // If true, use thread-local allocation stack.
123 static constexpr bool kUseThreadLocalAllocationStack = true;
124 
125 // The process state passed in from the activity manager, used to determine when to do trimming
126 // and compaction.
127 enum ProcessState {
128   kProcessStateJankPerceptible = 0,
129   kProcessStateJankImperceptible = 1,
130 };
131 std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);
132 
133 class Heap {
134  public:
135   // If true, measure the total allocation time.
136   static constexpr bool kMeasureAllocationTime = false;
137   static constexpr size_t kDefaultStartingSize = kPageSize;
138   static constexpr size_t kDefaultInitialSize = 2 * MB;
139   static constexpr size_t kDefaultMaximumSize = 256 * MB;
140   static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
141   static constexpr size_t kDefaultMaxFree = 2 * MB;
142   static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
143   static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
144   static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
145   static constexpr size_t kDefaultTLABSize = 256 * KB;
146   static constexpr double kDefaultTargetUtilization = 0.5;
147   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
148   // Primitive arrays larger than this size are put in the large object space.
149   static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
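  // With 4 KiB pages this default works out to 3 * 4 KiB = 12 KiB, so (roughly speaking)
  // primitive arrays bigger than that are routed to the large object space.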
150   // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
151   static constexpr bool kDefaultEnableParallelGC = false;
152 
153   // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
154   // is set, since that implies we have to use the slow msync loop in MemMap::MapAnonymous.
155   static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
156       USE_ART_LOW_4G_ALLOCATOR ?
157           space::LargeObjectSpaceType::kFreeList
158         : space::LargeObjectSpaceType::kMap;
159 
160   // Used so that we don't overflow the allocation time atomic integer.
161   static constexpr size_t kTimeAdjust = 1024;
162 
163   // How often we allow heap trimming to happen (nanoseconds).
164   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
165   // How long we wait after a transition request to perform a collector transition (nanoseconds).
166   static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
167 
168   // Create a heap with the requested sizes. The optional image file
169   // name specifies an ImageSpace to load based on ImageWriter
170   // output.
171   explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
172                 size_t max_free, double target_utilization,
173                 double foreground_heap_growth_multiplier, size_t capacity,
174                 size_t non_moving_space_capacity,
175                 const std::string& original_image_file_name,
176                 InstructionSet image_instruction_set,
177                 CollectorType foreground_collector_type, CollectorType background_collector_type,
178                 space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
179                 size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
180                 size_t long_pause_threshold, size_t long_gc_threshold,
181                 bool ignore_max_footprint, bool use_tlab,
182                 bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
183                 bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
184                 bool verify_post_gc_rosalloc, bool gc_stress_mode,
185                 bool use_homogeneous_space_compaction,
186                 uint64_t min_interval_homogeneous_space_compaction_by_oom);
187 
188   ~Heap();
189 
190   // Allocates and initializes storage for an object instance.
191   template <bool kInstrumented, typename PreFenceVisitor>
192   mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
193                               const PreFenceVisitor& pre_fence_visitor)
194       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
195     return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
196                                                          GetCurrentAllocator(),
197                                                          pre_fence_visitor);
198   }
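  // Illustrative sketch only (not part of the original header): a runtime-internal caller
  // that already holds the mutator lock might allocate through this helper roughly as
  //
  //   auto no_op_visitor = [](mirror::Object*, size_t) {};  // assumed pre-fence visitor shape
  //   mirror::Object* obj = heap->AllocObject</*kInstrumented=*/true>(
  //       self, klass, klass->GetObjectSize(), no_op_visitor);
  //
  // where the visitor signature and the use of klass->GetObjectSize() are assumptions made
  // for the example.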
199 
200   template <bool kInstrumented, typename PreFenceVisitor>
201   mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
202                                         const PreFenceVisitor& pre_fence_visitor)
203       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
204     return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
205                                                          GetCurrentNonMovingAllocator(),
206                                                          pre_fence_visitor);
207   }
208 
209   template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
210   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
211       Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
212       const PreFenceVisitor& pre_fence_visitor)
213       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
214 
215   AllocatorType GetCurrentAllocator() const {
216     return current_allocator_;
217   }
218 
219   AllocatorType GetCurrentNonMovingAllocator() const {
220     return current_non_moving_allocator_;
221   }
222 
223   // Visit all of the live objects in the heap.
224   void VisitObjects(ObjectCallback callback, void* arg)
225       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
226       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
227   void VisitObjectsPaused(ObjectCallback callback, void* arg)
228       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
229       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
230 
231   void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
232       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
233 
234   void RegisterNativeAllocation(JNIEnv* env, size_t bytes);
235   void RegisterNativeFree(JNIEnv* env, size_t bytes);
236 
237   // Change the allocator, updates entrypoints.
238   void ChangeAllocator(AllocatorType allocator)
239       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
240       LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
241 
242   // Transition the garbage collector during runtime, may copy objects from one space to another.
243   void TransitionCollector(CollectorType collector_type);
244 
245   // Change the collector to be one of the possible options (MS, CMS, SS).
246   void ChangeCollector(CollectorType collector_type)
247       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
248 
249   // The given reference is believed to point to an object in the Java heap; check its soundness.
250   // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
251   // proper lock ordering for it.
252   void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
253 
254   // Check sanity of all live references.
255   void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
256   // Returns how many failures occurred.
257   size_t VerifyHeapReferences(bool verify_referents = true)
258       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
259   bool VerifyMissingCardMarks()
260       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
261 
262   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
263   // and doesn't abort on error, allowing the caller to report more
264   // meaningful diagnostics.
265   bool IsValidObjectAddress(const mirror::Object* obj) const
266       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
267 
268   // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
269   // very slow.
270   bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
271       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
272 
273   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
274   // Requires the heap lock to be held.
275   bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
276                           bool search_live_stack = true, bool sorted = false)
277       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
278 
279   // Returns true if there is any chance that the object (obj) will move.
280   bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
281 
282   // Temporarily prevent compacting (moving) GC from running until the matching Decrement call.
283   void IncrementDisableMovingGC(Thread* self);
284   void DecrementDisableMovingGC(Thread* self);
285 
286   // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
287   void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
288 
289   // Initiates an explicit garbage collection.
290   void CollectGarbage(bool clear_soft_references);
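  // Hedged usage sketch: an explicit collection request (e.g. one backing System.gc())
  // would typically look like
  //
  //   Runtime::Current()->GetHeap()->CollectGarbage(/*clear_soft_references=*/false);
  //
  // passing true only when soft references should be cleared as well.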
291 
292   // Does a concurrent GC, should only be called by the GC daemon thread
293   // through runtime.
294   void ConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
295 
296   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
297   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
298   void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
299                       uint64_t* counts)
300       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
301       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
302   // Implements JDWP RT_Instances.
303   void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
304       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
305       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
306   // Implements JDWP OR_ReferringObjects.
307   void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects)
308       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
309       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
310 
311   // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
312   // implement dalvik.system.VMRuntime.clearGrowthLimit.
313   void ClearGrowthLimit();
314 
315   // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
316   // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
317   void ClampGrowthLimit() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
318 
319   // Target ideal heap utilization ratio, implements
320   // dalvik.system.VMRuntime.getTargetHeapUtilization.
321   double GetTargetHeapUtilization() const {
322     return target_utilization_;
323   }
324 
325   // Data structure memory usage tracking.
326   void RegisterGCAllocation(size_t bytes);
327   void RegisterGCDeAllocation(size_t bytes);
328 
329   // Set the heap's private space pointers to be the same as the space based on its type. Public
330   // due to usage by tests.
331   void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
332       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
333   void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
334   void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
335 
336   // Set target ideal heap utilization ratio, implements
337   // dalvik.system.VMRuntime.setTargetHeapUtilization.
338   void SetTargetHeapUtilization(float target);
339 
340   // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
341   // from the system. Doesn't allow the space to exceed its growth limit.
342   void SetIdealFootprint(size_t max_allowed_footprint);
343 
344   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
345   // waited for.
346   collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
347       LOCKS_EXCLUDED(gc_complete_lock_);
348 
349   // Update the heap's process state to a new value, may cause compaction to occur.
350   void UpdateProcessState(ProcessState process_state);
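  // Expected calling pattern (an assumption for illustration, not defined here): the runtime
  // forwards activity-manager state changes, e.g.
  //
  //   heap->UpdateProcessState(kProcessStateJankImperceptible);  // app moved to background
  //
  // which may schedule a transition to background_collector_type_ and a heap trim.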
351 
352   const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
353     return continuous_spaces_;
354   }
355 
356   const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
357     return discontinuous_spaces_;
358   }
359 
360   const collector::Iteration* GetCurrentGcIteration() const {
361     return &current_gc_iteration_;
362   }
363   collector::Iteration* GetCurrentGcIteration() {
364     return &current_gc_iteration_;
365   }
366 
367   // Enable verification of object references when the runtime is sufficiently initialized.
368   void EnableObjectValidation() {
369     verify_object_mode_ = kVerifyObjectSupport;
370     if (verify_object_mode_ > kVerifyObjectModeDisabled) {
371       VerifyHeap();
372     }
373   }
374 
375   // Disable object reference verification for image writing.
376   void DisableObjectValidation() {
377     verify_object_mode_ = kVerifyObjectModeDisabled;
378   }
379 
380   // Other checks may be performed if we know the heap should be in a sane state.
381   bool IsObjectValidationEnabled() const {
382     return verify_object_mode_ > kVerifyObjectModeDisabled;
383   }
384 
385   // Returns true if low memory mode is enabled.
386   bool IsLowMemoryMode() const {
387     return low_memory_mode_;
388   }
389 
390   // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
391   // Scales heap growth, min free, and max free.
392   double HeapGrowthMultiplier() const;
393 
394   // Freed bytes can be negative in cases where we copy objects from a compacted space to a
395   // free-list backed space.
396   void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
397 
398   // Record the bytes freed by thread-local buffer revoke.
399   void RecordFreeRevoke();
400 
401   // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
402   // The call is not needed if null is stored in the field.
403   ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
404                                        const mirror::Object* /*new_value*/) {
405     card_table_->MarkCard(dst);
406   }
407 
408   // Write barrier for array operations that update many field positions
409   ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
410                                        size_t /*length TODO: element_count or byte_count?*/) {
411     card_table_->MarkCard(dst);
412   }
413 
414   ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
415     card_table_->MarkCard(obj);
416   }
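  // The three barriers above all just dirty the card covering the destination object. A
  // minimal sketch of the intended pattern after a reference store (illustrative only; the
  // mirror::Object accessors normally take care of this):
  //
  //   dst->SetFieldObjectWithoutWriteBarrier(offset, new_ref);  // assumed raw store helper
  //   if (new_ref != nullptr) {
  //     heap->WriteBarrierField(dst, offset, new_ref);
  //   }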
417 
418   accounting::CardTable* GetCardTable() const {
419     return card_table_.get();
420   }
421 
422   accounting::ReadBarrierTable* GetReadBarrierTable() const {
423     return rb_table_.get();
424   }
425 
426   void AddFinalizerReference(Thread* self, mirror::Object** object);
427 
428   // Returns the number of bytes currently allocated.
429   size_t GetBytesAllocated() const {
430     return num_bytes_allocated_.LoadSequentiallyConsistent();
431   }
432 
433   // Returns the number of objects currently allocated.
434   size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
435 
436   // Returns the total number of objects allocated since the heap was created.
437   uint64_t GetObjectsAllocatedEver() const;
438 
439   // Returns the total number of bytes allocated since the heap was created.
440   uint64_t GetBytesAllocatedEver() const;
441 
442   // Returns the total number of objects freed since the heap was created.
443   uint64_t GetObjectsFreedEver() const {
444     return total_objects_freed_ever_;
445   }
446 
447   // Returns the total number of bytes freed since the heap was created.
448   uint64_t GetBytesFreedEver() const {
449     return total_bytes_freed_ever_;
450   }
451 
452   // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
453   // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
454   // were specified. Android apps start with a growth limit (small heap size) which is
455   // cleared/extended for large apps.
456   size_t GetMaxMemory() const {
457     // There are some race conditions in the allocation code that can cause bytes allocated to
458     // become larger than growth_limit_ in rare cases.
459     return std::max(GetBytesAllocated(), growth_limit_);
460   }
461 
462   // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
463   // consumed by an application.
464   size_t GetTotalMemory() const;
465 
466   // Returns approximately how much free memory we have until the next GC happens.
467   size_t GetFreeMemoryUntilGC() const {
468     return max_allowed_footprint_ - GetBytesAllocated();
469   }
470 
471   // Returns approximately how much free memory we have until the next OOME happens.
472   size_t GetFreeMemoryUntilOOME() const {
473     return growth_limit_ - GetBytesAllocated();
474   }
475 
476   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
477   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
478   size_t GetFreeMemory() const {
479     size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
480     size_t total_memory = GetTotalMemory();
481     // Make sure we don't get a negative number.
482     return total_memory - std::min(total_memory, byte_allocated);
483   }
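  // Taken together, the accessors above back the java.lang.Runtime heap queries roughly as
  // follows (sketch of the usual mapping):
  //
  //   Runtime.maxMemory()   -> GetMaxMemory()    // the growth limit
  //   Runtime.totalMemory() -> GetTotalMemory()  // current footprint of the spaces
  //   Runtime.freeMemory()  -> GetFreeMemory()   // totalMemory() minus bytes allocated
  //
  // so the "used" figure seen from Java is totalMemory() - freeMemory().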
484 
485   // Get the space that corresponds to an object's address. Current implementation searches all
486   // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
487   // TODO: consider using faster data structure like binary tree.
488   space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
489   space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
490                                                               bool fail_ok) const;
491   space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
492 
493   void DumpForSigQuit(std::ostream& os);
494 
495   // Do a pending collector transition.
496   void DoPendingCollectorTransition();
497 
498   // Deflate monitors, ... and trim the spaces.
499   void Trim(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
500 
501   void RevokeThreadLocalBuffers(Thread* thread);
502   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
503   void RevokeAllThreadLocalBuffers();
504   void AssertThreadLocalBuffersAreRevoked(Thread* thread);
505   void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
506   void RosAllocVerification(TimingLogger* timings, const char* name)
507       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
508 
509   accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
510     return live_bitmap_.get();
511   }
512 
513   accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
514     return mark_bitmap_.get();
515   }
516 
517   accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
518     return live_stack_.get();
519   }
520 
521   void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
522 
523   // Mark the objects on the allocation stack and then empty it.
524   void FlushAllocStack()
525       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
526       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
527 
528   // Revoke all the thread-local allocation stacks.
529   void RevokeAllThreadLocalAllocationStacks(Thread* self)
530       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
531       LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
532 
533   // Mark all the objects in the allocation stack in the specified bitmap.
534   // TODO: Refactor?
535   void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
536                       accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
537                       accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
538                       accounting::ObjectStack* stack)
539       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
540       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
541 
542   // Mark the specified allocation stack as live.
543   void MarkAllocStackAsLive(accounting::ObjectStack* stack)
544       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
545       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
546 
547   // Unbind any bound bitmaps.
548   void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
549 
550   // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
551   // Assumes there is only one image space.
552   space::ImageSpace* GetImageSpace() const;
553 
554   // Permanently disable moving garbage collection.
555   void DisableMovingGc();
556 
557   space::DlMallocSpace* GetDlMallocSpace() const {
558     return dlmalloc_space_;
559   }
560 
561   space::RosAllocSpace* GetRosAllocSpace() const {
562     return rosalloc_space_;
563   }
564 
565   // Return the corresponding rosalloc space.
566   space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const;
567 
568   space::MallocSpace* GetNonMovingSpace() const {
569     return non_moving_space_;
570   }
571 
572   space::LargeObjectSpace* GetLargeObjectsSpace() const {
573     return large_object_space_;
574   }
575 
576   // Returns the free list space that may contain movable objects (the
577   // one that's not the non-moving space), either rosalloc_space_ or
578   // dlmalloc_space_.
579   space::MallocSpace* GetPrimaryFreeListSpace() {
580     if (kUseRosAlloc) {
581       DCHECK(rosalloc_space_ != nullptr);
582       // reinterpret_cast is necessary as the space class hierarchy
583       // isn't known (#included) yet here.
584       return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
585     } else {
586       DCHECK(dlmalloc_space_ != nullptr);
587       return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
588     }
589   }
590 
591   std::string DumpSpaces() const WARN_UNUSED;
592   void DumpSpaces(std::ostream& stream) const;
593 
594   // Dump object should only be used by the signal handler.
595   void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
596   // Safe versions which check that objects are valid heap addresses before pretty-printing their types.
597   std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
598   std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
599 
600   // GC performance measuring
601   void DumpGcPerformanceInfo(std::ostream& os);
602   void ResetGcPerformanceInfo();
603 
604   // Returns true if we currently care about pause times.
605   bool CareAboutPauseTimes() const {
606     return process_state_ == kProcessStateJankPerceptible;
607   }
608 
609   // Thread pool.
610   void CreateThreadPool();
611   void DeleteThreadPool();
612   ThreadPool* GetThreadPool() {
613     return thread_pool_.get();
614   }
615   size_t GetParallelGCThreadCount() const {
616     return parallel_gc_threads_;
617   }
618   size_t GetConcGCThreadCount() const {
619     return conc_gc_threads_;
620   }
621   accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
622   void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
623 
624   accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
625   void AddRememberedSet(accounting::RememberedSet* remembered_set);
626   // Also deletes the remembered set.
627   void RemoveRememberedSet(space::Space* space);
628 
629   bool IsCompilingBoot() const;
630   bool HasImageSpace() const;
631 
632   ReferenceProcessor* GetReferenceProcessor() {
633     return &reference_processor_;
634   }
635   TaskProcessor* GetTaskProcessor() {
636     return task_processor_.get();
637   }
638 
639   bool HasZygoteSpace() const {
640     return zygote_space_ != nullptr;
641   }
642 
643   collector::ConcurrentCopying* ConcurrentCopyingCollector() {
644     return concurrent_copying_collector_;
645   }
646 
647   CollectorType CurrentCollectorType() {
648     return collector_type_;
649   }
650 
651   bool IsGcConcurrentAndMoving() const {
652     if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
653       // Assume no transition when a concurrent moving collector is used.
654       DCHECK_EQ(collector_type_, foreground_collector_type_);
655       DCHECK_EQ(foreground_collector_type_, background_collector_type_)
656           << "Assume no transition such that collector_type_ won't change";
657       return true;
658     }
659     return false;
660   }
661 
662   bool IsMovingGCDisabled(Thread* self) {
663     MutexLock mu(self, *gc_complete_lock_);
664     return disable_moving_gc_count_ > 0;
665   }
666 
667   // Request an asynchronous trim.
668   void RequestTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
669 
670   // Request asynchronous GC.
671   void RequestConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(pending_task_lock_);
672 
673   // Whether or not we may use a garbage collector, used so that we only create collectors we need.
674   bool MayUseCollector(CollectorType type) const;
675 
676   // Used by tests to reduce timing-dependent flakiness in OOME behavior.
677   void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
678     min_interval_homogeneous_space_compaction_by_oom_ = interval;
679   }
680 
681   // Helpers for android.os.Debug.getRuntimeStat().
682   uint64_t GetGcCount() const;
683   uint64_t GetGcTime() const;
684   uint64_t GetBlockingGcCount() const;
685   uint64_t GetBlockingGcTime() const;
686   void DumpGcCountRateHistogram(std::ostream& os) const;
687   void DumpBlockingGcCountRateHistogram(std::ostream& os) const;
688 
689  private:
690   class ConcurrentGCTask;
691   class CollectorTransitionTask;
692   class HeapTrimTask;
693 
694   // Compact source space to target space. Returns the collector used.
695   collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
696                                        space::ContinuousMemMapAllocSpace* source_space,
697                                        GcCause gc_cause)
698       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
699 
700   void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
701   void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
702 
703   // Create a mem map with a preferred base address.
704   static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
705                                               size_t capacity, std::string* out_error_str);
706 
707   bool SupportHSpaceCompaction() const {
708     // Returns true if we can do hspace compaction
709     return main_space_backup_ != nullptr;
710   }
711 
712   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
713     return
714         allocator_type != kAllocatorTypeBumpPointer &&
715         allocator_type != kAllocatorTypeTLAB &&
716         allocator_type != kAllocatorTypeRegion &&
717         allocator_type != kAllocatorTypeRegionTLAB;
718   }
719   static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
720     return
721         allocator_type != kAllocatorTypeBumpPointer &&
722         allocator_type != kAllocatorTypeTLAB;
723   }
724   static bool IsMovingGc(CollectorType collector_type) {
725     return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
726         collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC ||
727         collector_type == kCollectorTypeHomogeneousSpaceCompact;
728   }
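  // Quick summary derived from the predicates above: bump-pointer and TLAB allocators use
  // neither an allocation stack nor a concurrent GC; the region allocators also skip the
  // allocation stack but may pair with a concurrent (copying) collector; SS, GSS, CC, MC and
  // homogeneous space compaction are the collector types that can move objects.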
729   bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
730       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
731   ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
732                                        mirror::Object** obj)
733       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
734 
735   accounting::ObjectStack* GetMarkStack() {
736     return mark_stack_.get();
737   }
738 
739   // We don't force this to be inlined since it is a slow path.
740   template <bool kInstrumented, typename PreFenceVisitor>
741   mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
742                                    const PreFenceVisitor& pre_fence_visitor)
743       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
744 
745   // Handles Allocate()'s slow allocation path with GC involved after
746   // an initial allocation attempt failed.
747   mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
748                                          size_t* bytes_allocated, size_t* usable_size,
749                                          size_t* bytes_tl_bulk_allocated,
750                                          mirror::Class** klass)
751       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
752       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
753 
754   // Allocate into a specific space.
755   mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
756                                size_t bytes)
757       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
758 
759   // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
760   // wrong space.
761   void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
762 
763   // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
764   // that the switch statement is constant optimized in the entrypoints.
765   template <const bool kInstrumented, const bool kGrow>
766   ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
767                                               size_t alloc_size, size_t* bytes_allocated,
768                                               size_t* usable_size,
769                                               size_t* bytes_tl_bulk_allocated)
770       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
771 
772   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
773       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
774 
775   template <bool kGrow>
776   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
777 
778   // Returns true if the address passed in is within the address range of a continuous space.
779   bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
780       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
781 
782   // Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
783   void RunFinalization(JNIEnv* env, uint64_t timeout);
784 
785   // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
786   // waited for.
787   collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
788       EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
789 
790   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
791       LOCKS_EXCLUDED(pending_task_lock_);
792 
793   void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
794       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
795   bool IsGCRequestPending() const;
796 
797   // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
798   // which type of GC was actually run.
799   collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
800                                            bool clear_soft_references)
801       LOCKS_EXCLUDED(gc_complete_lock_,
802                      Locks::heap_bitmap_lock_,
803                      Locks::thread_suspend_count_lock_);
804 
805   void PreGcVerification(collector::GarbageCollector* gc)
806       LOCKS_EXCLUDED(Locks::mutator_lock_);
807   void PreGcVerificationPaused(collector::GarbageCollector* gc)
808       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
809   void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
810       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
811   void PreSweepingGcVerification(collector::GarbageCollector* gc)
812       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
813       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
814   void PostGcVerification(collector::GarbageCollector* gc)
815       LOCKS_EXCLUDED(Locks::mutator_lock_);
816   void PostGcVerificationPaused(collector::GarbageCollector* gc)
817       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
818 
819   // Update the watermark for the native allocated bytes based on the current number of native
820   // bytes allocated and the target utilization ratio.
821   void UpdateMaxNativeFootprint();
822 
823   // Find a collector based on GC type.
824   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
825 
826   // Create a new alloc space and compact default alloc space to it.
827   HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact();
828 
829   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
830   void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
831                              size_t capacity);
832 
833   // Create a malloc space based on a mem map. Does not set the space as default.
834   space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
835                                                   size_t growth_limit, size_t capacity,
836                                                   const char* name, bool can_move_objects);
837 
838   // Given the current contents of the alloc space, increase the allowed heap footprint to match
839   // the target utilization ratio.  This should only be called immediately after a full garbage
840   // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
841   // the GC was run.
842   void GrowForUtilization(collector::GarbageCollector* collector_ran,
843                           uint64_t bytes_allocated_before_gc = 0);
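  // Hedged sketch of the sizing rule applied here (the details live in heap.cc): after a
  // full collection the new footprint is approximately
  //
  //   target = bytes_allocated / target_utilization_;
  //   target = std::min(std::max(target, bytes_allocated + min_free_),
  //                     bytes_allocated + max_free_);
  //
  // with min_free_ / max_free_ scaled by HeapGrowthMultiplier() for foreground apps.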
844 
845   size_t GetPercentFree();
846 
847   static void VerificationCallback(mirror::Object* obj, void* arg)
848       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
849 
850   // Swap the allocation stack with the live stack.
851   void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
852 
853   // Clear cards and update the mod union table. When process_alloc_space_cards is true,
854   // if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
855   // not process the alloc space if process_alloc_space_cards is false.
856   void ProcessCards(TimingLogger* timings, bool use_rem_sets, bool process_alloc_space_cards,
857                     bool clear_alloc_space_cards);
858 
859   // Push an object onto the allocation stack.
860   void PushOnAllocationStack(Thread* self, mirror::Object** obj)
861       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
862   void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
863       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
864   void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
865       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
866 
867   void ClearConcurrentGCRequest();
868   void ClearPendingTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
869   void ClearPendingCollectorTransition(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
870 
871   // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
872   // sweep GC, false for other GC types.
873   bool IsGcConcurrent() const ALWAYS_INLINE {
874     return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
875   }
876 
877   // Trim the managed and native spaces by releasing unused memory back to the OS.
878   void TrimSpaces(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
879 
880   // Trim unused pages at the end of the indirect reference tables.
881   void TrimIndirectReferenceTables(Thread* self);
882 
883   void VisitObjectsInternal(ObjectCallback callback, void* arg)
884       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
885       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
886   void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
887       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
888       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
889 
890   void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
891 
892   // GC stress mode attempts to do one GC per unique backtrace.
893   void CheckGcStressMode(Thread* self, mirror::Object** obj)
894       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
895 
896   // All-known continuous spaces, where objects lie within fixed bounds.
897   std::vector<space::ContinuousSpace*> continuous_spaces_;
898 
899   // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
900   std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;
901 
902   // All-known alloc spaces, where objects may be or have been allocated.
903   std::vector<space::AllocSpace*> alloc_spaces_;
904 
905   // A space where non-movable objects are allocated; when compaction is enabled it contains
906   // Classes, ArtMethods, ArtFields, and non-moving objects.
907   space::MallocSpace* non_moving_space_;
908 
909   // Space which we use for the kAllocatorTypeROSAlloc.
910   space::RosAllocSpace* rosalloc_space_;
911 
912   // Space which we use for the kAllocatorTypeDlMalloc.
913   space::DlMallocSpace* dlmalloc_space_;
914 
915   // The main space is the space which the GC copies to and from on process state updates. This
916   // space is typically either the dlmalloc_space_ or the rosalloc_space_.
917   space::MallocSpace* main_space_;
918 
919   // The large object space we are currently allocating into.
920   space::LargeObjectSpace* large_object_space_;
921 
922   // The card table, dirtied by the write barrier.
923   std::unique_ptr<accounting::CardTable> card_table_;
924 
925   std::unique_ptr<accounting::ReadBarrierTable> rb_table_;
926 
927   // A mod-union table remembers all of the references from its space to other spaces.
928   AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
929       mod_union_tables_;
930 
931   // A remembered set remembers all of the references from its space to the target space.
932   AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
933       remembered_sets_;
934 
935   // The current collector type.
936   CollectorType collector_type_;
937   // Which collector we use when the app is in the foreground.
938   CollectorType foreground_collector_type_;
939   // Which collector we will use when the app is notified of a transition to background.
940   CollectorType background_collector_type_;
941   // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
942   CollectorType desired_collector_type_;
943 
944   // Lock which guards pending tasks.
945   Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
946 
947   // How many GC threads we may use for paused parts of garbage collection.
948   const size_t parallel_gc_threads_;
949 
950   // How many GC threads we may use for unpaused parts of garbage collection.
951   const size_t conc_gc_threads_;
952 
953   // Boolean for if we are in low memory mode.
954   const bool low_memory_mode_;
955 
956   // If we get a pause longer than long pause log threshold, then we print out the GC after it
957   // finishes.
958   const size_t long_pause_log_threshold_;
959 
960   // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
961   const size_t long_gc_log_threshold_;
962 
963   // If we ignore the max footprint, the heap is allowed to grow until it hits capacity; this is
964   // useful for benchmarking since it reduces time spent in GC to a low %.
965   const bool ignore_max_footprint_;
966 
967   // Lock which guards zygote space creation.
968   Mutex zygote_creation_lock_;
969 
970   // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
971   // zygote space creation.
972   space::ZygoteSpace* zygote_space_;
973 
974   // Minimum allocation size of large object.
975   size_t large_object_threshold_;
976 
977   // Guards access to the state of GC, associated conditional variable is used to signal when a GC
978   // completes.
979   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
980   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
981 
982   // Reference processor.
983   ReferenceProcessor reference_processor_;
984 
985   // Task processor, proxies heap trim requests to the daemon threads.
986   std::unique_ptr<TaskProcessor> task_processor_;
987 
988   // True while the garbage collector is running.
989   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
990 
991   // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
992   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
993   collector::GcType next_gc_type_;
994 
995   // Maximum size that the heap can reach.
996   size_t capacity_;
997 
998   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
999   // programs it is "cleared" making it the same as capacity.
1000   size_t growth_limit_;
1001 
1002   // When the number of bytes allocated exceeds the footprint, TryAllocate returns null, indicating
1003   // that a GC should be triggered.
1004   size_t max_allowed_footprint_;
1005 
1006   // The watermark at which a concurrent GC is requested by registerNativeAllocation.
1007   size_t native_footprint_gc_watermark_;
1008 
1009   // Whether or not we need to run finalizers in the next native allocation.
1010   bool native_need_to_run_finalization_;
1011 
1012   // Whether or not we currently care about pause times.
1013   ProcessState process_state_;
1014 
1015   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
1016   // it completes ahead of an allocation failing.
1017   size_t concurrent_start_bytes_;
1018 
1019   // Since the heap was created, how many bytes have been freed.
1020   uint64_t total_bytes_freed_ever_;
1021 
1022   // Since the heap was created, how many objects have been freed.
1023   uint64_t total_objects_freed_ever_;
1024 
1025   // Number of bytes allocated.  Adjusted after each allocation and free.
1026   Atomic<size_t> num_bytes_allocated_;
1027 
1028   // Bytes which are allocated and managed by native code but still need to be accounted for.
1029   Atomic<size_t> native_bytes_allocated_;
1030 
1031   // Number of bytes freed by thread local buffer revokes. This will
1032   // cancel out the ahead-of-time bulk counting of bytes allocated in
1033   // rosalloc thread-local buffers.  It is temporarily accumulated
1034   // here to be subtracted from num_bytes_allocated_ later at the next
1035   // GC.
1036   Atomic<size_t> num_bytes_freed_revoke_;
1037 
1038   // Info related to the current or previous GC iteration.
1039   collector::Iteration current_gc_iteration_;
1040 
1041   // Heap verification flags.
1042   const bool verify_missing_card_marks_;
1043   const bool verify_system_weaks_;
1044   const bool verify_pre_gc_heap_;
1045   const bool verify_pre_sweeping_heap_;
1046   const bool verify_post_gc_heap_;
1047   const bool verify_mod_union_table_;
1048   bool verify_pre_gc_rosalloc_;
1049   bool verify_pre_sweeping_rosalloc_;
1050   bool verify_post_gc_rosalloc_;
1051   const bool gc_stress_mode_;
1052 
1053   // RAII that temporarily disables the rosalloc verification during
1054   // the zygote fork.
1055   class ScopedDisableRosAllocVerification {
1056    private:
1057     Heap* const heap_;
1058     const bool orig_verify_pre_gc_;
1059     const bool orig_verify_pre_sweeping_;
1060     const bool orig_verify_post_gc_;
1061 
1062    public:
1063     explicit ScopedDisableRosAllocVerification(Heap* heap)
1064         : heap_(heap),
1065           orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
1066           orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
1067           orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
1068       heap_->verify_pre_gc_rosalloc_ = false;
1069       heap_->verify_pre_sweeping_rosalloc_ = false;
1070       heap_->verify_post_gc_rosalloc_ = false;
1071     }
1072     ~ScopedDisableRosAllocVerification() {
1073       heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
1074       heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
1075       heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
1076     }
1077   };
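  // Usage sketch (an assumption based on the class above): the zygote fork path is expected
  // to stack-allocate one of these so rosalloc verification stays disabled for the duration
  // of PreZygoteFork, e.g.
  //
  //   ScopedDisableRosAllocVerification disable_rosalloc_verification(this);
  //
  // with the destructor restoring the original flags when the scope exits.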
1078 
1079   // Parallel GC data structures.
1080   std::unique_ptr<ThreadPool> thread_pool_;
1081 
1082   // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
1083   // and the start of the current one.
1084   uint64_t allocation_rate_;
1085 
1086   // Bitmaps recording, for a GC cycle, which objects are live and which have been marked.
1087   std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1088   std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
1089 
1090   // Mark stack that we reuse to avoid re-allocating the mark stack.
1091   std::unique_ptr<accounting::ObjectStack> mark_stack_;
1092 
1093   // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
1094   // to use the live bitmap as the old mark bitmap.
1095   const size_t max_allocation_stack_size_;
1096   std::unique_ptr<accounting::ObjectStack> allocation_stack_;
1097 
1098   // Second allocation stack so that we can process allocation with the heap unlocked.
1099   std::unique_ptr<accounting::ObjectStack> live_stack_;
1100 
1101   // Allocator type.
1102   AllocatorType current_allocator_;
1103   const AllocatorType current_non_moving_allocator_;
1104 
1105   // Which GCs we run in order when an allocation fails.
1106   std::vector<collector::GcType> gc_plan_;
1107 
1108   // Bump pointer spaces.
1109   space::BumpPointerSpace* bump_pointer_space_;
1110   // Temp space is the space which the semispace collector copies to.
1111   space::BumpPointerSpace* temp_space_;
1112 
1113   space::RegionSpace* region_space_;
1114 
1115   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
1116   // utilization, regardless of target utilization ratio.
1117   size_t min_free_;
1118 
1119   // The ideal maximum free size, when we grow the heap for utilization.
1120   size_t max_free_;
1121 
1122   // Target ideal heap utilization ratio
1123   double target_utilization_;
1124 
1125   // How much more we grow the heap when we are a foreground app instead of background.
1126   double foreground_heap_growth_multiplier_;
1127 
1128   // Total time which mutators are paused or waiting for GC to complete.
1129   uint64_t total_wait_time_;
1130 
1131   // Total time spent allocating objects, in microseconds.
1132   AtomicInteger total_allocation_time_;
1133 
1134   // The current state of heap verification, may be enabled or disabled.
1135   VerifyObjectMode verify_object_mode_;
1136 
1137   // Compacting GC disable count, prevents compacting GC from running iff > 0.
1138   size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
1139 
1140   std::vector<collector::GarbageCollector*> garbage_collectors_;
1141   collector::SemiSpace* semi_space_collector_;
1142   collector::MarkCompact* mark_compact_collector_;
1143   collector::ConcurrentCopying* concurrent_copying_collector_;
1144 
1145   const bool running_on_valgrind_;
1146   const bool use_tlab_;
1147 
1148   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
1149   // Use unique_ptr since the space is only added during the homogeneous compaction phase.
1150   std::unique_ptr<space::MallocSpace> main_space_backup_;
1151 
1152   // Minimal interval allowed between two homogeneous space compactions caused by OOM.
1153   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
1154 
1155   // Time of the last homogeneous space compaction caused by OOM.
1156   uint64_t last_time_homogeneous_space_compaction_by_oom_;
1157 
1158   // Saved OOMs by homogeneous space compaction.
1159   Atomic<size_t> count_delayed_oom_;
1160 
1161   // Count for requested homogeneous space compaction.
1162   Atomic<size_t> count_requested_homogeneous_space_compaction_;
1163 
1164   // Count for ignored homogeneous space compaction.
1165   Atomic<size_t> count_ignored_homogeneous_space_compaction_;
1166 
1167   // Count for performed homogeneous space compaction.
1168   Atomic<size_t> count_performed_homogeneous_space_compaction_;
1169 
1170   // Whether or not a concurrent GC is pending.
1171   Atomic<bool> concurrent_gc_pending_;
1172 
1173   // Active tasks which we can modify (change target time, desired collector type, etc..).
1174   CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
1175   HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);
1176 
1177   // Whether or not we use homogeneous space compaction to avoid OOM errors.
1178   bool use_homogeneous_space_compaction_for_oom_;
1179 
1180   // True if the currently running collection has made some thread wait.
1181   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
1182   // The number of blocking GC runs.
1183   uint64_t blocking_gc_count_;
1184   // The total duration of blocking GC runs.
1185   uint64_t blocking_gc_time_;
1186   // The duration of the window for the GC count rate histograms.
1187   static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
1188   // The last time when the GC count rate histograms were updated.
1189   // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
1190   uint64_t last_update_time_gc_count_rate_histograms_;
1191   // The running count of GC runs in the last window.
1192   uint64_t gc_count_last_window_;
1193   // The running count of blocking GC runs in the last window.
1194   uint64_t blocking_gc_count_last_window_;
1195   // The maximum number of buckets in the GC count rate histograms.
1196   static constexpr size_t kGcCountRateMaxBucketCount = 200;
1197   // The histogram of the number of GC invocations per window duration.
1198   Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1199   // The histogram of the number of blocking GC invocations per window duration.
1200   Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1201 
1202   // GC stress related data structures.
1203   Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1204   // Debugging variables, seen backtraces vs unique backtraces.
1205   Atomic<uint64_t> seen_backtrace_count_;
1206   Atomic<uint64_t> unique_backtrace_count_;
1207   // Stack trace hashes that we already saw.
1208   std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
1209 
1210   friend class CollectorTransitionTask;
1211   friend class collector::GarbageCollector;
1212   friend class collector::MarkCompact;
1213   friend class collector::ConcurrentCopying;
1214   friend class collector::MarkSweep;
1215   friend class collector::SemiSpace;
1216   friend class ReferenceQueue;
1217   friend class VerifyReferenceCardVisitor;
1218   friend class VerifyReferenceVisitor;
1219   friend class VerifyObjectVisitor;
1220   friend class ScopedHeapFill;
1221   friend class space::SpaceTest;
1222 
1223   class AllocationTimer {
1224    public:
1225     ALWAYS_INLINE AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
1226     ALWAYS_INLINE ~AllocationTimer();
1227    private:
1228     Heap* const heap_;
1229     mirror::Object** allocated_obj_ptr_;
1230     const uint64_t allocation_start_time_;
1231 
1232     DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationTimer);
1233   };
1234 
1235   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
1236 };
1237 
1238 }  // namespace gc
1239 }  // namespace art
1240 
1241 #endif  // ART_RUNTIME_GC_HEAP_H_
1242