/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <unordered_set>
#include <vector>

#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/time_utils.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "process_state.h"
#include "safe_map.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class AllocRecordObjectMap;
class ReferenceProcessor;
class TaskProcessor;

namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class RememberedSet;
}  // namespace accounting

namespace collector {
  class ConcurrentCopying;
  class GarbageCollector;
  class MarkCompact;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace allocator {
  class RosAlloc;
}  // namespace allocator

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class ContinuousMemMapAllocSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RegionSpace;
  class RosAllocSpace;
  class Space;
  class ZygoteSpace;
}  // namespace space

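// Ages cards: a card still marked kCardDirty is decremented by one ("aged"),
// while any other value is cleared to zero, letting a later collection tell
// cards dirtied since the last GC from older ones. Illustrative mapping:
// kCardDirty -> kCardDirty - 1, everything else -> 0.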
class AgeCardVisitor {
 public:
  uint8_t operator()(uint8_t card) const {
    return (card == accounting::CardTable::kCardDirty) ? card - 1 : 0;
  }
};

enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // Unsupported due to the current configuration.
  kErrorUnsupported,
  // System is shutting down.
  kErrorVMShuttingDown,
};

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

class Heap {
 public:
  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 256 * KB;
  static constexpr double kDefaultTargetUtilization = 0.5;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
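  // (With the usual 4 KiB kPageSize this is 12 KiB.)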
  // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
  static constexpr bool kDefaultEnableParallelGC = false;

  // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
  // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      USE_ART_LOW_4G_ALLOCATOR ?
          space::LargeObjectSpaceType::kFreeList
        : space::LargeObjectSpaceType::kMap;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
  // Create a heap with the requested sizes. The optional, possibly empty image file name
  // specifies Spaces to load based on ImageWriter output.
  Heap(size_t initial_size,
       size_t growth_limit,
       size_t min_free,
       size_t max_free,
       double target_utilization,
       double foreground_heap_growth_multiplier,
       size_t capacity,
       size_t non_moving_space_capacity,
       const std::string& original_image_file_name,
       InstructionSet image_instruction_set,
       CollectorType foreground_collector_type,
       CollectorType background_collector_type,
       space::LargeObjectSpaceType large_object_space_type,
       size_t large_object_threshold,
       size_t parallel_gc_threads,
       size_t conc_gc_threads,
       bool low_memory_mode,
       size_t long_pause_threshold,
       size_t long_gc_threshold,
       bool ignore_max_footprint,
       bool use_tlab,
       bool verify_pre_gc_heap,
       bool verify_pre_sweeping_heap,
       bool verify_post_gc_heap,
       bool verify_pre_gc_rosalloc,
       bool verify_pre_sweeping_rosalloc,
       bool verify_post_gc_rosalloc,
       bool gc_stress_mode,
       bool use_homogeneous_space_compaction,
       uint64_t min_interval_homogeneous_space_compaction_by_oom);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self,
                              mirror::Class* klass,
                              size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(
        self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor);
  }

  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self,
                                        mirror::Class* klass,
                                        size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(
        self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor);
  }

  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
                                                         mirror::Class* klass,
                                                         size_t byte_count,
                                                         AllocatorType allocator,
                                                         const PreFenceVisitor& pre_fence_visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
               !Roles::uninterruptible_);

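  // Illustrative call sequence (a sketch, not part of this API; assumes a
  // current Thread* and a resolved mirror::Class* with a fixed instance size):
  //
  //   auto no_op = [](mirror::Object*, size_t) {};  // trivial pre-fence visitor
  //   mirror::Object* obj = heap->AllocObject</*kInstrumented=*/true>(
  //       self, klass, klass->GetObjectSize(), no_op);
  //
  // The pre-fence visitor runs on the new storage before the fence that
  // publishes the object, which is how fields such as an array length can be
  // set race-free.
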
  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectCallback callback, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void VisitObjectsPaused(ObjectCallback callback, void* arg)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
  void RegisterNativeFree(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);

  // Change the allocator, updates entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);

  // Transition the garbage collector during runtime, may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      REQUIRES(Locks::mutator_lock_);
  // Checks the soundness of a reference that is believed to point to an object in the Java heap.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  bool VerifyMissingCardMarks()
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(mirror::Object* obj,
                          bool search_allocation_stack = true,
                          bool search_live_stack = true,
                          bool sorted = false)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);

  // Temporarily disables moving (compacting) GC until the matching Decrement call, so that raw
  // object pointers remain valid in the interim.
  void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
  void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Temporarily disable thread flip for JNI critical calls.
  void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);

  // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
  // Mutator lock is required for GetContinuousSpaces.
  void ClearMarkedObjects()
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  // Does a concurrent GC, should only be called by the GC daemon thread
  // through runtime.
  void ConcurrentGC(Thread* self, bool force_full)
      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes,
                      bool use_is_assignable_from,
                      uint64_t* counts)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o,
                           int32_t max_count,
                           std::vector<mirror::Object*>& referring_objects)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
  // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
  void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      REQUIRES(!Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);
  void RemoveSpace(space::Space* space)
    REQUIRES(!Locks::heap_bitmap_lock_)
    REQUIRES(Locks::mutator_lock_);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);

  bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
    // No lock since vector empty is thread safe.
    return !continuous_spaces_.empty();
  }

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Record the bytes freed by thread-local buffer revoke.
  void RecordFreeRevoke();

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if null is stored in the field.
  ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst,
                                       MemberOffset offset ATTRIBUTE_UNUSED,
                                       const mirror::Object* new_value ATTRIBUTE_UNUSED) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst,
                                       int start_offset ATTRIBUTE_UNUSED,
                                       // TODO: element_count or byte_count?
                                       size_t length ATTRIBUTE_UNUSED) {
    card_table_->MarkCard(dst);
  }

  ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
    card_table_->MarkCard(obj);
  }
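
  // Sketch of the expected mutator pattern (illustrative, not a definition
  // from this header): after a reference store such as
  //
  //   dst->SetFieldObject(field_offset, new_value);
  //
  // the mutator calls WriteBarrierField(dst, field_offset, new_value), which
  // dirties dst's card so a later GC rescans it for cross-space references.
  // Null stores need no barrier, per the comment above.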

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  accounting::ReadBarrierTable* GetReadBarrierTable() const {
    return rb_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object** object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.LoadSequentiallyConsistent();
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const
      REQUIRES(!Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  uint64_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  uint64_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  uint64_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are races in the allocation path that can cause the bytes allocated to become
    // larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
  // consumed by an application.
  size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return max_allowed_footprint_ - GetBytesAllocated();
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return growth_limit_ - GetBytesAllocated();
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    size_t bytes_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
    size_t total_memory = GetTotalMemory();
    // Make sure we don't get a negative number.
    return total_memory - std::min(total_memory, bytes_allocated);
  }
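
  // Taken together, these back the java.lang.Runtime accessors roughly as:
  //   maxMemory()   -> GetMaxMemory()   (the growth limit)
  //   totalMemory() -> GetTotalMemory() (the current footprint)
  //   freeMemory()  -> GetFreeMemory()  (footprint minus bytes allocated, clamped at zero)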

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);

  // Do a pending collector transition.
  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);

  // Deflate monitors, ... and trim the spaces.
  void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      REQUIRES(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark the objects in the allocation stack and then empty it.
  void FlushAllocStack()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Returns the boot image spaces. There may be multiple boot image spaces.
  const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
    return boot_image_spaces_;
  }

  bool ObjectIsInBootImageSpace(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool IsInBootImageOatFile(const void* p) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  void GetBootImagesSize(uint32_t* boot_image_begin,
                         uint32_t* boot_image_end,
                         uint32_t* boot_oat_begin,
                         uint32_t* boot_oat_end);

  // Permanently disable moving garbage collection.
  void DisableMovingGc() REQUIRES(!*gc_complete_lock_);

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream) const SHARED_REQUIRES(Locks::mutator_lock_);
  std::string DumpSpaces() const SHARED_REQUIRES(Locks::mutator_lock_);

  // Dump object should only be used by the signal handler.
  void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
  // Safe versions of the pretty-printing helpers which check that objects are heap addresses.
  std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
  std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os)
      REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
  void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool HasBootImageSpace() const {
    return !boot_image_spaces_.empty();
  }

  ReferenceProcessor* GetReferenceProcessor() {
    return reference_processor_.get();
  }
  TaskProcessor* GetTaskProcessor() {
    return task_processor_.get();
  }

  bool HasZygoteSpace() const {
    return zygote_space_ != nullptr;
  }

  collector::ConcurrentCopying* ConcurrentCopyingCollector() {
    return concurrent_copying_collector_;
  }

  CollectorType CurrentCollectorType() {
    return collector_type_;
  }

  bool IsGcConcurrentAndMoving() const {
    if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
      // Assume no transition when a concurrent moving collector is used.
      DCHECK_EQ(collector_type_, foreground_collector_type_);
      DCHECK_EQ(foreground_collector_type_, background_collector_type_)
          << "Assume no transition such that collector_type_ won't change";
      return true;
    }
    return false;
  }

  bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
    MutexLock mu(self, *gc_complete_lock_);
    return disable_moving_gc_count_ > 0;
  }

  // Request an asynchronous trim.
  void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);

  // Request asynchronous GC.
  void RequestConcurrentGC(Thread* self, bool force_full) REQUIRES(!*pending_task_lock_);

  // Whether or not we may use a garbage collector, used so that we only create collectors we need.
  bool MayUseCollector(CollectorType type) const;

  // Used by tests to reduce timing-dependent flakiness in OOME behavior.
  void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
    min_interval_homogeneous_space_compaction_by_oom_ = interval;
  }

  // Helpers for android.os.Debug.getRuntimeStat().
  uint64_t GetGcCount() const;
  uint64_t GetGcTime() const;
  uint64_t GetBlockingGcCount() const;
  uint64_t GetBlockingGcTime() const;
  void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
  void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);

  // Allocation tracking support.
  // Callers to this function use double-checked locking to ensure safety on allocation_records_.
  bool IsAllocTrackingEnabled() const {
    return alloc_tracking_enabled_.LoadRelaxed();
  }

  void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
    alloc_tracking_enabled_.StoreRelaxed(enabled);
  }

  AllocRecordObjectMap* GetAllocationRecords() const
      REQUIRES(Locks::alloc_tracker_lock_) {
    return allocation_records_.get();
  }

  void SetAllocationRecords(AllocRecordObjectMap* records)
      REQUIRES(Locks::alloc_tracker_lock_);

  void VisitAllocationRecords(RootVisitor* visitor) const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor) const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisallowNewAllocationRecords() const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void AllowNewAllocationRecords() const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void BroadcastForNewAllocationRecords() const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);

  // Create a new alloc space and compact default alloc space to it.
  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
  bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;

 private:
  class ConcurrentGCTask;
  class CollectorTransitionTask;
  class HeapTrimTask;

  // Compact source space to target space. Returns the collector used.
  collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
                                       space::ContinuousMemMapAllocSpace* source_space,
                                       GcCause gc_cause)
      REQUIRES(Locks::mutator_lock_);

  void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
  void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
      REQUIRES(!*gc_complete_lock_);
  void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);

  // Create a mem map with a preferred base address.
  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
                                              size_t capacity, std::string* out_error_str);

  bool SupportHSpaceCompaction() const {
    // Returns true if we can do hspace compaction.
    return main_space_backup_ != nullptr;
  }

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB &&
        allocator_type != kAllocatorTypeRegion &&
        allocator_type != kAllocatorTypeRegionTLAB;
  }
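  // (Rationale, inferred rather than stated here: the bump-pointer, TLAB and region
  // allocators serve moving collectors that walk their spaces directly, so those
  // allocations skip the allocation stack used for sticky mark bits.)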
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB;
  }
  static bool IsMovingGc(CollectorType collector_type) {
    return
        collector_type == kCollectorTypeSS ||
        collector_type == kCollectorTypeGSS ||
        collector_type == kCollectorTypeCC ||
        collector_type == kCollectorTypeMC ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }
  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
                                       size_t new_num_bytes_allocated,
                                       mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);

  accounting::ObjectStack* GetMarkStack() {
    return mark_stack_.get();
  }

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self,
                                   mirror::Class** klass,
                                   size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after
  // an initial allocation attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self,
                                         AllocatorType allocator,
                                         bool instrumented,
                                         size_t num_bytes,
                                         size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated,
                                         mirror::Class** klass)
      REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self,
                               space::AllocSpace* space,
                               mirror::Class* c,
                               size_t bytes)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
                                              AllocatorType allocator_type,
                                              size_t alloc_size,
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      SHARED_REQUIRES(Locks::mutator_lock_);

  template <bool kGrow>
  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);

  // Returns true if the address passed in is within the address range of a continuous space.
  bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Run the finalizers. If timeout is non-zero, then we use the VMRuntime version.
  void RunFinalization(JNIEnv* env, uint64_t timeout);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      REQUIRES(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      REQUIRES(!*pending_task_lock_);

  void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_);
  bool IsGCRequestPending() const;

  // Sometimes CollectGarbageInternal decides to run a different GC than the one requested.
  // Returns which type of GC was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
                                           GcCause gc_cause,
                                           bool clear_soft_references)
      REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
               !*pending_task_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);

  // Update the watermark for the native allocated bytes based on the current number of native
  // bytes allocated and the target utilization ratio.
  void UpdateMaxNativeFootprint();

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap* mem_map,
                             size_t initial_size,
                             size_t growth_limit,
                             size_t capacity);

  // Create a malloc space based on a mem map. Does not set the space as default.
  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
                                                  size_t initial_size,
                                                  size_t growth_limit,
                                                  size_t capacity,
                                                  const char* name,
                                                  bool can_move_objects);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio.  This should only be called immediately after a full garbage
  // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
  // the GC was run.
  void GrowForUtilization(collector::GarbageCollector* collector_ran,
                          uint64_t bytes_allocated_before_gc = 0);
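
  // Worked example of the sizing policy (a sketch of intent, not a strict contract): with
  // target_utilization_ = 0.5 and 100 MB live after a full GC, the ideal footprint is
  // 100 MB / 0.5 = 200 MB, with the resulting free headroom clamped to [min_free_, max_free_].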

  size_t GetPercentFree();

  static void VerificationCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_);

  // Clear cards and update the mod union table. If process_alloc_space_cards is false, the
  // alloc space is not processed at all; otherwise its cards are cleared when
  // clear_alloc_space_cards is true and aged when it is false.
  void ProcessCards(TimingLogger* timings,
                    bool use_rem_sets,
                    bool process_alloc_space_cards,
                    bool clear_alloc_space_cards)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  void ClearConcurrentGCRequest();
  void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
  void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);

  // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
  // sweep GC, false for other GC types.
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
  }

  // Trim the managed and native spaces by releasing unused memory back to the OS.
  void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Trim unused pages at the ends of indirect reference tables.
  void TrimIndirectReferenceTables(Thread* self);

  void VisitObjectsInternal(ObjectCallback callback, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);

  // GC stress mode attempts to do one GC per unique backtrace.
  void CheckGcStressMode(Thread* self, mirror::Object** obj)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated; when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non-moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for the kAllocatorTypeROSAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for the kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  std::unique_ptr<accounting::CardTable> card_table_;

  std::unique_ptr<accounting::ReadBarrierTable> rb_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
      mod_union_tables_;

  // A remembered set remembers all of the references from its space to the target space.
  AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
      remembered_sets_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we use when the app is in the foreground.
  CollectorType foreground_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;
  // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
  CollectorType desired_collector_type_;

  // Lock which guards pending tasks.
  Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Boolean for if we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than long pause log threshold, then we print out the GC after it
  // finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
  const size_t long_gc_log_threshold_;

  // If we ignore the max footprint, the heap may grow until it hits the heap capacity; this is
  // useful for benchmarking since it reduces the time spent in GC to a low percentage.
  const bool ignore_max_footprint_;

  // Lock which guards zygote space creation.
  Mutex zygote_creation_lock_;

  // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
  // zygote space creation.
  space::ZygoteSpace* zygote_space_;

  // Minimum allocation size of large object.
  size_t large_object_threshold_;

  // Guards access to the state of GC, associated conditional variable is used to signal when a GC
  // completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
  Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
  // This counter keeps track of how many threads are currently in a JNI critical section. This is
  // incremented once per thread even with nested enters.
  size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
  bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);

  // Reference processor.
  std::unique_ptr<ReferenceProcessor> reference_processor_;

  // Task processor, proxies heap trim requests to the daemon threads.
  std::unique_ptr<TaskProcessor> task_processor_;

  // True while the garbage collector is running.
  volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);

  // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared" making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds the footprint, TryAllocate returns null,
  // indicating that a GC should be triggered.
  size_t max_allowed_footprint_;
1137 
1138   // The watermark at which a concurrent GC is requested by registerNativeAllocation.
1139   size_t native_footprint_gc_watermark_;
1140 
1141   // Whether or not we need to run finalizers in the next native allocation.
1142   bool native_need_to_run_finalization_;
1143 
1144   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
1145   // it completes ahead of an allocation failing.
1146   size_t concurrent_start_bytes_;
1147 
1148   // Since the heap was created, how many bytes have been freed.
1149   uint64_t total_bytes_freed_ever_;
1150 
1151   // Since the heap was created, how many objects have been freed.
1152   uint64_t total_objects_freed_ever_;
1153 
1154   // Number of bytes allocated.  Adjusted after each allocation and free.
1155   Atomic<size_t> num_bytes_allocated_;
1156 
1157   // Bytes which are allocated and managed by native code but still need to be accounted for.
1158   Atomic<size_t> native_bytes_allocated_;
1159 
1160   // Native allocation stats.
1161   Mutex native_histogram_lock_;
1162   Histogram<uint64_t> native_allocation_histogram_;
1163   Histogram<uint64_t> native_free_histogram_;
1164 
1165   // Number of bytes freed by thread local buffer revokes. This will
1166   // cancel out the ahead-of-time bulk counting of bytes allocated in
1167   // rosalloc thread-local buffers.  It is temporarily accumulated
1168   // here to be subtracted from num_bytes_allocated_ later at the next
1169   // GC.
1170   Atomic<size_t> num_bytes_freed_revoke_;
1171 
1172   // Info related to the current or previous GC iteration.
1173   collector::Iteration current_gc_iteration_;
1174 
1175   // Heap verification flags.
1176   const bool verify_missing_card_marks_;
1177   const bool verify_system_weaks_;
1178   const bool verify_pre_gc_heap_;
1179   const bool verify_pre_sweeping_heap_;
1180   const bool verify_post_gc_heap_;
1181   const bool verify_mod_union_table_;
1182   bool verify_pre_gc_rosalloc_;
1183   bool verify_pre_sweeping_rosalloc_;
1184   bool verify_post_gc_rosalloc_;
1185   const bool gc_stress_mode_;
1186 
1187   // RAII that temporarily disables the rosalloc verification during
1188   // the zygote fork.
1189   class ScopedDisableRosAllocVerification {
1190    private:
1191     Heap* const heap_;
1192     const bool orig_verify_pre_gc_;
1193     const bool orig_verify_pre_sweeping_;
1194     const bool orig_verify_post_gc_;
1195 
1196    public:
    explicit ScopedDisableRosAllocVerification(Heap* heap)
        : heap_(heap),
          orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
          orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
          orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
      heap_->verify_pre_gc_rosalloc_ = false;
      heap_->verify_pre_sweeping_rosalloc_ = false;
      heap_->verify_post_gc_rosalloc_ = false;
    }
    ~ScopedDisableRosAllocVerification() {
      heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
      heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
      heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
    }
  };
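
  // Usage sketch (the actual call site lives in the .cc file, so the surrounding code here is
  // only an assumption):
  //
  //   {
  //     ScopedDisableRosAllocVerification disable_verification(this);
  //     // ... fork the zygote; rosalloc verification is off within this scope ...
  //   }  // Destructor restores the three saved verification flags.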

  // Parallel GC data structures.
  std::unique_ptr<ThreadPool> thread_pool_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;

  // For a GC cycle, bitmaps that are set corresponding to the live and marked objects,
  // respectively.
  std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  std::unique_ptr<accounting::ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap (see the sketch below).
  const size_t max_allocation_stack_size_;
  std::unique_ptr<accounting::ObjectStack> allocation_stack_;
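
  // Sketch of the sticky-bit scheme (a summary, not code from this file): a sticky GC treats
  // everything the previous GC marked live as still live, reusing the live bitmap as the old
  // mark bitmap, and only scans the objects recorded on allocation_stack_ since then. This
  // behaves like a generational young collection without a dedicated young space.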

  // Second allocation stack so that we can process allocations with the heap unlocked.
  std::unique_ptr<accounting::ObjectStack> live_stack_;

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run, in order, when an allocation fails.
  std::vector<collector::GcType> gc_plan_;
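
  // For example, a mark-sweep configuration would typically try the plan
  // {kGcTypeSticky, kGcTypePartial, kGcTypeFull} in order of increasing cost until the
  // allocation can be satisfied (an assumption: the plan is populated in the .cc file).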

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space which the semispace collector copies to.
  space::BumpPointerSpace* temp_space_;

  space::RegionSpace* region_space_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // How much more we grow the heap when we are a foreground app instead of background.
  double foreground_heap_growth_multiplier_;
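
  // Worked example of growing for utilization (a sketch; the real computation lives in the
  // .cc file and also special-cases sticky GCs): with 30 MB allocated and
  // target_utilization_ = 0.75, the ideal heap size is 30 / 0.75 = 40 MB, i.e. a 10 MB free
  // delta. That delta is clamped to [min_free_, max_free_], and for foreground apps the
  // growth is scaled up by foreground_heap_growth_multiplier_.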

  // Total time for which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // The current state of heap verification; may be enabled or disabled.
  VerifyObjectMode verify_object_mode_;

  // Compacting GC disable count; prevents compacting GC from running iff > 0.
  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;
  collector::MarkCompact* mark_compact_collector_;
  collector::ConcurrentCopying* concurrent_copying_collector_;

  const bool is_running_on_memory_tool_;
  const bool use_tlab_;

  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
  std::unique_ptr<space::MallocSpace> main_space_backup_;

  // Minimum interval allowed between two homogeneous space compactions caused by OOM.
  uint64_t min_interval_homogeneous_space_compaction_by_oom_;

  // Time of the last homogeneous space compaction caused by OOM.
  uint64_t last_time_homogeneous_space_compaction_by_oom_;

  // OOMs avoided by homogeneous space compaction.
  Atomic<size_t> count_delayed_oom_;

  // Count of requested homogeneous space compactions.
  Atomic<size_t> count_requested_homogeneous_space_compaction_;

  // Count of ignored homogeneous space compactions.
  Atomic<size_t> count_ignored_homogeneous_space_compaction_;

  // Count of performed homogeneous space compactions.
  Atomic<size_t> count_performed_homogeneous_space_compaction_;

  // Whether or not a concurrent GC is pending.
  Atomic<bool> concurrent_gc_pending_;

  // Active tasks which we can modify (change target time, desired collector type, etc.).
  CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
  HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);

  // Whether or not we use homogeneous space compaction to avoid OOM errors.
  bool use_homogeneous_space_compaction_for_oom_;

  // True if the currently running collection has made some thread wait.
  bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
  // The number of blocking GC runs.
  uint64_t blocking_gc_count_;
  // The total duration of blocking GC runs.
  uint64_t blocking_gc_time_;
  // The duration of the window for the GC count rate histograms.
  static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
  // The last time the GC count rate histograms were updated.
  // This is rounded to a multiple of kGcCountRateHistogramWindowDuration (10s).
  uint64_t last_update_time_gc_count_rate_histograms_;
  // The running count of GC runs in the last window.
  uint64_t gc_count_last_window_;
  // The running count of blocking GC runs in the last window.
  uint64_t blocking_gc_count_last_window_;
  // The maximum number of buckets in the GC count rate histograms.
  static constexpr size_t kGcCountRateMaxBucketCount = 200;
  // The histogram of the number of GC invocations per window duration.
  Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
  // The histogram of the number of blocking GC invocations per window duration.
  Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
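
  // Example of the windowing (illustrative numbers): with the 10s window above, if 7 GCs ran
  // during the last window and 2 of them were blocking, gc_count_rate_histogram_ records a
  // sample of 7, blocking_gc_count_rate_histogram_ records a sample of 2, and both running
  // counts reset for the next window.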

  // Allocation tracking support.
  Atomic<bool> alloc_tracking_enabled_;
  std::unique_ptr<AllocRecordObjectMap> allocation_records_;

  // GC stress related data structures.
  Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Debugging variables: seen backtraces vs unique backtraces.
  Atomic<uint64_t> seen_backtrace_count_;
  Atomic<uint64_t> unique_backtrace_count_;
  // Stack trace hashes that we have already seen.
  std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);

  // We disable GC when we are shutting down the runtime in case there are daemon threads still
  // allocating.
  bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);

  // Boot image spaces.
  std::vector<space::ImageSpace*> boot_image_spaces_;

  friend class CollectorTransitionTask;
  friend class collector::GarbageCollector;
  friend class collector::MarkCompact;
  friend class collector::ConcurrentCopying;
  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class ReferenceQueue;
  friend class ScopedGCCriticalSection;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_