/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "garbage_collector.h"
#include "gc/accounting/space_bitmap.h"
#include "immune_spaces.h"
#include "offsets.h"

#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

namespace art {
class Barrier;
class Closure;
class RootInfo;

namespace mirror {
template<class MirrorType> class CompressedReference;
template<class MirrorType> class HeapReference;
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {
class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;
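
  // Illustrative construction and wiring (a sketch only; the real setup lives in Heap, and
  // Heap::GetRegionSpace() is assumed here purely for illustration):
  //
  //   auto* cc = new ConcurrentCopying(heap,
  //                                    /*young_gen=*/false,
  //                                    /*use_generational_cc=*/true);
  //   cc->SetRegionSpace(heap->GetRegionSpace());
  //   cc->RunPhases();  // Drives the phase methods declared below.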

  ConcurrentCopying(Heap* heap,
                    bool young_gen,
                    bool use_generational_cc,
                    const std::string& name_prefix = "",
                    bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  void RunPhases() override
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void CaptureRssAtPeak() REQUIRES(!mark_stack_lock_);
  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  GcType GetGcType() const override {
    return (use_generational_cc_ && young_gen_)
        ? kGcTypeSticky
        : kGcTypePartial;
  }
  CollectorType GetCollectorType() const override {
    return kCollectorTypeCC;
  }
  void RevokeAllThreadLocalBuffers() override;
  // Creates inter-region ref bitmaps for region-space and non-moving-space.
  // Gets called in Heap construction after the two spaces are created.
  void CreateInterRegionRefBitmaps();
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
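  // Returns true if `ref` already satisfies the to-space invariant, i.e. IsMarked() maps it to
  // itself rather than to a forwarded to-space copy (or to null when unmarked).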
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
                                     mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
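  // Illustrative call from a read-barrier slow path (a sketch only; the actual dispatch is wired
  // up through the runtime entrypoints, see ActivateReadBarrierEntrypoints()):
  //
  //   mirror::Object* to_ref = MarkFromReadBarrier(from_ref);
  //   // `to_ref` now satisfies the to-space invariant and can be returned to the mutator.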
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES(!mark_stack_lock_);

  mirror::Object* IsMarked(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AssertNoThreadMarkStackMapping(Thread* thread) REQUIRES(!mark_stack_lock_);

 private:
  void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(Thread* const self,
                       mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  // Scan the reference fields of object `to_ref`.
  template <bool kNoUnEvac>
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Scan the reference fields of object `obj` in the dirty cards during
  // card-table scan. In addition to visiting the references, it also sets the
  // read-barrier state to gray for Reference-type objects to ensure that
  // GetReferent() called on these objects calls the read-barrier on the referent.
  template <bool kNoUnEvac>
  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process a field.
  template <bool kNoUnEvac>
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  template <typename Processor>
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
                                      Closure* checkpoint_callback,
                                      const Processor& processor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                              ObjPtr<mirror::Reference> reference) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkObject(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                         bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                   bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
  // all allocation spaces (except the region space). Sticky-bit GCs just sweep
  // a subset of the heap.
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  // Sweep only pointers within an array.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(Thread* const self, mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  std::string DumpHeapReference(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(Thread* const self,
                                mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(Thread* const self,
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
                                                      mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  template <bool kAtomic = false>
  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  void RemoveThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AssertEmptyThreadMarkStackMap() REQUIRES(mark_stack_lock_);

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // If true, enable generational collection when using the Concurrent Copying
  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
  // for major collections. Generational CC collection is currently only
  // compatible with Baker read barriers. Set in Heap constructor.
  const bool use_generational_cc_;

  // Generational "sticky", only trace through dirty objects in region space.
  const bool young_gen_;
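  // Note: a collection is reported as sticky (kGcTypeSticky) only when both use_generational_cc_
  // and young_gen_ are true; see GetGcType() above.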

  // If true, the GC thread is done scanning marked objects on dirty and aged
  // cards (see ConcurrentCopying::CopyingPhase).
  Atomic<bool> done_scanning_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has already been marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;
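  // Illustrative pattern (a sketch of the use case mentioned above):
  //
  //   if (!rb_mark_bit_stack_->AtomicPushBack(ref)) {
  //     rb_mark_bit_stack_full_ = true;  // Racy hint; the failed push itself is the real signal.
  //   }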

  // Guards access to pooled_mark_stacks_ and revoked_mark_stacks_ vectors.
  // Also guards destruction and revocations of thread-local mark-stacks.
  // Clearing a thread-local mark-stack (by other threads or during destruction)
  // should also be guarded by it.
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  // TODO(lokeshgidra b/140119552): remove this after bug fix.
  std::unordered_map<Thread*, accounting::ObjectStack*> thread_mark_stack_map_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  size_t from_space_num_bytes_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
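  // Typical progression over a collection cycle: kMarkStackModeThreadLocal while mutators run
  // concurrently, then kMarkStackModeShared and finally kMarkStackModeGcExclusive while the
  // remaining stacks are drained (see SwitchToSharedMarkStackMode() and
  // SwitchToGcExclusiveMarkStackMode()), with kMarkStackModeOff outside of collections.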
  Atomic<MarkStackMode> mark_stack_mode_;
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. The GC thread moves many more objects
  // than mutators. Therefore, we separate the two to avoid CAS. bytes_moved_ and
  // bytes_moved_gc_thread_ are critical for GC triggering; the others are just informative.
  Atomic<size_t> bytes_moved_;  // Used by mutators
  Atomic<size_t> objects_moved_;  // Used by mutators
  size_t bytes_moved_gc_thread_;  // Used by GC
  size_t objects_moved_gc_thread_;  // Used by GC
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // copied_live_bytes_ratio_sum_ is read and written by CC per GC, in
  // ReclaimPhase, and is read by DumpPerformanceInfo (potentially from another
  // thread). However, at present, DumpPerformanceInfo is only called when the
  // runtime shuts down, so there is no concurrent access. The same reasoning goes for
  // gc_count_ and reclaimed_bytes_ratio_sum_.

  // The sum of all copied live bytes ratios (to_bytes/from_bytes).
  float copied_live_bytes_ratio_sum_;
  // The number of GCs, used to calculate the average above. (It doesn't
  // include GCs where from_bytes is zero, IOW, from-space is empty, which is
  // possible for minor GC if all allocated objects are in non-moving
  // space.)
  size_t gc_count_;
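  // The reported average copied live bytes ratio is copied_live_bytes_ratio_sum_ / gc_count_.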
  // Bit is set if the corresponding object has inter-region references that
  // were found during the marking phase of a two-phase full-heap GC cycle.
  accounting::ContinuousSpaceBitmap region_space_inter_region_bitmap_;
  accounting::ContinuousSpaceBitmap non_moving_space_inter_region_bitmap_;

  // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
  float reclaimed_bytes_ratio_sum_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that were unused due to lost races (CAS failures) at
  // object copy/forward pointer install. They may be reused.
  // Skipped blocks are always in region space. Their size is included directly
  // in num_bytes_allocated_, i.e. they are treated as allocated, but may be reused
  // without going through a GC cycle, unlike other objects. They are reused only
  // if we run out of region space. TODO: Revisit this design.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;
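  // Illustrative scenario (a sketch): two threads race to copy the same from-space object and to
  // CAS-install the forwarding pointer; the loser's freshly allocated copy becomes a skipped
  // block, recorded in skipped_blocks_map_ keyed by its byte size, and may later be handed out
  // again by AllocateInSkippedBlock().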

  // If measure_read_barrier_slow_path_ is true, we measure how much time is spent in
  // MarkFromReadBarrier and also log it.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
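  // The Atomic counters above are updated lock-free on the measured read-barrier slow path; the
  // histogram and the *_total_ fields are guarded by rb_slow_path_histogram_lock_.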

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithDummyObject can run. Not
  // ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  // Sweep array free buffer, used to sweep the spaces based on an array more
  // efficiently, by recording dead objects to be freed in batches (see
  // ConcurrentCopying::SweepArray).
  MemMap sweep_array_free_buffer_mem_map_;

  // Use signed because after_gc may be larger than before_gc.
  int64_t num_bytes_allocated_before_gc_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  template <bool kNoUnEvac> class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;
  class ImmuneSpaceCaptureRefsVisitor;
  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
  class CaptureThreadRootsForMarkingAndCheckpoint;
  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_