/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/space_bitmap.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class RootInfo;

namespace gc {

namespace accounting {
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

// Concurrent queue. Used as the mark stack. TODO: use a concurrent
// stack for locality.
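//
// The implementation is a bounded ring buffer: the capacity must be a power of
// two so that a logical index can be reduced to a slot with a mask (see
// GetSlotAddr()). head_ and tail_ are monotonically increasing counters; the
// entries currently in the queue occupy the logical index range [head_, tail_).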
class MarkQueue {
 public:
  explicit MarkQueue(size_t size) : size_(size) {
    CHECK(IsPowerOfTwo(size_));
    buf_.reset(new Atomic<mirror::Object*>[size_]);
    CHECK(buf_.get() != nullptr);
    Clear();
  }

  ALWAYS_INLINE Atomic<mirror::Object*>* GetSlotAddr(size_t index) {
    return &(buf_.get()[index & (size_ - 1)]);
  }

  // Multiple-producer enqueue.
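  // Producers race to reserve a logical slot by advancing tail_ with a CAS;
  // the winning producer then publishes the reference with a separate store,
  // which the single consumer waits for (see Dequeue()).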
  bool Enqueue(mirror::Object* to_ref) {
    size_t t;
    do {
      t = tail_.LoadRelaxed();
      size_t h = head_.LoadSequentiallyConsistent();
      if (t == h + size_) {
        // It's full.
        return false;
      }
    } while (!tail_.CompareExchangeWeakSequentiallyConsistent(t, t + 1));
    // We got a slot but its content has not been filled yet at this point.
    GetSlotAddr(t)->StoreSequentiallyConsistent(to_ref);
    return true;
  }

  // Thread-unsafe.
  bool EnqueueThreadUnsafe(mirror::Object* to_ref) {
    size_t t = tail_.LoadRelaxed();
    size_t h = head_.LoadRelaxed();
    if (t == h + size_) {
      // It's full.
      return false;
    }
    GetSlotAddr(t)->StoreRelaxed(to_ref);
    tail_.StoreRelaxed(t + 1);
    return true;
  }

  // Single-consumer dequeue.
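  // The consumer may observe a slot that a producer has reserved but not yet
  // published, so a null read below means "not yet visible" rather than
  // "empty"; emptiness is detected by comparing head_ and tail_ first.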
  mirror::Object* Dequeue() {
    size_t h = head_.LoadRelaxed();
    size_t t = tail_.LoadSequentiallyConsistent();
    if (h == t) {
      // It's empty.
      return nullptr;
    }
    Atomic<mirror::Object*>* slot = GetSlotAddr(h);
    mirror::Object* ref = slot->LoadSequentiallyConsistent();
    while (ref == nullptr) {
      // Wait until the slot content becomes visible.
      ref = slot->LoadSequentiallyConsistent();
    }
    slot->StoreRelaxed(nullptr);
    head_.StoreSequentiallyConsistent(h + 1);
    return ref;
  }

  bool IsEmpty() {
    size_t h = head_.LoadSequentiallyConsistent();
    size_t t = tail_.LoadSequentiallyConsistent();
    return h == t;
  }

  void Clear() {
    head_.StoreRelaxed(0);
    tail_.StoreRelaxed(0);
    memset(buf_.get(), 0, size_ * sizeof(Atomic<mirror::Object*>));
  }

 private:
  Atomic<size_t> head_;
  Atomic<size_t> tail_;

  size_t size_;
  std::unique_ptr<Atomic<mirror::Object*>[]> buf_;
};
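
// A minimal usage sketch of MarkQueue (illustrative only; the capacity and
// `obj` are placeholders, not values used by the collector):
//
//   MarkQueue queue(1024);                    // capacity must be a power of two
//   queue.Enqueue(obj);                       // any number of producer threads
//   while (mirror::Object* ref = queue.Dequeue()) {  // a single consumer thread
//     // ... visit `ref` and enqueue the references it points to ...
//   }
//
// Dequeue() returns nullptr when the queue is empty, so the loop above drains
// everything that has been published so far.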

class ConcurrentCopying : public GarbageCollector {
 public:
  // TODO: disable these flags for production use.
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = true;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = true;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = true;

  ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
  ~ConcurrentCopying();

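  // Entry point that drives a collection cycle through the phases below
  // (initialize, mark, reclaim, finish).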
  virtual void RunPhases() OVERRIDE;
  void InitializePhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FinishPhase();

  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  mirror::Object* Mark(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

 private:
  mirror::Object* PopOffMarkStack();
  template<bool kThreadSafe>
  void PushOntoMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* Copy(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  bool ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self, bool concurrent)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* IsMarked(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static mirror::Object* MarkCallback(mirror::Object* from_ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static mirror::Object* IsMarkedCallback(mirror::Object* from_ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static bool IsHeapReferenceMarkedCallback(
      mirror::HeapReference<mirror::Object>* field, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void ProcessMarkStackCallback(void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void ClearBlackPtrs()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CheckEmptyMarkQueue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void IssueEmptyCheckpoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  MarkQueue mark_queue_;
  bool is_marking_;                       // True while marking is ongoing.
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneRegion immune_region_;
  std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_;
  std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_;
  accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  // The size of the live stack recorded when it was frozen (see RecordLiveStackFreezeSize()).
  size_t live_stack_freeze_size_;
  // From-space object/byte counts recorded at the first pause, used for the
  // kEnableFromSpaceAccountingCheck accounting check.
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  // Non-zero while pushes onto the mark queue are disallowed.
  Atomic<int> is_mark_queue_push_disallowed_;

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;

  // The skipped blocks are memory blocks (chunks) that hold copies of objects
  // that went unused due to lost races (CAS failures) when installing the
  // forwarding pointer during object copying. They are reused.
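  // skipped_blocks_map_ maps a block's byte size to its start address so that
  // AllocateInSkippedBlock() can find a sufficiently large block to reuse.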
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.

  friend class ConcurrentCopyingRefFieldsVisitor;
  friend class ConcurrentCopyingImmuneSpaceObjVisitor;
  friend class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor;
  friend class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor;
  friend class ConcurrentCopyingClearBlackPtrsVisitor;
  friend class ConcurrentCopyingLostCopyVisitor;
  friend class ThreadFlipVisitor;
  friend class FlipCallback;
  friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_