/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
#define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_

#include <stdint.h>
#include <vector>

#include "base/histogram.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc_root.h"
#include "gc_type.h"
#include "object_callbacks.h"

namespace art {

namespace mirror {
class Class;
class Object;
class Reference;
}  // namespace mirror

namespace gc {

class Heap;

namespace collector {

struct ObjectBytePair {
  explicit ObjectBytePair(uint64_t num_objects = 0, int64_t num_bytes = 0)
      : objects(num_objects), bytes(num_bytes) {}
  void Add(const ObjectBytePair& other) {
    objects += other.objects;
    bytes += other.bytes;
  }
  // Number of objects which were freed.
  uint64_t objects;
  // Freed bytes are signed since the GC can free negative bytes if it promotes objects to a space
  // which has a larger allocation size.
  int64_t bytes;
};
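
// Example (illustrative only): the signed |bytes| field allows a net-negative result. If an
// object occupying 24 bytes is promoted into a space that rounds its allocation up to 32 bytes,
// the promotion contributes 24 - 32 = -8 "freed" bytes:
//
//   ObjectBytePair freed;                               // running total for an iteration
//   freed.Add(ObjectBytePair(/*num_objects=*/ 0, -8));  // promotion made the heap 8 bytes bigger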

// Information related to a single garbage collector iteration. Since we only ever have one GC
// running at any given time, we can have a single iteration info.
class Iteration {
 public:
  Iteration();
  // Returns how long the mutators were paused in nanoseconds.
  const std::vector<uint64_t>& GetPauseTimes() const {
    return pause_times_;
  }
  TimingLogger* GetTimings() {
    return &timings_;
  }
  // Returns how long the GC took to complete in nanoseconds.
  uint64_t GetDurationNs() const {
    return duration_ns_;
  }
  int64_t GetFreedBytes() const {
    return freed_.bytes;
  }
  int64_t GetFreedLargeObjectBytes() const {
    return freed_los_.bytes;
  }
  uint64_t GetFreedObjects() const {
    return freed_.objects;
  }
  uint64_t GetFreedLargeObjects() const {
    return freed_los_.objects;
  }
  uint64_t GetFreedRevokeBytes() const {
    return freed_bytes_revoke_;
  }
  void SetFreedRevoke(uint64_t freed) {
    freed_bytes_revoke_ = freed;
  }
  void Reset(GcCause gc_cause, bool clear_soft_references);
  // Returns the estimated throughput of the iteration.
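  // (Roughly, bytes freed per unit of iteration duration, i.e. bytes per second; the exact
  // formula is an implementation detail of the .cc file.)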
  uint64_t GetEstimatedThroughput() const;
  bool GetClearSoftReferences() const {
    return clear_soft_references_;
  }
  void SetClearSoftReferences(bool clear_soft_references) {
    clear_soft_references_ = clear_soft_references;
  }
  GcCause GetGcCause() const {
    return gc_cause_;
  }

 private:
  void SetDurationNs(uint64_t duration) {
    duration_ns_ = duration;
  }

  GcCause gc_cause_;
  bool clear_soft_references_;
  uint64_t duration_ns_;
  TimingLogger timings_;
  ObjectBytePair freed_;
  ObjectBytePair freed_los_;
  uint64_t freed_bytes_revoke_;  // see Heap::num_bytes_freed_revoke_.
  std::vector<uint64_t> pause_times_;

  friend class GarbageCollector;
  DISALLOW_COPY_AND_ASSIGN(Iteration);
};
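
// Illustrative (hypothetical) read of a completed iteration's statistics, using the accessors
// declared above; |collector| stands for a GarbageCollector* (declared below):
//
//   const Iteration* iter = collector->GetCurrentIteration();
//   int64_t total_freed = iter->GetFreedBytes() + iter->GetFreedLargeObjectBytes();
//   uint64_t bytes_per_second = iter->GetEstimatedThroughput();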

class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
 public:
  class SCOPED_LOCKABLE ScopedPause {
   public:
    explicit ScopedPause(GarbageCollector* collector, bool with_reporting = true)
        EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
    ~ScopedPause() UNLOCK_FUNCTION();

   private:
    const uint64_t start_time_;
    GarbageCollector* const collector_;
    bool with_reporting_;
  };
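
  // Illustrative use (a sketch, not a prescribed pattern): a collector can bracket work that
  // must not run concurrently with mutator threads in a ScopedPause, which per its lock
  // annotations holds Locks::mutator_lock_ exclusively for the duration of the scope:
  //
  //   {
  //     ScopedPause pause(this);
  //     // ... work that requires suspended mutators ...
  //   }  // Mutators resume here.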

  GarbageCollector(Heap* heap, const std::string& name);
  virtual ~GarbageCollector() { }
  const char* GetName() const {
    return name_.c_str();
  }
  virtual GcType GetGcType() const = 0;
  virtual CollectorType GetCollectorType() const = 0;
  // Run the garbage collector.
  void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_);
  Heap* GetHeap() const {
    return heap_;
  }
  void RegisterPause(uint64_t nano_length);
  const CumulativeLogger& GetCumulativeTimings() const {
    return cumulative_timings_;
  }
  void ResetCumulativeStatistics() REQUIRES(!pause_histogram_lock_);
  // Swap the live and mark bitmaps of spaces that are active for the collector. For a partial GC
  // this is the allocation space; for a full GC we swap the zygote bitmaps too.
  void SwapBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
  int64_t GetTotalFreedBytes() const {
    return total_freed_bytes_;
  }
  uint64_t GetTotalFreedObjects() const {
    return total_freed_objects_;
  }
  // Reset the cumulative timings and pause histogram.
  void ResetMeasurements() REQUIRES(!pause_histogram_lock_);
  // Returns the estimated throughput in bytes / second.
  uint64_t GetEstimatedMeanThroughput() const;
  // Returns how many GC iterations have been run.
  size_t NumberOfIterations() const {
    return GetCumulativeTimings().GetIterations();
  }
  // Returns the current GC iteration and associated info.
  Iteration* GetCurrentIteration();
  const Iteration* GetCurrentIteration() const;
  TimingLogger* GetTimings() {
    return &GetCurrentIteration()->timings_;
  }
  // Record a free of normal objects.
  void RecordFree(const ObjectBytePair& freed);
  // Record a free of large objects.
  void RecordFreeLOS(const ObjectBytePair& freed);
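
  // Example (illustrative only; |swept_objects| and |swept_bytes| are hypothetical counters a
  // concrete collector might produce while sweeping a space):
  //
  //   RecordFree(ObjectBytePair(swept_objects, swept_bytes));
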
  virtual void DumpPerformanceInfo(std::ostream& os) REQUIRES(!pause_histogram_lock_);

  // Helper functions for querying if objects are marked. These are used for processing references,
  // and will be used for reading system weaks while the GC is running.
  virtual mirror::Object* IsMarked(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Returns true if the given heap reference is null or is already marked. If it's already marked,
  // updates the reference (using a CAS if do_atomic_update is true). Otherwise, returns false.
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
                                           bool do_atomic_update)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Used by reference processor.
  virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Force mark an object.
  virtual mirror::Object* MarkObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
                                 bool do_atomic_update)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
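
  // Illustrative (not normative) sketch of how reference processing can use the interface above
  // when deciding whether a java.lang.ref.Reference's referent is still reachable:
  //
  //   mirror::HeapReference<mirror::Object>* referent_field = ...;  // hypothetical
  //   if (!IsNullOrMarkedHeapReference(referent_field, /*do_atomic_update=*/ false)) {
  //     // The referent has not been marked yet; defer the Reference for possible clearing.
  //   }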

  bool IsTransactionActive() const {
    return is_transaction_active_;
  }

 protected:
  // Run all of the GC phases.
  virtual void RunPhases() = 0;
  // Revoke all the thread-local buffers.
  virtual void RevokeAllThreadLocalBuffers() = 0;
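
  // Illustrative subclass shape (hypothetical method names), showing how a concrete collector
  // typically splits its work across RunPhases():
  //
  //   void MyCollector::RunPhases() {
  //     InitializePhase();
  //     MarkingPhase();    // possibly concurrent, or under a ScopedPause
  //     ReclaimPhase();
  //     FinishPhase();
  //   }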

  static constexpr size_t kPauseBucketSize = 500;
  static constexpr size_t kPauseBucketCount = 32;

  Heap* const heap_;
  std::string name_;
  // Cumulative statistics.
  Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
  uint64_t total_time_ns_;
  uint64_t total_freed_objects_;
  int64_t total_freed_bytes_;
  CumulativeLogger cumulative_timings_;
  mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  bool is_transaction_active_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(GarbageCollector);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_