/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
#define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_

#include <stdint.h>
#include <list>

#include "base/histogram.h"
#include "base/metrics/metrics.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc_root.h"
#include "gc_type.h"
#include "iteration.h"
#include "object_byte_pair.h"
#include "object_callbacks.h"

namespace art {

namespace mirror {
class Class;
class Object;
class Reference;
}  // namespace mirror

namespace gc {

class Heap;

namespace collector {

class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
 public:
  class SCOPED_LOCKABLE ScopedPause {
   public:
    explicit ScopedPause(GarbageCollector* collector, bool with_reporting = true)
        EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
    ~ScopedPause() UNLOCK_FUNCTION();

   private:
    const uint64_t start_time_;
    GarbageCollector* const collector_;
    bool with_reporting_;
  };
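  // A hedged usage sketch (illustrative, not part of this header): ScopedPause
  // is an RAII guard that exclusively acquires Locks::mutator_lock_, stopping
  // the world for its lifetime. A hypothetical stop-the-world phase might
  // look like:
  //
  //   void SomeCollector::PausePhase() {  // SomeCollector is hypothetical
  //     ScopedPause pause(this);  // mutator threads are suspended here
  //     // ... work that must run with the world stopped ...
  //   }  // mutators resume when `pause` is destroyed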

  GarbageCollector(Heap* heap, const std::string& name);
  virtual ~GarbageCollector() { }
  const char* GetName() const {
    return name_.c_str();
  }
  virtual GcType GetGcType() const = 0;
  virtual CollectorType GetCollectorType() const = 0;
  // Run the garbage collector.
  void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_);
  Heap* GetHeap() const {
    return heap_;
  }
  void RegisterPause(uint64_t nano_length);
  const CumulativeLogger& GetCumulativeTimings() const {
    return cumulative_timings_;
  }
  void ResetCumulativeStatistics() REQUIRES(!pause_histogram_lock_);
  // Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
  // this is the allocation space; for full GC, we swap the zygote bitmaps too.
  void SwapBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
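  // A hedged note on why the swap works: after marking, the mark bitmap holds
  // exactly the objects that survived this cycle, so exchanging it with the
  // live bitmap makes the survivors the new live set, while the old live
  // bitmap becomes scratch space for the next round of marking.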
  uint64_t GetTotalCpuTime() const {
    return total_thread_cpu_time_ns_;
  }
  uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
  int64_t GetTotalFreedBytes() const {
    return total_freed_bytes_;
  }
  uint64_t GetTotalFreedObjects() const {
    return total_freed_objects_;
  }
  uint64_t GetTotalScannedBytes() const {
    return total_scanned_bytes_;
  }
  // Reset the cumulative timings and pause histogram.
  void ResetMeasurements() REQUIRES(!pause_histogram_lock_);
  // Returns the estimated throughput in bytes / second.
  uint64_t GetEstimatedMeanThroughput() const;
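  // (A hedged reading: plausibly total_freed_bytes_ divided by the cumulative
  // collector running time; see the definition in the .cc for the exact
  // formula and its guard against dividing by zero.)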
  // Returns how many GC iterations have been run.
  size_t NumberOfIterations() const {
    return GetCumulativeTimings().GetIterations();
  }
  // Returns the current GC iteration and associated info.
  Iteration* GetCurrentIteration();
  const Iteration* GetCurrentIteration() const;
  TimingLogger* GetTimings() {
    return &GetCurrentIteration()->timings_;
  }
  // Record a free of normal objects.
  void RecordFree(const ObjectBytePair& freed);
  // Record a free of large objects.
  void RecordFreeLOS(const ObjectBytePair& freed);
  virtual void DumpPerformanceInfo(std::ostream& os) REQUIRES(!pause_histogram_lock_);

  // Extract RSS for GC-specific memory ranges using mincore().
  uint64_t ExtractRssFromMincore(std::list<std::pair<void*, void*>>* gc_ranges);
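  // A hedged sketch of the mincore()-based approach (illustrative only; the
  // names and page-size handling are assumptions, not this method's code).
  // mincore() fills one byte per page, with the low bit set when the page is
  // resident:
  //
  //   #include <sys/mman.h>
  //   #include <unistd.h>
  //
  //   uint64_t rss_bytes = 0;
  //   const size_t page_size = sysconf(_SC_PAGESIZE);
  //   for (const auto& range : *gc_ranges) {
  //     uint8_t* begin = static_cast<uint8_t*>(range.first);
  //     size_t length = static_cast<uint8_t*>(range.second) - begin;
  //     std::vector<unsigned char> vec((length + page_size - 1) / page_size);
  //     if (mincore(begin, length, vec.data()) == 0) {
  //       for (unsigned char page : vec) {
  //         if (page & 1) rss_bytes += page_size;  // page is resident
  //       }
  //     }
  //   }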

  // Helper functions for querying if objects are marked. These are used for processing references,
  // and will be used for reading system weaks while the GC is running.
  virtual mirror::Object* IsMarked(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Returns true if the given heap reference is null or already marked; if it is already marked,
  // also updates the reference (using a CAS when do_atomic_update is true). Otherwise returns
  // false.
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
                                           bool do_atomic_update)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Used by reference processor.
  virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  // Force mark an object.
  virtual mirror::Object* MarkObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
                                 bool do_atomic_update)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
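  // A hedged sketch of how a reference processor might use this interface
  // (illustrative; `referent` stands for the referent field of a
  // java.lang.ref.Reference being processed, an assumption of this sketch):
  //
  //   mirror::HeapReference<mirror::Object>* referent = ...;
  //   if (!IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
  //     // The referent is unreachable so far; either clear/enqueue the
  //     // reference, or, when keeping it alive (e.g. a retained soft
  //     // reference), mark through it and trace:
  //     MarkHeapReference(referent, /*do_atomic_update=*/true);
  //     ProcessMarkStack();  // drain newly marked objects
  //   }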

  bool IsTransactionActive() const {
    return is_transaction_active_;
  }

 protected:
  // Run all of the GC phases.
  virtual void RunPhases() = 0;
  // Revoke all the thread-local buffers.
  virtual void RevokeAllThreadLocalBuffers() = 0;

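  // Bucket width and bucket count for the cumulative histograms below (a
  // hedged reading: kPauseBucket* shape pause_histogram_, and kMemBucket*
  // plausibly shape rss_histogram_ and freed_bytes_histogram_).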
  static constexpr size_t kPauseBucketSize = 500;
  static constexpr size_t kPauseBucketCount = 32;
  static constexpr size_t kMemBucketSize = 10;
  static constexpr size_t kMemBucketCount = 16;

  Heap* const heap_;
  std::string name_;
  // Cumulative statistics.
  Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
  Histogram<uint64_t> rss_histogram_;
  Histogram<size_t> freed_bytes_histogram_;
  metrics::MetricsBase<int64_t>* gc_time_histogram_;
  metrics::MetricsBase<uint64_t>* metrics_gc_count_;
  metrics::MetricsBase<int64_t>* gc_throughput_histogram_;
  metrics::MetricsBase<int64_t>* gc_tracing_throughput_hist_;
  metrics::MetricsBase<uint64_t>* gc_throughput_avg_;
  metrics::MetricsBase<uint64_t>* gc_tracing_throughput_avg_;
  uint64_t total_thread_cpu_time_ns_;
  uint64_t total_time_ns_;
  uint64_t total_freed_objects_;
  int64_t total_freed_bytes_;
  uint64_t total_scanned_bytes_;
  CumulativeLogger cumulative_timings_;
  mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  bool is_transaction_active_;
  // The garbage collector algorithms will either have all the metrics pointers
  // (above) initialized, or none of them. So instead of checking each time, we
  // use this flag.
  bool are_metrics_initialized_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(GarbageCollector);
};
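
// A hedged sketch (illustrative, not part of this header) of how a concrete
// collector plugs in: Run() drives a collection and invokes RunPhases(), which
// the subclass implements along with the marking interface. A hypothetical
// minimal subclass might look like:
//
//   class HypotheticalCollector : public GarbageCollector {
//    public:
//     explicit HypotheticalCollector(Heap* heap)
//         : GarbageCollector(heap, "hypothetical") {}
//     GcType GetGcType() const override { return kGcTypeFull; }
//     CollectorType GetCollectorType() const override {
//       return kCollectorTypeMS;  // assumed enumerator; see gc/collector_type.h
//     }
//     // ... IsMarked, MarkObject, ProcessMarkStack, etc. ...
//    protected:
//     void RunPhases() override {
//       // e.g. initialize -> mark -> sweep/reclaim -> finish
//     }
//     void RevokeAllThreadLocalBuffers() override {}
//   };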

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_