/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>

#include "garbage_collector.h"

#include "android-base/stringprintf.h"

#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"

namespace art {
namespace gc {
namespace collector {

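// Construct an empty iteration; Reset() is called immediately to fill in placeholder values
// until a real GC starts.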
Iteration::Iteration()
    : duration_ns_(0), timings_("GC iteration timing logger", true, VLOG_IS_ON(heap)) {
  Reset(kGcCauseBackground, false);  // Reset to some placeholder values.
}

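// Clear all per-iteration state (timings, pause times, freed counts) and record the cause and
// soft-reference policy for the upcoming collection.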
void Iteration::Reset(GcCause gc_cause, bool clear_soft_references) {
  timings_.Reset();
  pause_times_.clear();
  duration_ns_ = 0;
  clear_soft_references_ = clear_soft_references;
  gc_cause_ = gc_cause;
  freed_ = ObjectBytePair();
  freed_los_ = ObjectBytePair();
  freed_bytes_revoke_ = 0;
}

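// Bytes freed per second for this iteration. For example, freeing 8 MiB over 40 ms reports
// roughly 8 MiB * 1000 / 41 ms, about 195 MiB/s; the extra millisecond keeps very short
// iterations from dividing by zero.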
uint64_t Iteration::GetEstimatedThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (static_cast<uint64_t>(freed_.bytes) * 1000) / (NsToMs(GetDurationNs()) + 1);
}

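// Construct a collector bound to |heap|. The name labels the pause histogram and the cumulative
// timing logger in performance dumps.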
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      cumulative_timings_(name),
      pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true),
      is_transaction_active_(false) {
  ResetCumulativeStatistics();
}

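// Record a single pause, in nanoseconds, against the current GC iteration.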
void GarbageCollector::RegisterPause(uint64_t nano_length) {
  GetCurrentIteration()->pause_times_.push_back(nano_length);
}

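// Zero the lifetime counters (total time, freed objects/bytes) and clear the pause histogram.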
void GarbageCollector::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  pause_histogram_.Reset();
}

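// Drive one complete collection: reset the current iteration, run the collector-specific phases
// (RunPhases()), then fold the iteration's timings, freed counts, and pause times into the
// cumulative statistics.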
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  ScopedTrace trace(android::base::StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()));
  Thread* self = Thread::Current();
  uint64_t start_time = NanoTime();
  Iteration* current_iteration = GetCurrentIteration();
  current_iteration->Reset(gc_cause, clear_soft_references);
  // Note: transaction mode is single-threaded and there is no asynchronous GC, so this flag
  // does not change in the middle of a GC.
  is_transaction_active_ = Runtime::Current()->IsActiveTransaction();
  RunPhases();  // Run all the GC phases.
  // Add the current timings to the cumulative timings.
  cumulative_timings_.AddLogger(*GetTimings());
  // Update cumulative statistics with how many bytes the GC iteration freed.
  total_freed_objects_ += current_iteration->GetFreedObjects() +
      current_iteration->GetFreedLargeObjects();
  total_freed_bytes_ += current_iteration->GetFreedBytes() +
      current_iteration->GetFreedLargeObjectBytes();
  uint64_t end_time = NanoTime();
  current_iteration->SetDurationNs(end_time - start_time);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The entire GC was paused; clear any fake pauses that might be in the pause times and add
    // the whole GC duration.
    current_iteration->pause_times_.clear();
    RegisterPause(current_iteration->GetDurationNs());
  }
  total_time_ns_ += current_iteration->GetDurationNs();
  for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
    MutexLock mu(self, pause_histogram_lock_);
    pause_histogram_.AdjustAndAddValue(pause_time);
  }
  is_transaction_active_ = false;
}

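// Swap the live and mark bitmaps of every space collected by this GC type; see the comment in
// the body for why the swap is preferred over clearing bits.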
void GarbageCollector::SwapBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
  // bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (live_bitmap != nullptr && live_bitmap != mark_bitmap) {
        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
      }
    }
  }
  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
    space::LargeObjectSpace* space = disc_space->AsLargeObjectSpace();
    accounting::LargeObjectBitmap* live_set = space->GetLiveBitmap();
    accounting::LargeObjectBitmap* mark_set = space->GetMarkBitmap();
    heap_->GetLiveBitmap()->ReplaceLargeObjectBitmap(live_set, mark_set);
    heap_->GetMarkBitmap()->ReplaceLargeObjectBitmap(mark_set, live_set);
    space->SwapBitmaps();
  }
}

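// Lifetime bytes freed per second across all iterations of this collector, with the same +1 ms
// guard against division by zero as the per-iteration estimate.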
uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
}

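// Discard all accumulated measurements: the pause histogram, cumulative timings, and lifetime
// totals.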
void GarbageCollector::ResetMeasurements() {
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    pause_histogram_.Reset();
  }
  cumulative_timings_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}

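// RAII helper: suspends all mutator threads for its lifetime and, if requested, notifies the
// registered GcPauseListener. A collector's pause phase typically looks roughly like this
// (sketch, not taken from this file):
//
//   {
//     ScopedPause pause(this);
//     // ... work that requires all mutators to be suspended ...
//   }  // Threads resume and the pause duration is registered here.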
GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting)
    : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) {
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
  if (with_reporting) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->StartPause();
    }
  }
}

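// Register the pause duration with the collector, notify the listener if reporting was enabled,
// and resume all threads.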
GarbageCollector::ScopedPause::~ScopedPause() {
  collector_->RegisterPause(NanoTime() - start_time_);
  Runtime* runtime = Runtime::Current();
  if (with_reporting_) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->EndPause();
    }
  }
  runtime->GetThreadList()->ResumeAll();
}

// Returns the current GC iteration and associated info.
Iteration* GarbageCollector::GetCurrentIteration() {
  return heap_->GetCurrentGcIteration();
}
const Iteration* GarbageCollector::GetCurrentIteration() const {
  return heap_->GetCurrentGcIteration();
}

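// Account objects and bytes freed from regular alloc spaces, both in the current iteration and
// with the heap.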
void GarbageCollector::RecordFree(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}
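// Same as RecordFree(), but for the large object space.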
void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_los_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}

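// Total time spent in GC pauses, as accumulated by the pause histogram.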
uint64_t GarbageCollector::GetTotalPausedTimeNs() {
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  return pause_histogram_.AdjustedSum();
}

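// Dump cumulative timings, pause-time confidence intervals, and overall freed-object and
// throughput statistics for this collector. Produces no output before the first completed
// iteration.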
void GarbageCollector::DumpPerformanceInfo(std::ostream& os) {
  const CumulativeLogger& logger = GetCumulativeTimings();
  const size_t iterations = logger.GetIterations();
  if (iterations == 0) {
    return;
  }
  os << Dumpable<CumulativeLogger>(logger);
  const uint64_t total_ns = logger.GetTotalNs();
  double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
  const uint64_t freed_bytes = GetTotalFreedBytes();
  const uint64_t freed_objects = GetTotalFreedObjects();
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    if (pause_histogram_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData cumulative_data;
      pause_histogram_.CreateHistogram(&cumulative_data);
      pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
    }
  }
  os << GetName() << " total time: " << PrettyDuration(total_ns)
     << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
     << GetName() << " freed: " << freed_objects
     << " objects with total size " << PrettySize(freed_bytes) << "\n"
     << GetName() << " throughput: " << freed_objects / seconds << "/s / "
     << PrettySize(freed_bytes / seconds) << "/s\n";
}

}  // namespace collector
}  // namespace gc
}  // namespace art