/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>   // For errno.
#include <stdio.h>
#include <string.h>  // For strerror.
#include <unistd.h>
#include <sys/mman.h>

#include "garbage_collector.h"

#include "android-base/stringprintf.h"

#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG_IS_ON.
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

Iteration::Iteration()
    : duration_ns_(0), timings_("GC iteration timing logger", true, VLOG_IS_ON(heap)) {
  Reset(kGcCauseBackground, false);  // Reset to placeholder values.
}

void Iteration::Reset(GcCause gc_cause, bool clear_soft_references) {
  timings_.Reset();
  pause_times_.clear();
  duration_ns_ = 0;
  clear_soft_references_ = clear_soft_references;
  gc_cause_ = gc_cause;
  freed_ = ObjectBytePair();
  freed_los_ = ObjectBytePair();
  freed_bytes_revoke_ = 0;
}

uint64_t Iteration::GetEstimatedThroughput() const {
  // Add 1ms to prevent possible division by 0.
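  // freed bytes * 1000 / duration-in-ms yields bytes per second.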
  return (static_cast<uint64_t>(freed_.bytes) * 1000) / (NsToMs(GetDurationNs()) + 1);
}

GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      rss_histogram_((name_ + " peak-rss").c_str(), kMemBucketSize, kMemBucketCount),
      freed_bytes_histogram_((name_ + " freed-bytes").c_str(), kMemBucketSize, kMemBucketCount),
      cumulative_timings_(name),
      pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true),
      is_transaction_active_(false) {
  ResetCumulativeStatistics();
}

void GarbageCollector::RegisterPause(uint64_t nano_length) {
  GetCurrentIteration()->pause_times_.push_back(nano_length);
}

void GarbageCollector::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  total_thread_cpu_time_ns_ = 0u;
  total_time_ns_ = 0u;
  total_freed_objects_ = 0u;
  total_freed_bytes_ = 0;
  rss_histogram_.Reset();
  freed_bytes_histogram_.Reset();
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  pause_histogram_.Reset();
}

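// Computes the resident set size (in bytes) of the given address ranges by
// querying the kernel with mincore(2). Returns 0 on non-Linux systems or if
// the range list is empty.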
uint64_t GarbageCollector::ExtractRssFromMincore(
    std::list<std::pair<void*, void*>>* gc_ranges) {
  uint64_t rss = 0;
  if (gc_ranges->empty()) {
    return 0;
  }
  // mincore() is a Linux-specific syscall.
#if defined(__linux__)
  using range_t = std::pair<void*, void*>;
  // Sort gc_ranges by start address.
  gc_ranges->sort([](const range_t& a, const range_t& b) {
    return std::less()(a.first, b.first);
  });
  // Merge gc_ranges. This is necessary because the kernel may merge contiguous
  // regions whose properties match. Merging adjacent ranges here is sufficient,
  // as the kernel doesn't merge adjoining ranges which differ only in name.
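  // For example, adjacent ranges [a, b) and [b, c) collapse into [a, c).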
  size_t vec_len = 0;
  for (auto it = gc_ranges->begin(); it != gc_ranges->end(); it++) {
    auto next_it = it;
    next_it++;
    while (next_it != gc_ranges->end()) {
      if (it->second == next_it->first) {
        it->second = next_it->second;
        next_it = gc_ranges->erase(next_it);
      } else {
        break;
      }
    }
    size_t length = static_cast<uint8_t*>(it->second) - static_cast<uint8_t*>(it->first);
    // Compute max length for vector allocation later.
    vec_len = std::max(vec_len, length / kPageSize);
  }
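  // mincore() fills one status byte per page, so the vector must be large
  // enough to cover the longest merged range.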
  std::unique_ptr<unsigned char[]> vec(new unsigned char[vec_len]);
  for (const auto it : *gc_ranges) {
    size_t length = static_cast<uint8_t*>(it.second) - static_cast<uint8_t*>(it.first);
    if (mincore(it.first, length, vec.get()) == 0) {
      for (size_t i = 0; i < length / kPageSize; i++) {
        // Least significant bit represents residency of a page. Other bits are
        // reserved.
        rss += vec[i] & 0x1;
      }
    } else {
      LOG(WARNING) << "Call to mincore() on memory range [0x" << std::hex << it.first
                   << ", 0x" << it.second << std::dec << ") failed: " << strerror(errno);
    }
  }
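  // So far rss counts resident pages; convert to bytes before recording the
  // histogram sample in KB.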
  rss *= kPageSize;
  rss_histogram_.AddValue(rss / KB);
#endif
  return rss;
}

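// Runs one full collection: resets the current iteration, executes the
// collector-specific phases, then folds the iteration's results into the
// cumulative statistics and the pause/freed-bytes histograms.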
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  ScopedTrace trace(android::base::StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()));
  Thread* self = Thread::Current();
  uint64_t start_time = NanoTime();
  uint64_t thread_cpu_start_time = ThreadCpuNanoTime();
  GetHeap()->CalculatePreGcWeightedAllocatedBytes();
  Iteration* current_iteration = GetCurrentIteration();
  current_iteration->Reset(gc_cause, clear_soft_references);
  // Note: transaction mode is single-threaded, there is no asynchronous GC,
  // and this flag doesn't change in the middle of a GC.
  is_transaction_active_ = Runtime::Current()->IsActiveTransaction();
  RunPhases();  // Run all the GC phases.
  GetHeap()->CalculatePostGcWeightedAllocatedBytes();
  // Add the current timings to the cumulative timings.
  cumulative_timings_.AddLogger(*GetTimings());
  // Update cumulative statistics with how many objects and bytes the GC iteration freed.
  total_freed_objects_ += current_iteration->GetFreedObjects() +
      current_iteration->GetFreedLargeObjects();
  int64_t freed_bytes = current_iteration->GetFreedBytes() +
      current_iteration->GetFreedLargeObjectBytes();
  total_freed_bytes_ += freed_bytes;
  // Round negative freed bytes to 0, as we are not interested in such corner cases.
  freed_bytes_histogram_.AddValue(std::max<int64_t>(freed_bytes / KB, 0));
  uint64_t end_time = NanoTime();
  uint64_t thread_cpu_end_time = ThreadCpuNanoTime();
  total_thread_cpu_time_ns_ += thread_cpu_end_time - thread_cpu_start_time;
  current_iteration->SetDurationNs(end_time - start_time);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The entire GC was paused: clear the fake pauses which might be in the pause times and add
    // the whole GC duration.
    current_iteration->pause_times_.clear();
    RegisterPause(current_iteration->GetDurationNs());
  }
  total_time_ns_ += current_iteration->GetDurationNs();
  for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
    MutexLock mu(self, pause_histogram_lock_);
    pause_histogram_.AdjustAndAddValue(pause_time);
  }
  is_transaction_active_ = false;
}

void GarbageCollector::SwapBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
  // bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
      if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
      }
    }
  }
  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
    disc_space->AsLargeObjectSpace()->SwapBitmaps();
  }
}

uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
  // Add 1ms to prevent possible division by 0.
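  // As in GetEstimatedThroughput(), bytes * 1000 / ms gives bytes per second.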
  return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
}

void GarbageCollector::ResetMeasurements() {
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    pause_histogram_.Reset();
  }
  cumulative_timings_.Reset();
  rss_histogram_.Reset();
  freed_bytes_histogram_.Reset();
  total_thread_cpu_time_ns_ = 0u;
  total_time_ns_ = 0u;
  total_freed_objects_ = 0u;
  total_freed_bytes_ = 0;
}

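// ScopedPause suspends all mutator threads for its lifetime and registers the
// pause with the collector on destruction. A usage sketch:
//
//   {
//     GarbageCollector::ScopedPause pause(this);
//     // All mutator threads are suspended here.
//   }  // Threads resume; the pause duration is recorded.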
GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting)
    : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) {
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
  if (with_reporting) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->StartPause();
    }
  }
}

GarbageCollector::ScopedPause::~ScopedPause() {
  collector_->RegisterPause(NanoTime() - start_time_);
  Runtime* runtime = Runtime::Current();
  if (with_reporting_) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->EndPause();
    }
  }
  runtime->GetThreadList()->ResumeAll();
}

// Returns the current GC iteration and associated info.
Iteration* GarbageCollector::GetCurrentIteration() {
  return heap_->GetCurrentGcIteration();
}
const Iteration* GarbageCollector::GetCurrentIteration() const {
  return heap_->GetCurrentGcIteration();
}

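// RecordFree/RecordFreeLOS credit freed objects and bytes to the current
// iteration (regular vs. large-object space, respectively) and forward them
// to the heap so its global accounting stays consistent.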
void GarbageCollector::RecordFree(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}
void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_los_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}

uint64_t GarbageCollector::GetTotalPausedTimeNs() {
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  return pause_histogram_.AdjustedSum();
}

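// Dumps cumulative timings, the pause/RSS/freed-bytes histograms, and the
// derived throughput figures. Does nothing if no iterations have run yet.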
void GarbageCollector::DumpPerformanceInfo(std::ostream& os) {
  const CumulativeLogger& logger = GetCumulativeTimings();
  const size_t iterations = logger.GetIterations();
  if (iterations == 0) {
    return;
  }
  os << Dumpable<CumulativeLogger>(logger);
  const uint64_t total_ns = logger.GetTotalNs();
  const double seconds = NsToMs(total_ns) / 1000.0;
  const uint64_t freed_bytes = GetTotalFreedBytes();
  const uint64_t freed_objects = GetTotalFreedObjects();
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    if (pause_histogram_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData cumulative_data;
      pause_histogram_.CreateHistogram(&cumulative_data);
      pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
    }
  }
#if defined(__linux__)
  if (rss_histogram_.SampleSize() > 0) {
    os << rss_histogram_.Name()
       << ": Avg: " << PrettySize(rss_histogram_.Mean() * KB)
       << " Max: " << PrettySize(rss_histogram_.Max() * KB)
       << " Min: " << PrettySize(rss_histogram_.Min() * KB) << "\n";
    os << "Peak-rss Histogram: ";
    rss_histogram_.DumpBins(os);
    os << "\n";
  }
#endif
  if (freed_bytes_histogram_.SampleSize() > 0) {
    os << freed_bytes_histogram_.Name()
       << ": Avg: " << PrettySize(freed_bytes_histogram_.Mean() * KB)
       << " Max: " << PrettySize(freed_bytes_histogram_.Max() * KB)
       << " Min: " << PrettySize(freed_bytes_histogram_.Min() * KB) << "\n";
    os << "Freed-bytes histogram: ";
    freed_bytes_histogram_.DumpBins(os);
    os << "\n";
  }
  const double cpu_seconds = NsToMs(GetTotalCpuTime()) / 1000.0;
  os << GetName() << " total time: " << PrettyDuration(total_ns)
     << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
     << GetName() << " freed: " << freed_objects
     << " objects with total size " << PrettySize(freed_bytes) << "\n"
     << GetName() << " throughput: " << freed_objects / seconds << "/s / "
     << PrettySize(freed_bytes / seconds) << "/s"
     << " per cpu-time: "
     << static_cast<uint64_t>(freed_bytes / cpu_seconds) << "/s / "
     << PrettySize(freed_bytes / cpu_seconds) << "/s\n";
}

}  // namespace collector
}  // namespace gc
}  // namespace art