// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <errno.h>

#include <vector>

#include "base/memory/ptr_util.h"
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/heap_profiler_heap_dump_writer.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_totals.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if defined(OS_IOS)
#include <sys/sysctl.h>
#endif

#if defined(OS_POSIX)
#include <sys/mman.h>
#endif

#if defined(OS_WIN)
#include <Psapi.h>
#endif

namespace base {
namespace trace_event {

namespace {

const char kEdgeTypeOwnership[] = "ownership";

std::string GetSharedGlobalAllocatorDumpName(
    const MemoryAllocatorDumpGuid& guid) {
  return "global/" + guid.ToString();
}

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
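// Rounds |mapped_size| up to the number of whole system pages it spans, e.g.
// a 5 KiB mapping with a 4 KiB page size counts as 2 pages.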
size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
  return (mapped_size + page_size - 1) / page_size;
}
#endif

}  // namespace

// static
bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
#if defined(OS_IOS)
  // On iOS, getpagesize() returns the user page size, but the kernel page
  // size is needed to size the arrays passed to mincore(). Use
  // sysctlbyname() for this. See crbug.com/542671 and Apple rdar://23651782.
  int pagesize;
  size_t pagesize_len;
  int status = sysctlbyname("vm.pagesize", nullptr, &pagesize_len, nullptr, 0);
  if (!status && pagesize_len == sizeof(pagesize)) {
    if (!sysctlbyname("vm.pagesize", &pagesize, &pagesize_len, nullptr, 0))
      return pagesize;
  }
  LOG(ERROR) << "sysctlbyname(\"vm.pagesize\") failed.";
  // Fall back to getpagesize(), although it may be wrong in certain cases.
#endif  // defined(OS_IOS)
  return base::GetPageSize();
}

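// Counts how many bytes of the |mapped_size| bytes starting at
// |start_address| are resident in physical memory. |start_address| must be
// page-aligned (see the DCHECK below). A minimal usage sketch, assuming a
// POSIX platform and an anonymous page-aligned mapping (the names here are
// illustrative, not part of this API):
//
//   void* region = mmap(nullptr, 1024 * 1024, PROT_READ | PROT_WRITE,
//                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   size_t resident =
//       ProcessMemoryDump::CountResidentBytes(region, 1024 * 1024);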
// static
size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
                                             size_t mapped_size) {
  const size_t page_size = GetSystemPageSize();
  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
  DCHECK_EQ(0u, start_pointer % page_size);

  size_t offset = 0;
  size_t total_resident_size = 0;
  bool failure = false;

  // An array as large as the number of pages in the memory segment needs to
  // be passed to the query function. To avoid allocating a large array, the
  // given block of memory is split into chunks of size |kMaxChunkSize|.
  const size_t kMaxChunkSize = 8 * 1024 * 1024;
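  // With 4 KiB pages, each 8 MiB chunk needs at most 2048 entries in |vec|;
  // e.g. a 20 MiB mapping is scanned as three chunks of 8, 8 and 4 MiB.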
  size_t max_vec_size =
      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
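  // The element type of |vec| matches each platform's query API: mincore()
  // takes char* on macOS/iOS and unsigned char* on other POSIX systems, while
  // Windows' QueryWorkingSetEx() fills PSAPI_WORKING_SET_EX_INFORMATION.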
#if defined(OS_MACOSX) || defined(OS_IOS)
  std::unique_ptr<char[]> vec(new char[max_vec_size]);
#elif defined(OS_WIN)
  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
#elif defined(OS_POSIX)
  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
#endif

  while (offset < mapped_size) {
    uintptr_t chunk_start = (start_pointer + offset);
    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
    size_t resident_page_count = 0;

#if defined(OS_MACOSX) || defined(OS_IOS)
    // mincore() on macOS does not fail with EAGAIN.
    failure =
        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
#elif defined(OS_WIN)
    for (size_t i = 0; i < page_count; i++) {
      vec[i].VirtualAddress =
          reinterpret_cast<void*>(chunk_start + i * page_size);
    }
    DWORD vec_size = static_cast<DWORD>(
        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i].VirtualAttributes.Valid;
#elif defined(OS_POSIX)
    int error_counter = 0;
    int result = 0;
    // HANDLE_EINTR retries up to 100 times, so follow the same pattern here
    // for EAGAIN.
    do {
      result =
          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
    failure = !!result;

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & 1;
#endif

    if (failure)
      break;

    total_resident_size += resident_page_count * page_size;
    offset += kMaxChunkSize;
  }

  DCHECK(!failure);
  if (failure) {
    total_resident_size = 0;
    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid.";
  }
  return total_resident_size;
}
#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)

ProcessMemoryDump::ProcessMemoryDump(
    scoped_refptr<MemoryDumpSessionState> session_state,
    const MemoryDumpArgs& dump_args)
    : has_process_totals_(false),
      has_process_mmaps_(false),
      session_state_(std::move(session_state)),
      dump_args_(dump_args) {}

ProcessMemoryDump::~ProcessMemoryDump() {}

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name) {
  return AddAllocatorDumpInternal(
      WrapUnique(new MemoryAllocatorDump(absolute_name, this)));
}

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name,
    const MemoryAllocatorDumpGuid& guid) {
  return AddAllocatorDumpInternal(
      WrapUnique(new MemoryAllocatorDump(absolute_name, this, guid)));
}

MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
    std::unique_ptr<MemoryAllocatorDump> mad) {
  // In background mode, return the black-hole dump if the given dump name is
  // not whitelisted.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
      !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
    return GetBlackHoleMad();
  }

  auto insertion_result = allocator_dumps_.insert(
      std::make_pair(mad->absolute_name(), std::move(mad)));
  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
  DCHECK(insertion_result.second) << "Duplicate name: "
                                  << inserted_mad->absolute_name();
  return inserted_mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
    const std::string& absolute_name) const {
  auto it = allocator_dumps_.find(absolute_name);
  if (it != allocator_dumps_.end())
    return it->second.get();
  if (black_hole_mad_)
    return black_hole_mad_.get();
  return nullptr;
}

MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
    const std::string& absolute_name) {
  MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
  return mad ? mad : CreateAllocatorDump(absolute_name);
}

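// Example: two providers in the same process can both call
// CreateSharedGlobalAllocatorDump() with the same guid; the first call
// creates "global/<guid>" and the second returns that existing dump.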
MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  // Global dumps are disabled in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return GetBlackHoleMad();

  // A shared allocator dump can be shared within a process, so a dump with
  // this guid may already exist.
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad) {
    // The weak flag is cleared because this method should create a non-weak
    // dump.
    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
    return mad;
  }
  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
}

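// A WEAK shared dump acts as a placeholder: if another provider later creates
// the same dump non-weakly, the flag is cleared (see above); otherwise
// consumers of the trace are expected to discard the weak dump.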
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  // Global dumps are disabled in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return GetBlackHoleMad();

  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad)
    return mad;
  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
  return mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) const {
  return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}

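// Example: a heap profiler for an allocator named "malloc" would call
// DumpHeapUsage(metrics, overhead, "malloc"); the serialized heap dump is
// emitted under the "heaps" dictionary (see AsValueInto() below) and the
// profiler's own memory overhead under "tracing/heap_profiler_malloc".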
void ProcessMemoryDump::DumpHeapUsage(
    const base::hash_map<base::trace_event::AllocationContext,
                         base::trace_event::AllocationMetrics>&
        metrics_by_context,
    base::trace_event::TraceEventMemoryOverhead& overhead,
    const char* allocator_name) {
  if (!metrics_by_context.empty()) {
    DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
    std::unique_ptr<TracedValue> heap_dump =
        ExportHeapDump(metrics_by_context, *session_state());
    heap_dumps_[allocator_name] = std::move(heap_dump);
  }

  std::string base_name =
      base::StringPrintf("tracing/heap_profiler_%s", allocator_name);
  overhead.DumpInto(base_name.c_str(), this);
}

void ProcessMemoryDump::Clear() {
  if (has_process_totals_) {
    process_totals_.Clear();
    has_process_totals_ = false;
  }

  if (has_process_mmaps_) {
    process_mmaps_.Clear();
    has_process_mmaps_ = false;
  }

  allocator_dumps_.clear();
  allocator_dumps_edges_.clear();
  heap_dumps_.clear();
}

void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
  DCHECK(!other->has_process_totals() && !other->has_process_mmaps());

  // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
  // into this ProcessMemoryDump, checking for duplicates.
  for (auto& it : other->allocator_dumps_)
    AddAllocatorDumpInternal(std::move(it.second));
  other->allocator_dumps_.clear();

  // Move all the edges.
  allocator_dumps_edges_.insert(allocator_dumps_edges_.end(),
                                other->allocator_dumps_edges_.begin(),
                                other->allocator_dumps_edges_.end());
  other->allocator_dumps_edges_.clear();

  for (auto& it : other->heap_dumps_) {
    DCHECK_EQ(0ul, heap_dumps_.count(it.first));
    heap_dumps_.insert(std::make_pair(it.first, std::move(it.second)));
  }
  other->heap_dumps_.clear();
}

void ProcessMemoryDump::AsValueInto(TracedValue* value) const {
  if (has_process_totals_) {
    value->BeginDictionary("process_totals");
    process_totals_.AsValueInto(value);
    value->EndDictionary();
  }

  if (has_process_mmaps_) {
    value->BeginDictionary("process_mmaps");
    process_mmaps_.AsValueInto(value);
    value->EndDictionary();
  }

  if (!allocator_dumps_.empty()) {
    value->BeginDictionary("allocators");
    for (const auto& allocator_dump_it : allocator_dumps_)
      allocator_dump_it.second->AsValueInto(value);
    value->EndDictionary();
  }

  if (!heap_dumps_.empty()) {
    value->BeginDictionary("heaps");
    for (const auto& name_and_dump : heap_dumps_)
      value->SetValueWithCopiedName(name_and_dump.first, *name_and_dump.second);
    value->EndDictionary();  // "heaps"
  }

  value->BeginArray("allocators_graph");
  for (const MemoryAllocatorDumpEdge& edge : allocator_dumps_edges_) {
    value->BeginDictionary();
    value->SetString("source", edge.source.ToString());
    value->SetString("target", edge.target.ToString());
    value->SetInteger("importance", edge.importance);
    value->SetString("type", edge.type);
    value->EndDictionary();
  }
  value->EndArray();
}

void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
                                         const MemoryAllocatorDumpGuid& target,
                                         int importance) {
  allocator_dumps_edges_.push_back(
      {source, target, importance, kEdgeTypeOwnership});
}

void ProcessMemoryDump::AddOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target) {
  AddOwnershipEdge(source, target, 0 /* importance */);
}

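// Example: AddSuballocation(cache_guid, "malloc/allocated_objects") creates a
// child dump named "malloc/allocated_objects/__<cache_guid>" and adds an
// ownership edge from |source| to that child.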
void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
                                         const std::string& target_node_name) {
  // Do not create new dumps for suballocations in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return;

  std::string child_mad_name = target_node_name + "/__" + source.ToString();
  MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
  AddOwnershipEdge(source, target_child_mad->guid());
}

MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
  DCHECK(is_black_hole_non_fatal_for_testing_);
  if (!black_hole_mad_)
    black_hole_mad_.reset(new MemoryAllocatorDump("discarded", this));
  return black_hole_mad_.get();
}

}  // namespace trace_event
}  // namespace base
