// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <errno.h>

#include <algorithm>
#include <vector>

#include "base/process/process_metrics.h"
#include "base/trace_event/process_memory_totals.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if defined(OS_POSIX)
#include <sys/mman.h>
#endif
18
19 namespace base {
20 namespace trace_event {
21
22 namespace {
23
24 const char kEdgeTypeOwnership[] = "ownership";
25
GetSharedGlobalAllocatorDumpName(const MemoryAllocatorDumpGuid & guid)26 std::string GetSharedGlobalAllocatorDumpName(
27 const MemoryAllocatorDumpGuid& guid) {
28 return "global/" + guid.ToString();
29 }
30
31 } // namespace
32
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
// Returns the number of bytes of [start_address, start_address + mapped_size)
// that are currently resident in physical memory, measured with mincore().
// Returns 0 (and logs) if mincore() fails. |start_address| must be
// page-aligned.
size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
                                             size_t mapped_size) {
  const size_t page_size = GetPageSize();
  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
  // mincore() requires a page-aligned starting address.
  DCHECK_EQ(0u, start_pointer % page_size);

  // This function allocates a char vector with one entry per page in the
  // given mapped_size. To avoid allocating a large array, the memory is split
  // into chunks. The maximum size of the vector allocated is
  // kMaxChunkSize / page_size.
  const size_t kMaxChunkSize = 32 * 1024 * 1024;
  size_t offset = 0;
  size_t total_resident_size = 0;
  int result = 0;
  while (offset < mapped_size) {
    void* chunk_start = reinterpret_cast<void*>(start_pointer + offset);
    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
    const size_t page_count = (chunk_size + page_size - 1) / page_size;
    size_t resident_page_count = 0;

#if defined(OS_MACOSX) || defined(OS_IOS)
    // macOS/iOS declare the mincore() out-vector as char*.
    std::vector<char> vec(page_count + 1);
    // mincore in MAC does not fail with EAGAIN.
    result = mincore(chunk_start, chunk_size, vec.data());
    if (result)
      break;

    // Only the low bit (MINCORE_INCORE) indicates residency on macOS.
    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
#else   // defined(OS_MACOSX) || defined(OS_IOS)
    std::vector<unsigned char> vec(page_count + 1);
    int error_counter = 0;
    // HANDLE_EINTR tries for 100 times. So following the same pattern.
    do {
      result = mincore(chunk_start, chunk_size, vec.data());
    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
    if (result)
      break;

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i];
#endif  // defined(OS_MACOSX) || defined(OS_IOS)

    total_resident_size += resident_page_count * page_size;
    // Advance by the size actually scanned (not kMaxChunkSize), so |offset|
    // never overshoots |mapped_size| on the final, partial chunk.
    offset += chunk_size;
  }

  DCHECK_EQ(0, result);
  if (result) {
    total_resident_size = 0;
    LOG(ERROR) << "mincore() call failed. The resident size is invalid";
  }
  return total_resident_size;
}
#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
90
ProcessMemoryDump(const scoped_refptr<MemoryDumpSessionState> & session_state)91 ProcessMemoryDump::ProcessMemoryDump(
92 const scoped_refptr<MemoryDumpSessionState>& session_state)
93 : has_process_totals_(false),
94 has_process_mmaps_(false),
95 session_state_(session_state) {
96 }
97
~ProcessMemoryDump()98 ProcessMemoryDump::~ProcessMemoryDump() {
99 }
100
CreateAllocatorDump(const std::string & absolute_name)101 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
102 const std::string& absolute_name) {
103 MemoryAllocatorDump* mad = new MemoryAllocatorDump(absolute_name, this);
104 AddAllocatorDumpInternal(mad); // Takes ownership of |mad|.
105 return mad;
106 }
107
CreateAllocatorDump(const std::string & absolute_name,const MemoryAllocatorDumpGuid & guid)108 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
109 const std::string& absolute_name,
110 const MemoryAllocatorDumpGuid& guid) {
111 MemoryAllocatorDump* mad = new MemoryAllocatorDump(absolute_name, this, guid);
112 AddAllocatorDumpInternal(mad); // Takes ownership of |mad|.
113 return mad;
114 }
115
AddAllocatorDumpInternal(MemoryAllocatorDump * mad)116 void ProcessMemoryDump::AddAllocatorDumpInternal(MemoryAllocatorDump* mad) {
117 DCHECK_EQ(0ul, allocator_dumps_.count(mad->absolute_name()));
118 allocator_dumps_storage_.push_back(mad);
119 allocator_dumps_[mad->absolute_name()] = mad;
120 }
121
GetAllocatorDump(const std::string & absolute_name) const122 MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
123 const std::string& absolute_name) const {
124 auto it = allocator_dumps_.find(absolute_name);
125 return it == allocator_dumps_.end() ? nullptr : it->second;
126 }
127
GetOrCreateAllocatorDump(const std::string & absolute_name)128 MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
129 const std::string& absolute_name) {
130 MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
131 return mad ? mad : CreateAllocatorDump(absolute_name);
132 }
133
CreateSharedGlobalAllocatorDump(const MemoryAllocatorDumpGuid & guid)134 MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
135 const MemoryAllocatorDumpGuid& guid) {
136 // A shared allocator dump can be shared within a process and the guid could
137 // have been created already.
138 MemoryAllocatorDump* allocator_dump = GetSharedGlobalAllocatorDump(guid);
139 return allocator_dump ? allocator_dump
140 : CreateAllocatorDump(
141 GetSharedGlobalAllocatorDumpName(guid), guid);
142 }
143
GetSharedGlobalAllocatorDump(const MemoryAllocatorDumpGuid & guid) const144 MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
145 const MemoryAllocatorDumpGuid& guid) const {
146 return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
147 }
148
AddHeapDump(const std::string & absolute_name,scoped_refptr<TracedValue> heap_dump)149 void ProcessMemoryDump::AddHeapDump(const std::string& absolute_name,
150 scoped_refptr<TracedValue> heap_dump) {
151 DCHECK_EQ(0ul, heap_dumps_.count(absolute_name));
152 heap_dumps_[absolute_name] = heap_dump;
153 }
154
Clear()155 void ProcessMemoryDump::Clear() {
156 if (has_process_totals_) {
157 process_totals_.Clear();
158 has_process_totals_ = false;
159 }
160
161 if (has_process_mmaps_) {
162 process_mmaps_.Clear();
163 has_process_mmaps_ = false;
164 }
165
166 allocator_dumps_storage_.clear();
167 allocator_dumps_.clear();
168 allocator_dumps_edges_.clear();
169 heap_dumps_.clear();
170 }
171
TakeAllDumpsFrom(ProcessMemoryDump * other)172 void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
173 DCHECK(!other->has_process_totals() && !other->has_process_mmaps());
174
175 // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
176 // into this ProcessMemoryDump.
177 for (MemoryAllocatorDump* mad : other->allocator_dumps_storage_) {
178 // Check that we don't merge duplicates.
179 DCHECK_EQ(0ul, allocator_dumps_.count(mad->absolute_name()));
180 allocator_dumps_storage_.push_back(mad);
181 allocator_dumps_[mad->absolute_name()] = mad;
182 }
183 other->allocator_dumps_storage_.weak_clear();
184 other->allocator_dumps_.clear();
185
186 // Move all the edges.
187 allocator_dumps_edges_.insert(allocator_dumps_edges_.end(),
188 other->allocator_dumps_edges_.begin(),
189 other->allocator_dumps_edges_.end());
190 other->allocator_dumps_edges_.clear();
191
192 heap_dumps_.insert(other->heap_dumps_.begin(), other->heap_dumps_.end());
193 other->heap_dumps_.clear();
194 }
195
AsValueInto(TracedValue * value) const196 void ProcessMemoryDump::AsValueInto(TracedValue* value) const {
197 if (has_process_totals_) {
198 value->BeginDictionary("process_totals");
199 process_totals_.AsValueInto(value);
200 value->EndDictionary();
201 }
202
203 if (has_process_mmaps_) {
204 value->BeginDictionary("process_mmaps");
205 process_mmaps_.AsValueInto(value);
206 value->EndDictionary();
207 }
208
209 if (allocator_dumps_storage_.size() > 0) {
210 value->BeginDictionary("allocators");
211 for (const MemoryAllocatorDump* allocator_dump : allocator_dumps_storage_)
212 allocator_dump->AsValueInto(value);
213 value->EndDictionary();
214 }
215
216 if (heap_dumps_.size() > 0) {
217 value->BeginDictionary("heaps");
218 for (const auto& name_and_dump : heap_dumps_)
219 value->SetValueWithCopiedName(name_and_dump.first, *name_and_dump.second);
220 value->EndDictionary(); // "heaps"
221 }
222
223 value->BeginArray("allocators_graph");
224 for (const MemoryAllocatorDumpEdge& edge : allocator_dumps_edges_) {
225 value->BeginDictionary();
226 value->SetString("source", edge.source.ToString());
227 value->SetString("target", edge.target.ToString());
228 value->SetInteger("importance", edge.importance);
229 value->SetString("type", edge.type);
230 value->EndDictionary();
231 }
232 value->EndArray();
233 }
234
AddOwnershipEdge(const MemoryAllocatorDumpGuid & source,const MemoryAllocatorDumpGuid & target,int importance)235 void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
236 const MemoryAllocatorDumpGuid& target,
237 int importance) {
238 allocator_dumps_edges_.push_back(
239 {source, target, importance, kEdgeTypeOwnership});
240 }
241
AddOwnershipEdge(const MemoryAllocatorDumpGuid & source,const MemoryAllocatorDumpGuid & target)242 void ProcessMemoryDump::AddOwnershipEdge(
243 const MemoryAllocatorDumpGuid& source,
244 const MemoryAllocatorDumpGuid& target) {
245 AddOwnershipEdge(source, target, 0 /* importance */);
246 }
247
AddSuballocation(const MemoryAllocatorDumpGuid & source,const std::string & target_node_name)248 void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
249 const std::string& target_node_name) {
250 std::string child_mad_name = target_node_name + "/__" + source.ToString();
251 MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
252 AddOwnershipEdge(source, target_child_mad->guid());
253 }
254
255 } // namespace trace_event
256 } // namespace base
257