1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/trace_event/process_memory_dump.h"
6 
7 #include <stddef.h>
8 
9 #include "base/memory/aligned_memory.h"
10 #include "base/memory/ptr_util.h"
11 #include "base/memory/shared_memory_tracker.h"
12 #include "base/process/process_metrics.h"
13 #include "base/trace_event/memory_allocator_dump_guid.h"
14 #include "base/trace_event/memory_infra_background_whitelist.h"
15 #include "base/trace_event/trace_event_argument.h"
16 #include "base/trace_event/trace_log.h"
17 #include "build/build_config.h"
18 #include "testing/gtest/include/gtest/gtest.h"
19 
20 #if defined(OS_WIN)
21 #include <windows.h>
22 #include "winbase.h"
23 #elif defined(OS_POSIX) || defined(OS_FUCHSIA)
24 #include <sys/mman.h>
25 #endif
26 
27 #if defined(OS_IOS)
28 #include "base/ios/ios_util.h"
29 #endif
30 
31 namespace base {
32 namespace trace_event {
33 
34 namespace {
35 
// Dump arguments shared by most tests below; DETAILED avoids the name
// filtering applied in BACKGROUND mode.
const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
// Nullptr-terminated list of names installed as the allocator-dump name
// whitelist in BackgroundModeTest. "0x?" appears to act as a wildcard for a
// hex-formatted component (see the hex checks in BackgroundModeTest).
const char* const kTestDumpNameWhitelist[] = {
    "Whitelisted/TestName", "Whitelisted/TestName_0x?",
    "Whitelisted/0x?/TestName", "Whitelisted/0x?", nullptr};
40 
// Maps |size| bytes of fresh, readable and writable memory using the
// platform's native virtual-memory primitive. The region must be released
// with Unmap(). Returns nullptr (Windows) or MAP_FAILED (POSIX) on failure.
void* Map(size_t size) {
#if defined(OS_WIN)
  return ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_READWRITE);
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
  // POSIX requires |fd| to be -1 for anonymous mappings; some implementations
  // reject any other value, so don't pass 0 here.
  return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
                -1, 0);
#endif
}
50 
Unmap(void * addr,size_t size)51 void Unmap(void* addr, size_t size) {
52 #if defined(OS_WIN)
53   ::VirtualFree(addr, 0, MEM_DECOMMIT);
54 #elif defined(OS_POSIX) || defined(OS_FUCHSIA)
55   ::munmap(addr, size);
56 #else
57 #error This architecture is not (yet) supported.
58 #endif
59 }
60 
61 }  // namespace
62 
// Exercises the move constructor: all allocator dumps, ownership edges and
// the dump arguments must be carried over to the new instance.
TEST(ProcessMemoryDumpTest, MoveConstructor) {
  ProcessMemoryDump source = ProcessMemoryDump(kDetailedDumpArgs);
  source.CreateAllocatorDump("mad1");
  source.CreateAllocatorDump("mad2");
  source.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                          MemoryAllocatorDumpGuid(4242));

  ProcessMemoryDump moved(std::move(source));

  EXPECT_EQ(1u, moved.allocator_dumps().count("mad1"));
  EXPECT_EQ(1u, moved.allocator_dumps().count("mad2"));
  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
            moved.dump_args().level_of_detail);
  EXPECT_EQ(1u, moved.allocator_dumps_edges().size());

  // Serialization on a move-constructed dump must not crash.
  auto traced_value = std::make_unique<TracedValue>();
  moved.SerializeAllocatorDumpsInto(traced_value.get());
}
82 
// Exercises move assignment: the target must take over the source's dumps,
// edges and dump arguments, discarding anything it previously contained.
TEST(ProcessMemoryDumpTest, MoveAssignment) {
  ProcessMemoryDump pmd1 = ProcessMemoryDump(kDetailedDumpArgs);
  pmd1.CreateAllocatorDump("mad1");
  pmd1.CreateAllocatorDump("mad2");
  pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                        MemoryAllocatorDumpGuid(4242));

  ProcessMemoryDump pmd2({MemoryDumpLevelOfDetail::BACKGROUND});
  pmd2.CreateAllocatorDump("malloc");

  pmd2 = std::move(pmd1);
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
  // The "malloc" dump created before the assignment must have been discarded.
  // (This previously checked the never-created "mad3", which was vacuous.)
  EXPECT_EQ(0u, pmd2.allocator_dumps().count("malloc"));
  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
            pmd2.dump_args().level_of_detail);
  EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd2.SerializeAllocatorDumpsInto(traced_value.get());
}
105 
// Clear() must drop all dumps and edges, after which the ProcessMemoryDump
// must be fully reusable as if freshly constructed.
TEST(ProcessMemoryDumpTest, Clear) {
  auto pmd1 = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  pmd1->CreateAllocatorDump("mad1");
  pmd1->CreateAllocatorDump("mad2");
  ASSERT_FALSE(pmd1->allocator_dumps().empty());

  pmd1->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                         MemoryAllocatorDumpGuid(4242));

  MemoryAllocatorDumpGuid shared_mad_guid1(1);
  MemoryAllocatorDumpGuid shared_mad_guid2(2);
  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid2);

  pmd1->Clear();
  ASSERT_TRUE(pmd1->allocator_dumps().empty());
  ASSERT_TRUE(pmd1->allocator_dumps_edges().empty());
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad1"));
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  // Check that the pmd can be reused and behaves as expected.
  auto* mad1 = pmd1->CreateAllocatorDump("mad1");
  auto* mad3 = pmd1->CreateAllocatorDump("mad3");
  auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  auto* shared_mad2 =
      pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
  // 4 = mad1, mad3 plus the two shared global dumps.
  ASSERT_EQ(4u, pmd1->allocator_dumps().size());
  ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
  ASSERT_EQ(mad3, pmd1->GetAllocatorDump("mad3"));
  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad2->flags());

  traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  pmd1.reset();
}
153 
TEST(ProcessMemoryDumpTest,TakeAllDumpsFrom)154 TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
155   std::unique_ptr<TracedValue> traced_value(new TracedValue);
156   std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
157   metrics_by_context[AllocationContext()] = {1, 1};
158   TraceEventMemoryOverhead overhead;
159 
160   std::unique_ptr<ProcessMemoryDump> pmd1(
161       new ProcessMemoryDump(kDetailedDumpArgs));
162   auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
163   auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
164   pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
165   pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
166   pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");
167 
168   std::unique_ptr<ProcessMemoryDump> pmd2(
169       new ProcessMemoryDump(kDetailedDumpArgs));
170   auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
171   auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
172   pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
173   pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
174   pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");
175 
176   MemoryAllocatorDumpGuid shared_mad_guid1(1);
177   MemoryAllocatorDumpGuid shared_mad_guid2(2);
178   auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
179   auto* shared_mad2 =
180       pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
181 
182   pmd1->TakeAllDumpsFrom(pmd2.get());
183 
184   // Make sure that pmd2 is empty but still usable after it has been emptied.
185   ASSERT_TRUE(pmd2->allocator_dumps().empty());
186   ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
187   pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
188   ASSERT_EQ(1u, pmd2->allocator_dumps().size());
189   ASSERT_EQ(1u, pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
190   pmd2->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
191                          MemoryAllocatorDumpGuid(4242));
192 
193   // Check that calling serialization routines doesn't cause a crash.
194   pmd2->SerializeAllocatorDumpsInto(traced_value.get());
195 
196   // Free the |pmd2| to check that the memory ownership of the two MAD(s)
197   // has been transferred to |pmd1|.
198   pmd2.reset();
199 
200   // Now check that |pmd1| has been effectively merged.
201   ASSERT_EQ(6u, pmd1->allocator_dumps().size());
202   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad1"));
203   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
204   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad1"));
205   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
206   ASSERT_EQ(2u, pmd1->allocator_dumps_edges().size());
207   ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
208   ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
209   ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
210 
211   // Check that calling serialization routines doesn't cause a crash.
212   traced_value.reset(new TracedValue);
213   pmd1->SerializeAllocatorDumpsInto(traced_value.get());
214 
215   pmd1.reset();
216 }
217 
TEST(ProcessMemoryDumpTest,OverrideOwnershipEdge)218 TEST(ProcessMemoryDumpTest, OverrideOwnershipEdge) {
219   std::unique_ptr<ProcessMemoryDump> pmd(
220       new ProcessMemoryDump(kDetailedDumpArgs));
221 
222   auto* shm_dump1 = pmd->CreateAllocatorDump("shared_mem/seg1");
223   auto* shm_dump2 = pmd->CreateAllocatorDump("shared_mem/seg2");
224   auto* shm_dump3 = pmd->CreateAllocatorDump("shared_mem/seg3");
225   auto* shm_dump4 = pmd->CreateAllocatorDump("shared_mem/seg4");
226 
227   // Create one allocation with an auto-assigned guid and mark it as a
228   // suballocation of "fakealloc/allocated_objects".
229   auto* child1_dump = pmd->CreateAllocatorDump("shared_mem/child/seg1");
230   pmd->AddOverridableOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
231                                    0 /* importance */);
232   auto* child2_dump = pmd->CreateAllocatorDump("shared_mem/child/seg2");
233   pmd->AddOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
234                         3 /* importance */);
235   MemoryAllocatorDumpGuid shared_mad_guid(1);
236   pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
237   pmd->AddOverridableOwnershipEdge(shm_dump3->guid(), shared_mad_guid,
238                                    0 /* importance */);
239   auto* child4_dump = pmd->CreateAllocatorDump("shared_mem/child/seg4");
240   pmd->AddOverridableOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
241                                    4 /* importance */);
242 
243   const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
244       pmd->allocator_dumps_edges();
245   EXPECT_EQ(4u, edges.size());
246   EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
247   EXPECT_EQ(0, edges.find(child1_dump->guid())->second.importance);
248   EXPECT_TRUE(edges.find(child1_dump->guid())->second.overridable);
249   EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
250   EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
251   EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
252   EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
253   EXPECT_EQ(0, edges.find(shm_dump3->guid())->second.importance);
254   EXPECT_TRUE(edges.find(shm_dump3->guid())->second.overridable);
255   EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
256   EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
257   EXPECT_TRUE(edges.find(child4_dump->guid())->second.overridable);
258 
259   // These should override old edges:
260   pmd->AddOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
261                         1 /* importance */);
262   pmd->AddOwnershipEdge(shm_dump3->guid(), shared_mad_guid, 2 /* importance */);
263   // This should not change the old edges.
264   pmd->AddOverridableOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
265                                    0 /* importance */);
266   pmd->AddOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
267                         0 /* importance */);
268 
269   EXPECT_EQ(4u, edges.size());
270   EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
271   EXPECT_EQ(1, edges.find(child1_dump->guid())->second.importance);
272   EXPECT_FALSE(edges.find(child1_dump->guid())->second.overridable);
273   EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
274   EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
275   EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
276   EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
277   EXPECT_EQ(2, edges.find(shm_dump3->guid())->second.importance);
278   EXPECT_FALSE(edges.find(shm_dump3->guid())->second.overridable);
279   EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
280   EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
281   EXPECT_FALSE(edges.find(child4_dump->guid())->second.overridable);
282 }
283 
// AddSuballocation() must create an anonymous child dump under the target
// allocator dump, plus an ownership edge from the suballocation to it.
TEST(ProcessMemoryDumpTest, Suballocations) {
  auto pmd = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  const std::string allocator_dump_name = "fakealloc/allocated_objects";
  pmd->CreateAllocatorDump(allocator_dump_name);

  // Create one allocation with an auto-assigned guid and mark it as a
  // suballocation of "fakealloc/allocated_objects".
  auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
  pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);

  // Same here, but this time create an allocation with an explicit guid.
  auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
                                             MemoryAllocatorDumpGuid(0x42));
  pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);

  // Now check that AddSuballocation() has created anonymous child dumps under
  // "fakealloc/allocated_objects".
  auto anon_node_1_it = pmd->allocator_dumps().find(
      allocator_dump_name + "/__" + pic1_dump->guid().ToString());
  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_1_it);

  auto anon_node_2_it =
      pmd->allocator_dumps().find(allocator_dump_name + "/__42");
  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_2_it);

  // Finally check that AddSuballocation() has created also the
  // edges between the pictures and the anonymous allocator child dumps.
  bool found_edge[2]{false, false};
  for (const auto& e : pmd->allocator_dumps_edges()) {
    found_edge[0] |= (e.first == pic1_dump->guid() &&
                      e.second.target == anon_node_1_it->second->guid());
    found_edge[1] |= (e.first == pic2_dump->guid() &&
                      e.second.target == anon_node_2_it->second->guid());
  }
  ASSERT_TRUE(found_edge[0]);
  ASSERT_TRUE(found_edge[1]);

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd->SerializeAllocatorDumpsInto(traced_value.get());

  pmd.reset();
}
328 
// Creating a shared global dump with an existing guid must return the
// existing MAD; a strong creation upgrades a WEAK dump to DEFAULT flags and
// a later weak creation must not downgrade it back.
TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
  auto pmd = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  MemoryAllocatorDumpGuid shared_mad_guid(1);
  auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  // Lookup returns the same instance and leaves the flags untouched.
  auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad2);
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  // A second weak creation with the same guid is a no-op.
  auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad3);
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  // A strong creation upgrades the existing weak dump to DEFAULT.
  auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad4);
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());

  // A later weak creation must not downgrade the dump back to WEAK.
  auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad5);
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
}
353 
// CreateSharedMemoryOwnershipEdge() must link client dump -> local shm dump
// -> global shm dump, replacing a pre-existing overridable edge with a
// non-overridable one carrying the new importance.
TEST(ProcessMemoryDumpTest, SharedMemoryOwnershipTest) {
  auto pmd = std::make_unique<ProcessMemoryDump>(kDetailedDumpArgs);
  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
      pmd->allocator_dumps_edges();

  auto* client_dump2 = pmd->CreateAllocatorDump("discardable/segment2");
  auto shm_token2 = UnguessableToken::Create();
  MemoryAllocatorDumpGuid shm_local_guid2 =
      pmd->GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shm_token2));
  MemoryAllocatorDumpGuid shm_global_guid2 =
      SharedMemoryTracker::GetGlobalDumpIdForTracing(shm_token2);
  // Seed an overridable local->global edge that the call below must override.
  pmd->AddOverridableOwnershipEdge(shm_local_guid2, shm_global_guid2,
                                   0 /* importance */);

  pmd->CreateSharedMemoryOwnershipEdge(client_dump2->guid(), shm_token2,
                                       1 /* importance */);
  EXPECT_EQ(2u, edges.size());

  EXPECT_EQ(shm_global_guid2, edges.find(shm_local_guid2)->second.target);
  EXPECT_EQ(1, edges.find(shm_local_guid2)->second.importance);
  EXPECT_FALSE(edges.find(shm_local_guid2)->second.overridable);
  EXPECT_EQ(shm_local_guid2, edges.find(client_dump2->guid())->second.target);
  EXPECT_EQ(1, edges.find(client_dump2->guid())->second.importance);
  EXPECT_FALSE(edges.find(client_dump2->guid())->second.overridable);
}
380 
TEST(ProcessMemoryDumpTest,BackgroundModeTest)381 TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
382   MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
383   std::unique_ptr<ProcessMemoryDump> pmd(
384       new ProcessMemoryDump(background_args));
385   ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
386   SetAllocatorDumpNameWhitelistForTesting(kTestDumpNameWhitelist);
387   MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();
388 
389   // GetAllocatorDump works for uncreated dumps.
390   EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
391   EXPECT_EQ(nullptr, pmd->GetAllocatorDump("Whitelisted/TestName"));
392 
393   // Invalid dump names.
394   EXPECT_EQ(black_hole_mad,
395             pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
396   EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
397   EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/Test"));
398   EXPECT_EQ(black_hole_mad,
399             pmd->CreateAllocatorDump("Not/Whitelisted/TestName"));
400   EXPECT_EQ(black_hole_mad,
401             pmd->CreateAllocatorDump("Whitelisted/TestName/Google"));
402   EXPECT_EQ(black_hole_mad,
403             pmd->CreateAllocatorDump("Whitelisted/TestName/0x1a2Google"));
404   EXPECT_EQ(black_hole_mad,
405             pmd->CreateAllocatorDump("Whitelisted/TestName/__12/Google"));
406 
407   // Suballocations.
408   MemoryAllocatorDumpGuid guid(1);
409   pmd->AddSuballocation(guid, "malloc/allocated_objects");
410   EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
411   EXPECT_EQ(0u, pmd->allocator_dumps_.size());
412 
413   // Global dumps.
414   EXPECT_NE(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
415   EXPECT_NE(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
416   EXPECT_NE(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));
417 
418   // Valid dump names.
419   EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/TestName"));
420   EXPECT_NE(black_hole_mad,
421             pmd->CreateAllocatorDump("Whitelisted/TestName_0xA1b2"));
422   EXPECT_NE(black_hole_mad,
423             pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));
424 
425   // GetAllocatorDump is consistent.
426   EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
427   EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));
428 
429   // Test whitelisted entries.
430   ASSERT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("Whitelisted/TestName"));
431 
432   // Global dumps should be whitelisted.
433   ASSERT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("global/13456"));
434 
435   // Global dumps with non-guids should not be.
436   ASSERT_FALSE(IsMemoryAllocatorDumpNameWhitelisted("global/random"));
437 
438   // Random names should not.
439   ASSERT_FALSE(IsMemoryAllocatorDumpNameWhitelisted("NotWhitelisted/TestName"));
440 
441   // Check hex processing.
442   ASSERT_TRUE(IsMemoryAllocatorDumpNameWhitelisted("Whitelisted/0xA1b2"));
443 }
444 
// Allocator dump GUIDs must be deterministic in (process token, dump name):
// identical within the same process token, distinct across tokens.
TEST(ProcessMemoryDumpTest, GuidsTest) {
  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};

  const auto token_one = UnguessableToken::Create();
  const auto token_two = UnguessableToken::Create();

  // Two PMDs sharing a process token: the same name must yield the same GUID.
  ProcessMemoryDump pmd1(dump_args);
  pmd1.set_process_token_for_testing(token_one);
  MemoryAllocatorDump* mad1 = pmd1.CreateAllocatorDump("foo");

  ProcessMemoryDump pmd2(dump_args);
  pmd2.set_process_token_for_testing(token_one);
  MemoryAllocatorDump* mad2 = pmd2.CreateAllocatorDump("foo");

  // If we don't pass the argument we get a random PMD:
  ProcessMemoryDump pmd3(dump_args);
  MemoryAllocatorDump* mad3 = pmd3.CreateAllocatorDump("foo");

  // PMD's for different processes produce different GUIDs even for the same
  // names:
  ProcessMemoryDump pmd4(dump_args);
  pmd4.set_process_token_for_testing(token_two);
  MemoryAllocatorDump* mad4 = pmd4.CreateAllocatorDump("foo");

  ASSERT_EQ(mad1->guid(), mad2->guid());

  ASSERT_NE(mad2->guid(), mad3->guid());
  ASSERT_NE(mad3->guid(), mad4->guid());
  ASSERT_NE(mad4->guid(), mad2->guid());

  // GetDumpId() must agree with the GUID assigned at creation time.
  ASSERT_EQ(mad1->guid(), pmd1.GetDumpId("foo"));
}
477 
478 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
479 #if defined(OS_FUCHSIA)
480 // TODO(crbug.com/851760): Counting resident bytes is not supported on Fuchsia.
481 #define MAYBE_CountResidentBytes DISABLED_CountResidentBytes
482 #else
483 #define MAYBE_CountResidentBytes CountResidentBytes
484 #endif
// CountResidentBytes() must report every touched page of a private mapping
// as resident, for both small and large (> 8 MiB) regions.
TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytes) {
  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();

  // A handful of fully-dirtied pages must all be counted as resident.
  const size_t small_size = 5 * page_size;
  void* small_region = Map(small_size);
  memset(small_region, 0, small_size);
  size_t small_resident =
      ProcessMemoryDump::CountResidentBytes(small_region, small_size);
  ASSERT_EQ(small_resident, small_size);
  Unmap(small_region, small_size);

  // A large (> 8 MiB) fully-dirtied segment must be entirely resident too.
  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
  void* large_region = Map(kVeryLargeMemorySize);
  memset(large_region, 0, kVeryLargeMemorySize);
  size_t large_resident =
      ProcessMemoryDump::CountResidentBytes(large_region, kVeryLargeMemorySize);
  ASSERT_EQ(large_resident, kVeryLargeMemorySize);
  Unmap(large_region, kVeryLargeMemorySize);
}
505 
506 #if defined(OS_FUCHSIA)
507 // TODO(crbug.com/851760): Counting resident bytes is not supported on Fuchsia.
508 #define MAYBE_CountResidentBytesInSharedMemory \
509   DISABLED_CountResidentBytesInSharedMemory
510 #else
511 #define MAYBE_CountResidentBytesInSharedMemory CountResidentBytesInSharedMemory
512 #endif
// CountResidentBytesInSharedMemory() must report exactly the touched bytes
// of a shared memory mapping as resident.
TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytesInSharedMemory) {
#if defined(OS_IOS)
  // TODO(crbug.com/748410): Reenable this test.
  if (!base::ios::IsRunningOnIOS10OrLater()) {
    return;
  }
#endif

  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();

  // A few fully-dirtied pages must all be resident.
  const size_t small_size = 5 * page_size;
  SharedMemory small_shm;
  small_shm.CreateAndMapAnonymous(small_size);
  memset(small_shm.memory(), 0, small_size);
  base::Optional<size_t> small_res =
      ProcessMemoryDump::CountResidentBytesInSharedMemory(
          small_shm.memory(), small_shm.mapped_size());
  ASSERT_TRUE(small_res.has_value());
  ASSERT_EQ(small_res.value(), small_size);
  small_shm.Unmap();
  small_shm.Close();

  // A large (> 8 MiB) fully-dirtied segment must be entirely resident.
  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
  SharedMemory large_shm;
  large_shm.CreateAndMapAnonymous(kVeryLargeMemorySize);
  memset(large_shm.memory(), 0, kVeryLargeMemorySize);
  base::Optional<size_t> large_res =
      ProcessMemoryDump::CountResidentBytesInSharedMemory(
          large_shm.memory(), large_shm.mapped_size());
  ASSERT_TRUE(large_res.has_value());
  ASSERT_EQ(large_res.value(), kVeryLargeMemorySize);
  large_shm.Unmap();
  large_shm.Close();

  // Touch only about half of a large segment: only the touched bytes count.
  const size_t kTouchedMemorySize = 7 * 1024 * 1024;
  SharedMemory partial_shm;
  partial_shm.CreateAndMapAnonymous(kVeryLargeMemorySize);
  memset(partial_shm.memory(), 0, kTouchedMemorySize);
  base::Optional<size_t> partial_res =
      ProcessMemoryDump::CountResidentBytesInSharedMemory(
          partial_shm.memory(), partial_shm.mapped_size());
  ASSERT_TRUE(partial_res.has_value());
  ASSERT_EQ(partial_res.value(), kTouchedMemorySize);
  partial_shm.Unmap();
  partial_shm.Close();
}
562 #endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
563 
564 }  // namespace trace_event
565 }  // namespace base
566