// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <stdint.h>

#include <vector>

#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/test_io_thread.h"
#include "base/test/trace_event_analyzer.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_config_memory_test_util.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using testing::_;
using testing::AnyNumber;
using testing::AtMost;
using testing::Between;
using testing::Invoke;
using testing::Return;

namespace base {
namespace trace_event {

// GTest matchers for MemoryDumpRequestArgs arguments.
MATCHER(IsDetailedDump, "") {
  return arg.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
}

MATCHER(IsLightDump, "") {
  return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
}

namespace {

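// Convenience wrapper: the test fixture marks dumper registrations as ignored
// (see InitializeMemoryDumpManager below), so this helper temporarily
// re-enables them, registers |mdp| with the global MemoryDumpManager and then
// restores the ignored state.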
void RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
    const MemoryDumpProvider::Options& options) {
  MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
  mdm->set_dumper_registrations_ignored_for_testing(false);
  mdm->RegisterDumpProvider(mdp, "TestDumpProvider", task_runner, options);
  mdm->set_dumper_registrations_ignored_for_testing(true);
}

void RegisterDumpProvider(MemoryDumpProvider* mdp) {
  RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
}

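// Flush callback used when collecting the trace: appends each JSON fragment to
// |buffer| and quits the outer RunLoop once the last fragment has arrived.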
void OnTraceDataCollected(Closure quit_closure,
                          trace_event::TraceResultBuffer* buffer,
                          const scoped_refptr<RefCountedString>& json,
                          bool has_more_events) {
  buffer->AddFragment(json->data());
  if (!has_more_events)
    quit_closure.Run();
}

}  // namespace

// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
 public:
  MemoryDumpManagerDelegateForTesting() {
    ON_CALL(*this, RequestGlobalMemoryDump(_, _))
        .WillByDefault(Invoke(
            this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
  }

  MOCK_METHOD2(RequestGlobalMemoryDump,
               void(const MemoryDumpRequestArgs& args,
                    const MemoryDumpCallback& callback));

  uint64_t GetTracingProcessId() const override {
    NOTREACHED();
    return MemoryDumpManager::kInvalidTracingProcessId;
  }
};

class MockMemoryDumpProvider : public MemoryDumpProvider {
 public:
  MOCK_METHOD0(Destructor, void());
  MOCK_METHOD2(OnMemoryDump,
               bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));

  MockMemoryDumpProvider() : enable_mock_destructor(false) {}
  ~MockMemoryDumpProvider() override {
    if (enable_mock_destructor)
      Destructor();
  }

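  // When set to true, the destructor invokes the mocked Destructor() method,
  // letting tests set expectations on provider deletion.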
  bool enable_mock_destructor;
};

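// Test fixture that installs a fresh MemoryDumpManager (plus a mock delegate
// and a MessageLoop) before each test and tears everything down afterwards.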
class MemoryDumpManagerTest : public testing::Test {
 public:
  MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}

  void SetUp() override {
    last_callback_success_ = false;
    message_loop_.reset(new MessageLoop());
    mdm_.reset(new MemoryDumpManager());
    MemoryDumpManager::SetInstanceForTesting(mdm_.get());
    ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
    delegate_.reset(new MemoryDumpManagerDelegateForTesting);
  }

  void TearDown() override {
    MemoryDumpManager::SetInstanceForTesting(nullptr);
    mdm_.reset();
    delegate_.reset();
    message_loop_.reset();
    TraceLog::DeleteForTesting();
  }

  // Turns a Closure into a MemoryDumpCallback, keeping track of the callback
  // result and taking care of posting the closure on the correct task runner.
  void DumpCallbackAdapter(scoped_refptr<SingleThreadTaskRunner> task_runner,
                           Closure closure,
                           uint64_t dump_guid,
                           bool success) {
    last_callback_success_ = success;
    task_runner->PostTask(FROM_HERE, closure);
  }

 protected:
  void InitializeMemoryDumpManager(bool is_coordinator) {
    mdm_->set_dumper_registrations_ignored_for_testing(true);
    mdm_->Initialize(delegate_.get(), is_coordinator);
  }

  void RequestGlobalDumpAndWait(MemoryDumpType dump_type,
                                MemoryDumpLevelOfDetail level_of_detail) {
    RunLoop run_loop;
    MemoryDumpCallback callback =
        Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
             MessageLoop::current()->task_runner(), run_loop.QuitClosure());
    mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
    run_loop.Run();
  }

  void EnableTracingWithLegacyCategories(const char* category) {
    TraceLog::GetInstance()->SetEnabled(TraceConfig(category, ""),
                                        TraceLog::RECORDING_MODE);
  }

  void EnableTracingWithTraceConfig(const std::string& trace_config) {
    TraceLog::GetInstance()->SetEnabled(TraceConfig(trace_config),
                                        TraceLog::RECORDING_MODE);
  }

  void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }

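  // Returns whether the MemoryDumpManager's periodic dump timer is currently
  // running (used by the TraceConfig expectation tests below).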
  bool IsPeriodicDumpingEnabled() const {
    return mdm_->periodic_dump_timer_.IsRunning();
  }

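  // Exposes the consecutive-failure threshold after which a dump provider gets
  // disabled (used by the DisableFailingDumpers test below).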
  int GetMaxConsecutiveFailuresCount() const {
    return MemoryDumpManager::kMaxConsecutiveFailuresCount;
  }

  const MemoryDumpProvider::Options kDefaultOptions;
  scoped_ptr<MemoryDumpManager> mdm_;
  scoped_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
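  // Set by DumpCallbackAdapter() with the result of the most recent dump.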
  bool last_callback_success_;

 private:
  scoped_ptr<MessageLoop> message_loop_;

  // We want our singleton torn down after each test.
  ShadowingAtExitManager at_exit_manager_;
};

// Basic sanity checks. Registers a memory dump provider and checks that it is
// called, but only when memory-infra is enabled.
TEST_F(MemoryDumpManagerTest, SingleDumper) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;
  RegisterDumpProvider(&mdp);

  // Check that the dumper is not called if the memory category is not enabled.
  EnableTracingWithLegacyCategories("foobar-but-not-memory");
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Now repeat enabling the memory category and check that the dumper is
  // invoked this time.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3).WillRepeatedly(Return(true));
  for (int i = 0; i < 3; ++i)
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  mdm_->UnregisterDumpProvider(&mdp);

  // Finally check the unregister logic: the delegate will be invoked but not
  // the dump provider, as it has been unregistered.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
  EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);

  for (int i = 0; i < 3; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }
  DisableTracing();
}

// Checks that requesting dumps with a high level of detail actually
// propagates the level of detail properly to the OnMemoryDump() call on dump
// providers.
TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;

  RegisterDumpProvider(&mdp);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();
  mdm_->UnregisterDumpProvider(&mdp);

  // Check that requesting dumps with a low level of detail also propagates to
  // the OnMemoryDump() call on dump providers.
  RegisterDumpProvider(&mdp);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::LIGHT);
  DisableTracing();
  mdm_->UnregisterDumpProvider(&mdp);
}

// Checks that the SharedSessionState object is actually shared over time.
TEST_F(MemoryDumpManagerTest, SharedSessionState) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;
  RegisterDumpProvider(&mdp1);
  RegisterDumpProvider(&mdp2);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  const MemoryDumpSessionState* session_state = mdm_->session_state().get();
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(2)
      .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
                                             ProcessMemoryDump* pmd) -> bool {
        EXPECT_EQ(session_state, pmd->session_state().get());
        return true;
      }));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(2)
      .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
                                             ProcessMemoryDump* pmd) -> bool {
        EXPECT_EQ(session_state, pmd->session_state().get());
        return true;
      }));

  for (int i = 0; i < 2; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}

// Checks that the (Un)RegisterDumpProvider logic behaves sanely.
TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  // Enable only mdp1.
  RegisterDumpProvider(&mdp1);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Invert: enable mdp2 and disable mdp1.
  mdm_->UnregisterDumpProvider(&mdp1);
  RegisterDumpProvider(&mdp2);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Enable both mdp1 and mdp2.
  RegisterDumpProvider(&mdp1);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();
}

// Checks that the dump provider invocations depend only on the current
// registration state and not on previous registrations and dumps.
TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp;

  RegisterDumpProvider(&mdp);

  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  mdm_->UnregisterDumpProvider(&mdp);

  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  RegisterDumpProvider(&mdp);
  mdm_->UnregisterDumpProvider(&mdp);

  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }

  RegisterDumpProvider(&mdp);
  mdm_->UnregisterDumpProvider(&mdp);
  RegisterDumpProvider(&mdp);

  {
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
    EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    DisableTracing();
  }
}

// Checks that the MemoryDumpManager respects the thread affinity when a
// MemoryDumpProvider specifies a task_runner(). The test starts by creating 8
// threads and registering a MemoryDumpProvider on each of them. At each
// iteration, one thread is removed, to check the live unregistration logic.
TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  const uint32_t kNumInitialThreads = 8;

  std::vector<scoped_ptr<Thread>> threads;
  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;

  // Create the threads and set up the expectations. Given that at each
  // iteration we will pop out one thread/MemoryDumpProvider, each MDP is
  // supposed to be invoked a number of times equal to its index.
  for (uint32_t i = kNumInitialThreads; i > 0; --i) {
    threads.push_back(make_scoped_ptr(new Thread("test thread")));
    auto thread = threads.back().get();
    thread->Start();
    scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
    auto mdp = mdps.back().get();
    RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(i)
        .WillRepeatedly(Invoke(
            [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
              EXPECT_TRUE(task_runner->RunsTasksOnCurrentThread());
              return true;
            }));
  }
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  while (!threads.empty()) {
    last_callback_success_ = false;
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    EXPECT_TRUE(last_callback_success_);

    // Unregister a MDP and destroy one thread at each iteration to check the
    // live unregistration logic. The unregistration needs to happen on the
    // same thread the MDP belongs to.
    {
      RunLoop run_loop;
      Closure unregistration =
          Bind(&MemoryDumpManager::UnregisterDumpProvider,
               Unretained(mdm_.get()), Unretained(mdps.back().get()));
      threads.back()->task_runner()->PostTaskAndReply(FROM_HERE, unregistration,
                                                      run_loop.QuitClosure());
      run_loop.Run();
    }
    mdps.pop_back();
    threads.back()->Stop();
    threads.pop_back();
  }

  DisableTracing();
}

// Checks that providers get disabled after 3 consecutive failures, but not
// otherwise (e.g., if interleaved).
TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1);
  RegisterDumpProvider(&mdp2);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(kNumDumps);

  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(GetMaxConsecutiveFailuresCount())
      .WillRepeatedly(Return(false));

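  // mdp2 fails intermittently but never kMaxConsecutiveFailuresCount times in
  // a row, so it should keep being invoked for all the dumps.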
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .WillOnce(Return(false))
      .WillOnce(Return(true))
      .WillOnce(Return(false))
      .WillOnce(Return(false))
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  for (int i = 0; i < kNumDumps; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}

// Sneakily registers an extra memory dump provider while an existing one is
// dumping and expects it to take part in the already active tracing session.
TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);

  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(4)
      .WillOnce(Return(true))
      .WillOnce(
          Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
            RegisterDumpProvider(&mdp2);
            return true;
          }))
      .WillRepeatedly(Return(true));

  // Depending on the insertion order (before or after mdp1), mdp2 might also
  // be called immediately after it gets registered.
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(Between(2, 3))
      .WillRepeatedly(Return(true));

  for (int i = 0; i < 4; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}

// Like RegisterDumperWhileDumping, but unregisters the dump provider instead.
TEST_F(MemoryDumpManagerTest, UnregisterDumperWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  MockMemoryDumpProvider mdp2;

  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
  RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get(), kDefaultOptions);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);

  EXPECT_CALL(mdp1, OnMemoryDump(_, _))
      .Times(4)
      .WillOnce(Return(true))
      .WillOnce(
          Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
            MemoryDumpManager::GetInstance()->UnregisterDumpProvider(&mdp2);
            return true;
          }))
      .WillRepeatedly(Return(true));

  // Depending on the insertion order (before or after mdp1), mdp2 might
  // already have been called when UnregisterDumpProvider happens.
  EXPECT_CALL(mdp2, OnMemoryDump(_, _))
      .Times(Between(1, 2))
      .WillRepeatedly(Return(true));

  for (int i = 0; i < 4; i++) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }

  DisableTracing();
}

// Checks that the dump does not abort when unregistering a provider while
// dumping from a different thread than the dumping thread.
TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::vector<scoped_ptr<TestIOThread>> threads;
  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;

  for (int i = 0; i < 2; i++) {
    threads.push_back(
        make_scoped_ptr(new TestIOThread(TestIOThread::kAutoStart)));
    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                         kDefaultOptions);
  }

  int on_memory_dump_call_count = 0;

  // When OnMemoryDump is called on either of the dump providers, it will
  // unregister the other one.
  for (const scoped_ptr<MockMemoryDumpProvider>& mdp : mdps) {
    int other_idx = (mdps.front() == mdp);
    TestIOThread* other_thread = threads[other_idx].get();
    MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
    auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
        const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
      other_thread->PostTaskAndWait(
          FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
                                base::Unretained(&*mdm_), other_mdp));
      on_memory_dump_call_count++;
      return true;
    };

    // OnMemoryDump is called once for the provider that dumps first, and zero
    // times for the other provider.
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(AtMost(1))
        .WillOnce(Invoke(on_dump));
  }

  last_callback_success_ = false;
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  ASSERT_EQ(1, on_memory_dump_call_count);
  ASSERT_TRUE(last_callback_success_);

  DisableTracing();
}

// If a thread (with a dump provider living on it) is torn down during a dump,
// its dump provider should be skipped but the dump itself should succeed.
TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  std::vector<scoped_ptr<TestIOThread>> threads;
  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;

  for (int i = 0; i < 2; i++) {
    threads.push_back(
        make_scoped_ptr(new TestIOThread(TestIOThread::kAutoStart)));
    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                         kDefaultOptions);
  }

  int on_memory_dump_call_count = 0;

  // When OnMemoryDump is called on either of the dump providers, it will
  // tear down the thread of the other one.
  for (const scoped_ptr<MockMemoryDumpProvider>& mdp : mdps) {
    int other_idx = (mdps.front() == mdp);
    TestIOThread* other_thread = threads[other_idx].get();
    auto on_dump = [other_thread, &on_memory_dump_call_count](
        const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
      other_thread->Stop();
      on_memory_dump_call_count++;
      return true;
    };

    // OnMemoryDump is called once for the provider that dumps first, and zero
    // times for the other provider.
    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
        .Times(AtMost(1))
        .WillOnce(Invoke(on_dump));
  }

  last_callback_success_ = false;
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  ASSERT_EQ(1, on_memory_dump_call_count);
  ASSERT_TRUE(last_callback_success_);

  DisableTracing();
}

// Checks that a NACK callback is invoked if RequestGlobalDump() is called when
// tracing is not enabled.
TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MockMemoryDumpProvider mdp1;
  RegisterDumpProvider(&mdp1);

  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);

  last_callback_success_ = true;
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  EXPECT_FALSE(last_callback_success_);
}

// Checks that if the MemoryDumpManager is initialized after tracing has
// already begun, it will still late-join the party (real use case: startup
// tracing).
TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
  MockMemoryDumpProvider mdp;
  RegisterDumpProvider(&mdp);
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);

  // First check that a RequestGlobalDump() issued before the MemoryDumpManager
  // initialization gets NACK-ed cleanly.
  {
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    EXPECT_FALSE(last_callback_success_);
  }

  // Now late-initialize the MemoryDumpManager and check that the
  // RequestGlobalDump completes successfully.
  {
    EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
    InitializeMemoryDumpManager(false /* is_coordinator */);
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
    EXPECT_TRUE(last_callback_success_);
  }
  DisableTracing();
}

// This test (and TraceConfigExpectationsWhenIsCoordinator below) crystallizes
// the expectations of the chrome://tracing UI and chrome telemetry w.r.t.
// periodic dumps in memory-infra, gracefully handling the transition between
// the legacy and the new-style (JSON-based) TraceConfig.
TEST_F(MemoryDumpManagerTest, TraceConfigExpectations) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  MemoryDumpManagerDelegateForTesting& delegate = *delegate_;

  // Don't trigger the default behavior of the mock delegate in this test,
  // which would short-circuit the dump request to the actual
  // CreateProcessDump().
  // We don't want to create any dump in this test, only check whether the
  // dumps are requested or not.
  ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());

  // Enabling memory-infra in a non-coordinator process should not trigger any
  // periodic dumps.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a non-coordinator
  // process with a fully defined trigger config should NOT enable any periodic
  // dumps.
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(1, 5));
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  DisableTracing();
}

TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
  InitializeMemoryDumpManager(true /* is_coordinator */);
  MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
  ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());

  // Enabling memory-infra with the legacy TraceConfig (category filter) in
  // a coordinator process should enable periodic dumps.
  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_TRUE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process without specifying any "memory_dump_config" section should enable
  // periodic dumps. This is to preserve the behavior of the chrome://tracing
  // UI, that is: ticking memory-infra should dump periodically with the
  // default config.
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_NoTriggers());
  EXPECT_TRUE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process with an empty "memory_dump_config" should NOT enable periodic
  // dumps. This is the way telemetry is supposed to use memory-infra with
  // only explicitly triggered dumps.
  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
  EXPECT_FALSE(IsPeriodicDumpingEnabled());
  DisableTracing();

  // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
  // process with a fully defined trigger config should cause periodic dumps to
  // be performed in the correct order.
  RunLoop run_loop;
  auto quit_closure = run_loop.QuitClosure();

  const int kHeavyDumpRate = 5;
  const int kLightDumpPeriodMs = 1;
  const int kHeavyDumpPeriodMs = kHeavyDumpRate * kLightDumpPeriodMs;
  // The expected sequence with light=1ms, heavy=5ms is H,L,L,L,L,H,...
  testing::InSequence sequence;
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .Times(kHeavyDumpRate - 1);
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .Times(kHeavyDumpRate - 2);
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
      .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
                                      const MemoryDumpCallback& callback) {
        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
      }));

  // Swallow all the final spurious calls until tracing gets disabled.
  EXPECT_CALL(delegate, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());

  EnableTracingWithTraceConfig(
      TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(
          kLightDumpPeriodMs, kHeavyDumpPeriodMs));
  run_loop.Run();
  DisableTracing();
}

// Tests against race conditions that might arise when disabling tracing in the
// middle of a global memory dump.
TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
  base::WaitableEvent tracing_disabled_event(false, false);
  InitializeMemoryDumpManager(false /* is_coordinator */);

  // Register a bound dump provider.
  scoped_ptr<Thread> mdp_thread(new Thread("test thread"));
  mdp_thread->Start();
  MockMemoryDumpProvider mdp_with_affinity;
  RegisterDumpProvider(&mdp_with_affinity, mdp_thread->task_runner(),
                       kDefaultOptions);

  // Register also an unbound dump provider. Unbound dump providers are always
  // invoked after bound ones.
  MockMemoryDumpProvider unbound_mdp;
  RegisterDumpProvider(&unbound_mdp, nullptr, kDefaultOptions);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp_with_affinity, OnMemoryDump(_, _))
      .Times(1)
      .WillOnce(
          Invoke([&tracing_disabled_event](const MemoryDumpArgs&,
                                           ProcessMemoryDump* pmd) -> bool {
            tracing_disabled_event.Wait();

            // At this point tracing has been disabled and the
            // MemoryDumpManager.dump_thread_ has been shut down.
            return true;
          }));

  // |unbound_mdp| should never be invoked because the thread for unbound dump
  // providers has been shut down in the meantime.
  EXPECT_CALL(unbound_mdp, OnMemoryDump(_, _)).Times(0);

  last_callback_success_ = true;
  RunLoop run_loop;
  MemoryDumpCallback callback =
      Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
           MessageLoop::current()->task_runner(), run_loop.QuitClosure());
  mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
                          MemoryDumpLevelOfDetail::DETAILED, callback);
  DisableTracing();
  tracing_disabled_event.Signal();
  run_loop.Run();

  // RequestGlobalMemoryDump() should still succeed even if some threads were
  // torn down during the dump.
  EXPECT_TRUE(last_callback_success_);
}

TEST_F(MemoryDumpManagerTest, DumpOnBehalfOfOtherProcess) {
  using trace_analyzer::Query;

  InitializeMemoryDumpManager(false /* is_coordinator */);

  // Standard provider with default options (create dump for current process).
  MemoryDumpProvider::Options options;
  MockMemoryDumpProvider mdp1;
  RegisterDumpProvider(&mdp1, nullptr, options);

  // Provider with out-of-process dumping.
  MockMemoryDumpProvider mdp2;
  options.target_pid = 123;
  RegisterDumpProvider(&mdp2, nullptr, options);

  // Another provider with out-of-process dumping.
  MockMemoryDumpProvider mdp3;
  options.target_pid = 456;
  RegisterDumpProvider(&mdp3, nullptr, options);

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  EXPECT_CALL(mdp3, OnMemoryDump(_, _)).Times(1).WillRepeatedly(Return(true));
  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED);
  DisableTracing();

  // Flush the trace into JSON.
  trace_event::TraceResultBuffer buffer;
  TraceResultBuffer::SimpleOutput trace_output;
  buffer.SetOutputCallback(trace_output.GetCallback());
  RunLoop run_loop;
  buffer.Start();
  trace_event::TraceLog::GetInstance()->Flush(
      Bind(&OnTraceDataCollected, run_loop.QuitClosure(), Unretained(&buffer)));
  run_loop.Run();
  buffer.Finish();

  // Analyze the JSON.
  scoped_ptr<trace_analyzer::TraceAnalyzer> analyzer = make_scoped_ptr(
      trace_analyzer::TraceAnalyzer::Create(trace_output.json_output));
  trace_analyzer::TraceEventVector events;
  analyzer->FindEvents(Query::EventPhaseIs(TRACE_EVENT_PHASE_MEMORY_DUMP),
                       &events);

  ASSERT_EQ(3u, events.size());
  ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(123)));
  ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(456)));
  ASSERT_EQ(1u, trace_analyzer::CountMatches(
                    events, Query::EventPidIs(GetCurrentProcId())));
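  // All three dump events are part of the same global dump, so they must share
  // the same dump guid.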
  ASSERT_EQ(events[0]->id, events[1]->id);
  ASSERT_EQ(events[0]->id, events[2]->id);
}

// Tests the basics of UnregisterAndDeleteDumpProviderSoon(): the
// unregistration should actually delete the providers and not leak them.
TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoon) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  static const int kNumProviders = 3;
  int dtor_count = 0;
  std::vector<scoped_ptr<MemoryDumpProvider>> mdps;
  for (int i = 0; i < kNumProviders; ++i) {
    scoped_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
    mdp->enable_mock_destructor = true;
    EXPECT_CALL(*mdp, Destructor())
        .WillOnce(Invoke([&dtor_count]() { dtor_count++; }));
    RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
    mdps.push_back(std::move(mdp));
  }

  while (!mdps.empty()) {
    mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdps.back()));
    mdps.pop_back();
  }

  ASSERT_EQ(kNumProviders, dtor_count);
}

// This test checks against races when unregistering an unbound dump provider
// from another thread while dumping. It registers one MDP and, when
// OnMemoryDump() is called, it invokes UnregisterAndDeleteDumpProviderSoon()
// from another thread. The OnMemoryDump() and the dtor call are expected to
// happen on the same thread (the MemoryDumpManager utility thread).
TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
  InitializeMemoryDumpManager(false /* is_coordinator */);
  scoped_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
  mdp->enable_mock_destructor = true;
  RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);

  base::PlatformThreadRef thread_ref;
  auto self_unregister_from_another_thread = [&mdp, &thread_ref](
      const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
    thread_ref = PlatformThread::CurrentRef();
    TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
    thread_for_unregistration.PostTaskAndWait(
        FROM_HERE,
        base::Bind(
            &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
            base::Unretained(MemoryDumpManager::GetInstance()),
            base::Passed(scoped_ptr<MemoryDumpProvider>(std::move(mdp)))));
    thread_for_unregistration.Stop();
    return true;
  };
  EXPECT_CALL(*mdp, OnMemoryDump(_, _))
      .Times(1)
      .WillOnce(Invoke(self_unregister_from_another_thread));
  EXPECT_CALL(*mdp, Destructor())
      .Times(1)
      .WillOnce(Invoke([&thread_ref]() {
        EXPECT_EQ(thread_ref, PlatformThread::CurrentRef());
      }));

  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
  for (int i = 0; i < 2; ++i) {
    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                             MemoryDumpLevelOfDetail::DETAILED);
  }
  DisableTracing();
}

}  // namespace trace_event
}  // namespace base