/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tracing/core/trace_writer_impl.h"

#include <array>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/commit_data_request.h"
#include "perfetto/ext/tracing/core/trace_writer.h"
#include "perfetto/ext/tracing/core/tracing_service.h"
#include "src/base/test/gtest_test_suite.h"
#include "src/base/test/test_task_runner.h"
#include "src/tracing/core/shared_memory_arbiter_impl.h"
#include "src/tracing/test/aligned_buffer_test.h"
#include "src/tracing/test/fake_producer_endpoint.h"
#include "test/gtest_and_gmock.h"

#include "protos/perfetto/trace/test_event.pbzero.h"
#include "protos/perfetto/trace/trace_packet.pbzero.h"

namespace perfetto {
namespace {

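// Parameterized test fixture that backs a SharedMemoryArbiterImpl with the
// aligned test buffer from AlignedBufferTest and a FakeProducerEndpoint, so
// that tests can inspect commit requests without a real tracing service.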
class TraceWriterImplTest : public AlignedBufferTest {
 public:
  void SetUp() override {
    SharedMemoryArbiterImpl::set_default_layout_for_testing(
        SharedMemoryABI::PageLayout::kPageDiv4);
    AlignedBufferTest::SetUp();
    task_runner_.reset(new base::TestTaskRunner());
    arbiter_.reset(new SharedMemoryArbiterImpl(buf(), buf_size(), page_size(),
                                               &fake_producer_endpoint_,
                                               task_runner_.get()));
  }

  void TearDown() override {
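    // Destroy the arbiter first: it holds a raw pointer to |task_runner_|.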
    arbiter_.reset();
    task_runner_.reset();
  }

  FakeProducerEndpoint fake_producer_endpoint_;
  std::unique_ptr<base::TestTaskRunner> task_runner_;
  std::unique_ptr<SharedMemoryArbiterImpl> arbiter_;
  std::function<void(const std::vector<uint32_t>&)> on_pages_complete_;
};

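// Run every test case against both commonly used page sizes (4 KiB and
// 64 KiB).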
size_t const kPageSizes[] = {4096, 65536};
INSTANTIATE_TEST_SUITE_P(PageSize,
                         TraceWriterImplTest,
                         ::testing::ValuesIn(kPageSizes));

TEST_P(TraceWriterImplTest, SingleWriter) {
  const BufferID kBufId = 42;
  std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(kBufId);
  const size_t kNumPackets = 32;
  for (size_t i = 0; i < kNumPackets; i++) {
    auto packet = writer->NewTracePacket();
    char str[16];
    snprintf(str, sizeof(str), "foobar %zu", i);
    packet->set_for_testing()->set_str(str);
  }

  // Destroying the TraceWriterImpl should cause the last packet to be
  // finalized and the chunk to be put back in the kChunkComplete state.
  writer.reset();

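  // Walk every chunk of the SMB and count the packets in each chunk that can
  // be acquired for reading.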
  SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
  size_t packets_count = 0;
  for (size_t page_idx = 0; page_idx < kNumPages; page_idx++) {
    uint32_t page_layout = abi->GetPageLayout(page_idx);
    size_t num_chunks = SharedMemoryABI::GetNumChunksForLayout(page_layout);
    for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) {
      auto chunk_state = abi->GetChunkState(page_idx, chunk_idx);
      ASSERT_TRUE(chunk_state == SharedMemoryABI::kChunkFree ||
                  chunk_state == SharedMemoryABI::kChunkComplete);
      auto chunk = abi->TryAcquireChunkForReading(page_idx, chunk_idx);
      if (!chunk.is_valid())
        continue;
      packets_count += chunk.header()->packets.load().count;
    }
  }
  EXPECT_EQ(kNumPackets, packets_count);
  // TODO(primiano): also check the content of the packets by decoding the
  // protos.
}

TEST_P(TraceWriterImplTest, FragmentingPacketWithProducerAndServicePatching) {
  const BufferID kBufId = 42;
  std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(kBufId);

  // Write a packet that's guaranteed to span more than a single chunk, but
  // less than two chunks.
  auto packet = writer->NewTracePacket();
  size_t chunk_size = page_size() / 4;
  std::stringstream large_string_writer;
  for (size_t pos = 0; pos < chunk_size; pos++)
    large_string_writer << "x";
  std::string large_string = large_string_writer.str();
  packet->set_for_testing()->set_str(large_string.data(), large_string.size());

  // First chunk should be committed.
  arbiter_->FlushPendingCommitDataRequests();
  const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
  ASSERT_EQ(1, last_commit.chunks_to_move_size());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
  EXPECT_EQ(kBufId, last_commit.chunks_to_move()[0].target_buffer());
  EXPECT_EQ(0, last_commit.chunks_to_patch_size());

  // We will simulate a batching cycle by first setting the batching period to
  // a very large value and then force-flushing when we are done writing data.
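  // Opt in to applying patches directly in the SMB for chunks that have not
  // yet been committed to the service; this requires service-side support.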
  arbiter_->SetDirectSMBPatchingSupportedByService();
  ASSERT_TRUE(arbiter_->EnableDirectSMBPatching());
  arbiter_->SetBatchCommitsDuration(UINT32_MAX);

  // Write a second packet that's guaranteed to span more than a single chunk.
  // Starting a new trace packet should cause the patches for the first packet
  // (i.e. for the first chunk) to be queued for sending to the service. They
  // cannot be applied locally because the first chunk was already committed.
  packet->Finalize();
  auto packet2 = writer->NewTracePacket();
  packet2->set_for_testing()->set_str(large_string.data(), large_string.size());

  // Starting a new packet yet again should cause the patches for the second
  // packet (i.e. for the second chunk) to be applied in the producer, because
  // the second chunk has not been committed yet.
  packet2->Finalize();
  auto packet3 = writer->NewTracePacket();

  // Simulate the end of the batching period, which should trigger a commit to
  // the service.
  arbiter_->FlushPendingCommitDataRequests();

  SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();

  // The first allocated chunk should be complete but need patching, since the
  // packet extended past the chunk and no patches for the packet size or
  // string field size were applied yet.
  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
  auto chunk = abi->TryAcquireChunkForReading(0u, 0u);
  ASSERT_TRUE(chunk.is_valid());
  ASSERT_EQ(1, chunk.header()->packets.load().count);
  ASSERT_TRUE(chunk.header()->packets.load().flags &
              SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  ASSERT_TRUE(chunk.header()->packets.load().flags &
              SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);

  // Verify that a patch for the first chunk was sent to the service.
  ASSERT_EQ(1, last_commit.chunks_to_patch_size());
  EXPECT_EQ(writer->writer_id(), last_commit.chunks_to_patch()[0].writer_id());
  EXPECT_EQ(kBufId, last_commit.chunks_to_patch()[0].target_buffer());
  EXPECT_EQ(chunk.header()->chunk_id.load(),
            last_commit.chunks_to_patch()[0].chunk_id());
  EXPECT_FALSE(last_commit.chunks_to_patch()[0].has_more_patches());
  ASSERT_EQ(1, last_commit.chunks_to_patch()[0].patches_size());

  // Verify that the second chunk was committed.
  ASSERT_EQ(1, last_commit.chunks_to_move_size());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
  EXPECT_EQ(1u, last_commit.chunks_to_move()[0].chunk());
  EXPECT_EQ(kBufId, last_commit.chunks_to_move()[0].target_buffer());

  // The second chunk should be in a complete state and should not need
  // patching, as the patches to it should have been applied in the producer.
  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 1u));
  auto chunk2 = abi->TryAcquireChunkForReading(0u, 1u);
  ASSERT_TRUE(chunk2.is_valid());
  ASSERT_EQ(2, chunk2.header()->packets.load().count);
  ASSERT_TRUE(chunk2.header()->packets.load().flags &
              SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
  ASSERT_FALSE(chunk2.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
}

TEST_P(TraceWriterImplTest, FragmentingPacketWithoutEnablingProducerPatching) {
  // We will simulate a batching cycle by first setting the batching period to
  // a very large value and then force-flushing at the point where we expect a
  // flush to happen - in this case, when a patch is encountered.
  //
  // Note: direct producer-side patching should be disabled by default.
  arbiter_->SetBatchCommitsDuration(UINT32_MAX);

  const BufferID kBufId = 42;
  std::unique_ptr<TraceWriter> writer = arbiter_->CreateTraceWriter(kBufId);

  // Write a packet that's guaranteed to span more than a single chunk.
  auto packet = writer->NewTracePacket();
  size_t chunk_size = page_size() / 4;
  std::stringstream large_string_writer;
  for (size_t pos = 0; pos < chunk_size; pos++)
    large_string_writer << "x";
  std::string large_string = large_string_writer.str();
  packet->set_for_testing()->set_str(large_string.data(), large_string.size());

  // Starting a new packet should cause the first chunk and its patches to be
  // committed to the service.
  packet->Finalize();
  auto packet2 = writer->NewTracePacket();
  arbiter_->FlushPendingCommitDataRequests();

  // The first allocated chunk should be complete but need patching, since the
  // packet extended past the chunk and no patches for the packet size or
  // string field size were applied in the producer.
  SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
  auto chunk = abi->TryAcquireChunkForReading(0u, 0u);
  ASSERT_TRUE(chunk.is_valid());
  ASSERT_EQ(1, chunk.header()->packets.load().count);
  ASSERT_TRUE(chunk.header()->packets.load().flags &
              SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  ASSERT_TRUE(chunk.header()->packets.load().flags &
              SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);

  // The first chunk was committed.
  const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
  ASSERT_EQ(1, last_commit.chunks_to_move_size());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
  EXPECT_EQ(kBufId, last_commit.chunks_to_move()[0].target_buffer());

  // The patches for the first chunk were committed.
  ASSERT_EQ(1, last_commit.chunks_to_patch_size());
  EXPECT_EQ(writer->writer_id(), last_commit.chunks_to_patch()[0].writer_id());
  EXPECT_EQ(kBufId, last_commit.chunks_to_patch()[0].target_buffer());
  EXPECT_EQ(chunk.header()->chunk_id.load(),
            last_commit.chunks_to_patch()[0].chunk_id());
  EXPECT_FALSE(last_commit.chunks_to_patch()[0].has_more_patches());
  ASSERT_EQ(1, last_commit.chunks_to_patch()[0].patches_size());
}

// Sets up a scenario in which the SMB is exhausted and TraceWriter fails to
// get a new chunk while fragmenting a packet. Verifies that data is dropped
// until the SMB is freed up and TraceWriter can get a new chunk.
TEST_P(TraceWriterImplTest, FragmentingPacketWhileBufferExhausted) {
  arbiter_.reset(new SharedMemoryArbiterImpl(buf(), buf_size(), page_size(),
                                             &fake_producer_endpoint_,
                                             task_runner_.get()));

  const BufferID kBufId = 42;
  std::unique_ptr<TraceWriter> writer =
      arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);

  // Write a small first packet, so that |writer| owns a chunk.
  auto packet = writer->NewTracePacket();
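  // |drop_packets_for_testing()| exposes whether the writer has entered
  // garbage-chunk (data-dropping) mode.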
  EXPECT_FALSE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                   ->drop_packets_for_testing());
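  // Finalize() returns the serialized size of the packet; an empty packet
  // serializes to zero bytes.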
  EXPECT_EQ(packet->Finalize(), 0u);

  // Grab all the remaining chunks in the SMB in new writers.
  std::array<std::unique_ptr<TraceWriter>, kNumPages * 4 - 1> other_writers;
  for (size_t i = 0; i < other_writers.size(); i++) {
    other_writers[i] =
        arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);
    auto other_writer_packet = other_writers[i]->NewTracePacket();
    EXPECT_FALSE(reinterpret_cast<TraceWriterImpl*>(other_writers[i].get())
                     ->drop_packets_for_testing());
  }

  // Write a packet that's guaranteed to span more than a single chunk, causing
  // |writer| to attempt to acquire a new chunk but fail to do so.
  auto packet2 = writer->NewTracePacket();
  size_t chunk_size = page_size() / 4;
  std::stringstream large_string_writer;
  for (size_t pos = 0; pos < chunk_size; pos++)
    large_string_writer << "x";
  std::string large_string = large_string_writer.str();
  packet2->set_for_testing()->set_str(large_string.data(), large_string.size());

  EXPECT_TRUE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                  ->drop_packets_for_testing());

  // First chunk should be committed.
  arbiter_->FlushPendingCommitDataRequests();
  const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
  ASSERT_EQ(1, last_commit.chunks_to_move_size());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
  EXPECT_EQ(kBufId, last_commit.chunks_to_move()[0].target_buffer());
  EXPECT_EQ(0, last_commit.chunks_to_patch_size());

  // The chunk should not need patching and should not have the continuation
  // flag set.
  SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
  auto chunk = abi->TryAcquireChunkForReading(0u, 0u);
  ASSERT_TRUE(chunk.is_valid());
  ASSERT_EQ(2, chunk.header()->packets.load().count);
  ASSERT_FALSE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  ASSERT_FALSE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);

  // Writing more data while in garbage mode succeeds. This data is dropped.
  packet2->Finalize();
  auto packet3 = writer->NewTracePacket();
  packet3->set_for_testing()->set_str(large_string.data(), large_string.size());

  // Release the |writer|'s first chunk as free, so that it can grab it again.
  abi->ReleaseChunkAsFree(std::move(chunk));

  // Starting a new packet should cause TraceWriter to attempt to grab a new
  // chunk again, because we wrote enough data to wrap the garbage chunk.
  packet3->Finalize();
  auto packet4 = writer->NewTracePacket();

  // Grabbing the chunk should have succeeded.
  EXPECT_FALSE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                   ->drop_packets_for_testing());

  // The first packet in the chunk should have the previous_packet_dropped flag
  // set, so shouldn't be empty.
  EXPECT_GT(packet4->Finalize(), 0u);

  // Flushing the writer causes the chunk to be released again.
  writer->Flush();
  EXPECT_EQ(1, last_commit.chunks_to_move_size());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
  ASSERT_EQ(0, last_commit.chunks_to_patch_size());

  // Chunk should contain only |packet4| and not have any continuation flag
  // set.
  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
  chunk = abi->TryAcquireChunkForReading(0u, 0u);
  ASSERT_TRUE(chunk.is_valid());
  ASSERT_EQ(1, chunk.header()->packets.load().count);
  ASSERT_FALSE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  ASSERT_FALSE(
      chunk.header()->packets.load().flags &
      SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk);
  ASSERT_FALSE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
}

// Verifies that a TraceWriter that is flushed before the SMB is full, and
// that later acquires a garbage chunk, recovers and writes a
// previous_packet_dropped marker into the trace.
TEST_P(TraceWriterImplTest, FlushBeforeBufferExhausted) {
  arbiter_.reset(new SharedMemoryArbiterImpl(buf(), buf_size(), page_size(),
                                             &fake_producer_endpoint_,
                                             task_runner_.get()));

  const BufferID kBufId = 42;
  std::unique_ptr<TraceWriter> writer =
      arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);

  // Write a small first packet and flush it, so that |writer| no longer owns
  // any chunk.
  auto packet = writer->NewTracePacket();
  EXPECT_FALSE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                   ->drop_packets_for_testing());
  EXPECT_EQ(packet->Finalize(), 0u);

  // Flush the first chunk away.
  writer->Flush();

  // First chunk should be committed. Don't release it as free just yet.
  arbiter_->FlushPendingCommitDataRequests();
  const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
  ASSERT_EQ(1, last_commit.chunks_to_move_size());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());

  // Grab all the remaining chunks in the SMB in new writers.
  std::array<std::unique_ptr<TraceWriter>, kNumPages * 4 - 1> other_writers;
  for (size_t i = 0; i < other_writers.size(); i++) {
    other_writers[i] =
        arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);
    auto other_writer_packet = other_writers[i]->NewTracePacket();
    EXPECT_FALSE(reinterpret_cast<TraceWriterImpl*>(other_writers[i].get())
                     ->drop_packets_for_testing());
  }

  // Write another packet, causing |writer| to acquire a garbage chunk.
  auto packet2 = writer->NewTracePacket();
  EXPECT_TRUE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                  ->drop_packets_for_testing());

  // Writing more data while in garbage mode succeeds. This data is dropped.
  // Make sure that we fill the garbage chunk, so that |writer| tries to
  // re-acquire a valid chunk for the next packet.
  size_t chunk_size = page_size() / 4;
  std::stringstream large_string_writer;
  for (size_t pos = 0; pos < chunk_size; pos++)
    large_string_writer << "x";
  std::string large_string = large_string_writer.str();
  packet2->set_for_testing()->set_str(large_string.data(), large_string.size());
  packet2->Finalize();

  // Next packet should still be in the garbage chunk.
  auto packet3 = writer->NewTracePacket();
  EXPECT_TRUE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                  ->drop_packets_for_testing());

  // Release the first chunk as free, so |writer| can acquire it again.
  SharedMemoryABI* abi = arbiter_->shmem_abi_for_testing();
  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
  auto chunk = abi->TryAcquireChunkForReading(0u, 0u);
  ASSERT_TRUE(chunk.is_valid());
  abi->ReleaseChunkAsFree(std::move(chunk));

  // Fill the garbage chunk, so that the writer attempts to grab another chunk
  // for |packet4|.
  packet3->set_for_testing()->set_str(large_string.data(), large_string.size());
  packet3->Finalize();

  // Next packet should go into the reacquired chunk we just released.
  auto packet4 = writer->NewTracePacket();
  EXPECT_FALSE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                   ->drop_packets_for_testing());

  // The first packet in the chunk should have the previous_packet_dropped flag
  // set, so shouldn't be empty.
  EXPECT_GT(packet4->Finalize(), 0u);

  // Flushing the writer causes the chunk to be released again.
  writer->Flush();
  EXPECT_EQ(1, last_commit.chunks_to_move_size());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].page());
  EXPECT_EQ(0u, last_commit.chunks_to_move()[0].chunk());
  ASSERT_EQ(0, last_commit.chunks_to_patch_size());

  // Chunk should contain only |packet4| and not have any continuation flag
  // set.
  ASSERT_EQ(SharedMemoryABI::kChunkComplete, abi->GetChunkState(0u, 0u));
  chunk = abi->TryAcquireChunkForReading(0u, 0u);
  ASSERT_TRUE(chunk.is_valid());
  ASSERT_EQ(1, chunk.header()->packets.load().count);
  ASSERT_FALSE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kChunkNeedsPatching);
  ASSERT_FALSE(
      chunk.header()->packets.load().flags &
      SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk);
  ASSERT_FALSE(chunk.header()->packets.load().flags &
               SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk);
}

// Regression test that verifies that flushing a TraceWriter while a fragmented
// packet still has uncommitted patches doesn't hit a DCHECK / crash the writer
// thread.
TEST_P(TraceWriterImplTest, FlushAfterFragmentingPacketWhileBufferExhausted) {
  arbiter_.reset(new SharedMemoryArbiterImpl(buf(), buf_size(), page_size(),
                                             &fake_producer_endpoint_,
                                             task_runner_.get()));

  const BufferID kBufId = 42;
  std::unique_ptr<TraceWriter> writer =
      arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);

  // Write a small first packet, so that |writer| owns a chunk.
  auto packet = writer->NewTracePacket();
  EXPECT_FALSE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                   ->drop_packets_for_testing());
  EXPECT_EQ(packet->Finalize(), 0u);

  // Grab all but one of the remaining chunks in the SMB in new writers.
  std::array<std::unique_ptr<TraceWriter>, kNumPages * 4 - 2> other_writers;
  for (size_t i = 0; i < other_writers.size(); i++) {
    other_writers[i] =
        arbiter_->CreateTraceWriter(kBufId, BufferExhaustedPolicy::kDrop);
    auto other_writer_packet = other_writers[i]->NewTracePacket();
    EXPECT_FALSE(reinterpret_cast<TraceWriterImpl*>(other_writers[i].get())
                     ->drop_packets_for_testing());
  }

  // Write a packet that's guaranteed to span more than two chunks, causing
  // |writer| to attempt to acquire two new chunks, but fail to acquire the
  // second.
  auto packet2 = writer->NewTracePacket();
  size_t chunk_size = page_size() / 4;
  std::stringstream large_string_writer;
  for (size_t pos = 0; pos < chunk_size * 2; pos++)
    large_string_writer << "x";
  std::string large_string = large_string_writer.str();
  packet2->set_for_testing()->set_str(large_string.data(), large_string.size());

  EXPECT_TRUE(reinterpret_cast<TraceWriterImpl*>(writer.get())
                  ->drop_packets_for_testing());

  // First two chunks should be committed.
  arbiter_->FlushPendingCommitDataRequests();
  const auto& last_commit = fake_producer_endpoint_.last_commit_data_request;
  ASSERT_EQ(2, last_commit.chunks_to_move_size());

  // Flushing should succeed, even though some patches are still in the
  // writer's patch list.
  packet2->Finalize();
  writer->Flush();
}

// TODO(primiano): add multi-writer test.
// TODO(primiano): add Flush() test.

}  // namespace
}  // namespace perfetto