// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include <utility>
#include <vector>

#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {

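// A TraceBuffer backed by a fixed pool of |max_chunks_| chunks. Chunk indices
// are recycled through a circular queue: once all chunks exist, GetChunk()
// hands back the least recently returned chunk and resets its old events, so
// the buffer keeps only the most recent trace data.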
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index].release();
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    // When this method is called the queue should not be full, because the
    // queue has room for every chunk, including the one being returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = std::move(chunk);
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

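  // A TraceEventHandle identifies an event by its chunk index, chunk sequence
  // number and event index; the sequence check rejects stale handles that
  // refer to a chunk that has since been recycled.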
  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

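  // Iterates over the chunks in the order in which they will be recycled
  // (oldest data first), skipping queue slots that were never allocated.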
  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return NULL;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index].get();
    }
    return NULL;
  }

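  // Produces a snapshot copy of the chunks currently in the queue; the
  // returned ClonedTraceBuffer only supports NextChunk() iteration.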
  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index].get();
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone() : NULL);
    }
    return std::move(cloned_buffer);
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add("TraceBufferRingBuffer", sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
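  // Read-only snapshot buffer returned by CloneForIteration(). Only
  // NextChunk() is meaningful; the other TraceBuffer methods are stubs.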
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    const TraceBufferChunk* NextChunk() override {
      return current_iteration_index_ < chunks_.size()
                 ? chunks_[current_iteration_index_++].get()
                 : NULL;
    }

    scoped_ptr<TraceBufferChunk> GetChunk(size_t* /* index */) override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    void ReturnChunk(size_t /*index*/, scoped_ptr<TraceBufferChunk>) override {
      NOTIMPLEMENTED();
    }
    bool IsFull() const override { return false; }
    size_t Size() const override { return 0; }
    size_t Capacity() const override { return 0; }
    TraceEvent* GetEventByHandle(TraceEventHandle /* handle */) override {
      return NULL;
    }
    scoped_ptr<TraceBuffer> CloneForIteration() const override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }
    void EstimateTraceMemoryOverhead(
        TraceEventMemoryOverhead* /* overhead */) override {
      NOTIMPLEMENTED();
    }

    size_t current_iteration_index_;
    std::vector<scoped_ptr<TraceBufferChunk>> chunks_;
  };

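  // The recyclable chunk queue is a circular buffer of chunk indices with one
  // spare slot, so an empty queue (head == tail) can be told apart from a
  // full one (QueueSize() == queue_capacity() - 1).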
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

  size_t queue_capacity() const {
    // One extra slot to distinguish the full state from the empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  std::vector<scoped_ptr<TraceBufferChunk>> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32_t current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};

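// A TraceBuffer implementation that only grows: chunks are appended and never
// recycled, so IsFull() reports true once |max_chunks_| chunks exist.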
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i];
      // Skip the in-flight (nullptr) chunks. They will be accounted for by the
      // per-thread-local dumpers; see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

}  // namespace

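// A TraceBufferChunk holds a fixed-size array of TraceEvents. |next_free_| is
// the index of the first unused slot, and |seq_| is a generation number used
// to validate TraceEventHandles against recycled chunks.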
TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() {}

void TraceBufferChunk::Reset(uint32_t new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk;
}

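// Overhead estimation is incremental: estimates for events measured on
// earlier calls are kept in |cached_overhead_estimate_|, so each call only
// measures the events added since the previous one.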
void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add("TraceBufferChunk",
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount("TraceEvent");
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add("TraceEvent (unused)",
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}

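// TraceResultBuffer stitches JSON trace fragments into one JSON array. An
// illustrative usage sketch (not code from this file):
//
//   TraceResultBuffer buffer;
//   TraceResultBuffer::SimpleOutput output;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();                   // emits "["
//   buffer.AddFragment("{\"a\":1}");  // emits the fragment
//   buffer.AddFragment("{\"b\":2}");  // emits "," then the fragment
//   buffer.Finish();                  // emits "]"
//   // output.json_output is now "[{\"a\":1},{\"b\":2}]".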
TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() {}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

}  // namespace trace_event
}  // namespace base