/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SRC_TRACE_PROCESSOR_TRACE_STORAGE_H_
#define SRC_TRACE_PROCESSOR_TRACE_STORAGE_H_

#include <algorithm>
#include <array>
#include <deque>
#include <functional>
#include <limits>
#include <map>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "perfetto/base/hash.h"
#include "perfetto/base/logging.h"
#include "perfetto/base/optional.h"
#include "perfetto/base/string_view.h"
#include "perfetto/base/time.h"
#include "perfetto/base/utils.h"
#include "src/trace_processor/ftrace_utils.h"
#include "src/trace_processor/stats.h"
#include "src/trace_processor/string_pool.h"

namespace perfetto {
namespace trace_processor {

// UniquePid is an offset into |unique_processes_|. This is necessary because
// Unix pids are reused and thus not guaranteed to be unique over a long
// period of time.
using UniquePid = uint32_t;

// UniqueTid is an offset into |unique_threads_|. Necessary because tids can
// be reused.
using UniqueTid = uint32_t;

// StringId is an offset into |string_pool_|.
using StringId = StringPool::Id;

// Identifiers for all the tables in the database.
enum TableId : uint8_t {
  // Intentionally don't have TableId == 0 so that RowId == 0 can refer to an
  // invalid row id.
  kCounterValues = 1,
  kRawEvents = 2,
  kInstants = 3,
  kSched = 4,
};

// The top 8 bits are set to the TableId and the bottom 32 to the row of the
// table.
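// For example (see CreateRowId()/ParseRowId() below), row 7 of the raw events
// table would be encoded as (RowId(TableId::kRawEvents) << 32) | 7.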
using RowId = int64_t;
static const RowId kInvalidRowId = 0;

using ArgSetId = uint32_t;
static const ArgSetId kInvalidArgSetId = 0;

enum RefType {
  kRefNoRef = 0,
  kRefUtid = 1,
  kRefCpuId = 2,
  kRefIrq = 3,
  kRefSoftIrq = 4,
  kRefUpid = 5,
  kRefMax
};

const std::vector<const char*>& GetRefTypeStringMap();

// Stores the data inside a trace file in columnar form. This makes it
// efficient to read or search across a single field of the trace (e.g. all the
// thread names for a given CPU).
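//
// A minimal usage sketch (hypothetical caller code, for illustration only):
//
//   TraceStorage storage;
//   StringId name_id = storage.InternString("my_thread");
//   UniqueTid utid = storage.AddEmptyThread(/*tid=*/42);
//   storage.GetMutableThread(utid)->name_id = name_id;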
class TraceStorage {
 public:
  TraceStorage();

  virtual ~TraceStorage();

  // Information about a unique process seen in a trace.
  struct Process {
    explicit Process(uint32_t p) : pid(p) {}
    int64_t start_ns = 0;
    StringId name_id = 0;
    uint32_t pid = 0;
    base::Optional<UniquePid> pupid;
  };

  // Information about a unique thread seen in a trace.
  struct Thread {
    explicit Thread(uint32_t t) : tid(t) {}
    int64_t start_ns = 0;
    StringId name_id = 0;
    base::Optional<UniquePid> upid;
    uint32_t tid = 0;
  };

  // Generic key value storage which can be referenced by other tables.
  class Args {
   public:
    // Variadic type representing the possible values for the args table.
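    // A value is built via the factory helpers below, e.g. (illustrative
    // values only) Variadic::Integer(42) or
    // Variadic::String(storage->InternString("foo")).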
    struct Variadic {
      enum Type { kInt, kString, kReal };

      static Variadic Integer(int64_t int_value) {
        Variadic variadic;
        variadic.type = Type::kInt;
        variadic.int_value = int_value;
        return variadic;
      }

      static Variadic String(StringId string_id) {
        Variadic variadic;
        variadic.type = Type::kString;
        variadic.string_value = string_id;
        return variadic;
      }

      static Variadic Real(double real_value) {
        Variadic variadic;
        variadic.type = Type::kReal;
        variadic.real_value = real_value;
        return variadic;
      }

      Type type;
      union {
        int64_t int_value;
        StringId string_value;
        double real_value;
      };
    };

    struct Arg {
      StringId flat_key = 0;
      StringId key = 0;
      Variadic value = Variadic::Integer(0);

      // This is only used by the arg tracker and so is not part of the hash.
      RowId row_id = 0;
    };

    struct ArgHasher {
      uint64_t operator()(const Arg& arg) const noexcept {
        base::Hash hash;
        hash.Update(arg.key);
        // We don't hash arg.flat_key because it's a subsequence of arg.key.
        switch (arg.value.type) {
          case Variadic::Type::kInt:
            hash.Update(arg.value.int_value);
            break;
          case Variadic::Type::kString:
            hash.Update(arg.value.string_value);
            break;
          case Variadic::Type::kReal:
            hash.Update(arg.value.real_value);
            break;
        }
        return hash.digest();
      }
    };

    const std::deque<ArgSetId>& set_ids() const { return set_ids_; }
    const std::deque<StringId>& flat_keys() const { return flat_keys_; }
    const std::deque<StringId>& keys() const { return keys_; }
    const std::deque<Variadic>& arg_values() const { return arg_values_; }
    uint32_t args_count() const {
      return static_cast<uint32_t>(set_ids_.size());
    }

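    // Adds the args in the range [begin, end) of |args| as one arg set and
    // returns its id, reusing the id of an existing set with the same hash.
    // Hypothetical usage (variable names are illustrative only):
    //   ArgSetId set_id = storage->mutable_args()->AddArgSet(
    //       pending_args, 0, static_cast<uint32_t>(pending_args.size()));
    //   storage->mutable_raw_events()->set_arg_set_id(row, set_id);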
    ArgSetId AddArgSet(const std::vector<Arg>& args,
                       uint32_t begin,
                       uint32_t end) {
      base::Hash hash;
      for (uint32_t i = begin; i < end; i++) {
        hash.Update(ArgHasher()(args[i]));
      }

      ArgSetHash digest = hash.digest();
      auto it = arg_row_for_hash_.find(digest);
      if (it != arg_row_for_hash_.end()) {
        return set_ids_[it->second];
      }

      // The +1 ensures that nothing has an id == kInvalidArgSetId == 0.
      ArgSetId id = static_cast<uint32_t>(arg_row_for_hash_.size()) + 1;
      arg_row_for_hash_.emplace(digest, args_count());
      for (uint32_t i = begin; i < end; i++) {
        const auto& arg = args[i];
        set_ids_.emplace_back(id);
        flat_keys_.emplace_back(arg.flat_key);
        keys_.emplace_back(arg.key);
        arg_values_.emplace_back(arg.value);
      }
      return id;
    }

   private:
    using ArgSetHash = uint64_t;

    std::deque<ArgSetId> set_ids_;
    std::deque<StringId> flat_keys_;
    std::deque<StringId> keys_;
    std::deque<Variadic> arg_values_;

    std::unordered_map<ArgSetHash, uint32_t> arg_row_for_hash_;
  };

  class Slices {
   public:
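    // Appends one sched slice; all columns below grow in lockstep. A
    // hypothetical caller (illustrative values only) might do:
    //   size_t row = storage->mutable_slices()->AddSlice(
    //       cpu, ts, /*duration_ns=*/0, utid, end_state, priority);
    //   ...
    //   storage->mutable_slices()->set_duration(row, end_ts - ts);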
    inline size_t AddSlice(uint32_t cpu,
                           int64_t start_ns,
                           int64_t duration_ns,
                           UniqueTid utid,
                           ftrace_utils::TaskState end_state,
                           int32_t priority) {
      cpus_.emplace_back(cpu);
      start_ns_.emplace_back(start_ns);
      durations_.emplace_back(duration_ns);
      utids_.emplace_back(utid);
      end_states_.emplace_back(end_state);
      priorities_.emplace_back(priority);

      if (utid >= rows_for_utids_.size())
        rows_for_utids_.resize(utid + 1);
      rows_for_utids_[utid].emplace_back(slice_count() - 1);
      return slice_count() - 1;
    }

    void set_duration(size_t index, int64_t duration_ns) {
      durations_[index] = duration_ns;
    }

    void set_end_state(size_t index, ftrace_utils::TaskState end_state) {
      end_states_[index] = end_state;
    }

    size_t slice_count() const { return start_ns_.size(); }

    const std::deque<uint32_t>& cpus() const { return cpus_; }

    const std::deque<int64_t>& start_ns() const { return start_ns_; }

    const std::deque<int64_t>& durations() const { return durations_; }

    const std::deque<UniqueTid>& utids() const { return utids_; }

    const std::deque<ftrace_utils::TaskState>& end_state() const {
      return end_states_;
    }

    const std::deque<int32_t>& priorities() const { return priorities_; }

    const std::deque<std::vector<uint32_t>>& rows_for_utids() const {
      return rows_for_utids_;
    }

   private:
    // Each deque below has the same number of entries (one per sched slice
    // in the trace).
    std::deque<uint32_t> cpus_;
    std::deque<int64_t> start_ns_;
    std::deque<int64_t> durations_;
    std::deque<UniqueTid> utids_;
    std::deque<ftrace_utils::TaskState> end_states_;
    std::deque<int32_t> priorities_;

    // For each utid, the rows in the deques above belonging to that thread.
    std::deque<std::vector<uint32_t>> rows_for_utids_;
  };

  class NestableSlices {
   public:
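    // Appends one nestable (userspace) slice; |ref| is interpreted according
    // to |type| (e.g. a UniqueTid when |type| == kRefUtid).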
    inline size_t AddSlice(int64_t start_ns,
                           int64_t duration_ns,
                           int64_t ref,
                           RefType type,
                           StringId cat,
                           StringId name,
                           uint8_t depth,
                           int64_t stack_id,
                           int64_t parent_stack_id) {
      start_ns_.emplace_back(start_ns);
      durations_.emplace_back(duration_ns);
      refs_.emplace_back(ref);
      types_.emplace_back(type);
      cats_.emplace_back(cat);
      names_.emplace_back(name);
      depths_.emplace_back(depth);
      stack_ids_.emplace_back(stack_id);
      parent_stack_ids_.emplace_back(parent_stack_id);
      return slice_count() - 1;
    }

    void set_duration(size_t index, int64_t duration_ns) {
      durations_[index] = duration_ns;
    }

    void set_stack_id(size_t index, int64_t stack_id) {
      stack_ids_[index] = stack_id;
    }

    size_t slice_count() const { return start_ns_.size(); }
    const std::deque<int64_t>& start_ns() const { return start_ns_; }
    const std::deque<int64_t>& durations() const { return durations_; }
    const std::deque<int64_t>& refs() const { return refs_; }
    const std::deque<RefType>& types() const { return types_; }
    const std::deque<StringId>& cats() const { return cats_; }
    const std::deque<StringId>& names() const { return names_; }
    const std::deque<uint8_t>& depths() const { return depths_; }
    const std::deque<int64_t>& stack_ids() const { return stack_ids_; }
    const std::deque<int64_t>& parent_stack_ids() const {
      return parent_stack_ids_;
    }

   private:
    std::deque<int64_t> start_ns_;
    std::deque<int64_t> durations_;
    std::deque<int64_t> refs_;
    std::deque<RefType> types_;
    std::deque<StringId> cats_;
    std::deque<StringId> names_;
    std::deque<uint8_t> depths_;
    std::deque<int64_t> stack_ids_;
    std::deque<int64_t> parent_stack_ids_;
  };

  class CounterDefinitions {
   public:
    using Id = uint32_t;
    static constexpr Id kInvalidId = std::numeric_limits<Id>::max();

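    // Returns the id of the definition with this (name_id, ref, type) tuple,
    // adding a new row only if no matching definition exists yet. E.g.
    // (illustrative variable names):
    //   auto defn_id = storage->mutable_counter_definitions()
    //                      ->AddCounterDefinition(name_id, cpu, kRefCpuId);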
    inline Id AddCounterDefinition(StringId name_id,
                                   int64_t ref,
                                   RefType type) {
      base::Hash hash;
      hash.Update(name_id);
      hash.Update(ref);
      hash.Update(type);

      // TODO(lalitm): this is a perf bottleneck and likely we can do something
      // quite a bit better here.
      uint64_t digest = hash.digest();
      auto it = hash_to_row_idx_.find(digest);
      if (it != hash_to_row_idx_.end())
        return it->second;

      name_ids_.emplace_back(name_id);
      refs_.emplace_back(ref);
      types_.emplace_back(type);
      hash_to_row_idx_.emplace(digest, size() - 1);
      return size() - 1;
    }

    uint32_t size() const { return static_cast<uint32_t>(name_ids_.size()); }

    const std::deque<StringId>& name_ids() const { return name_ids_; }

    const std::deque<int64_t>& refs() const { return refs_; }

    const std::deque<RefType>& types() const { return types_; }

   private:
    std::deque<StringId> name_ids_;
    std::deque<int64_t> refs_;
    std::deque<RefType> types_;

    std::unordered_map<uint64_t, uint32_t> hash_to_row_idx_;
  };

  class CounterValues {
   public:
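    // Appends one counter sample against an existing definition. The returned
    // row may later be annotated with args, e.g. (illustrative names only):
    //   uint32_t row = values->AddCounterValue(defn_id, ts, value);
    //   values->set_arg_set_id(row, arg_set_id);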
    inline uint32_t AddCounterValue(CounterDefinitions::Id counter_id,
                                    int64_t timestamp,
                                    double value) {
      counter_ids_.emplace_back(counter_id);
      timestamps_.emplace_back(timestamp);
      values_.emplace_back(value);
      arg_set_ids_.emplace_back(kInvalidArgSetId);

      if (counter_id != CounterDefinitions::kInvalidId) {
        if (counter_id >= rows_for_counter_id_.size()) {
          rows_for_counter_id_.resize(counter_id + 1);
        }
        rows_for_counter_id_[counter_id].emplace_back(size() - 1);
      }
      return size() - 1;
    }

    void set_counter_id(uint32_t index, CounterDefinitions::Id counter_id) {
      PERFETTO_DCHECK(counter_ids_[index] == CounterDefinitions::kInvalidId);

      counter_ids_[index] = counter_id;
      if (counter_id >= rows_for_counter_id_.size()) {
        rows_for_counter_id_.resize(counter_id + 1);
      }

      auto* new_rows = &rows_for_counter_id_[counter_id];
      new_rows->insert(
          std::upper_bound(new_rows->begin(), new_rows->end(), index), index);
    }

    void set_arg_set_id(uint32_t row, ArgSetId id) { arg_set_ids_[row] = id; }

    uint32_t size() const { return static_cast<uint32_t>(counter_ids_.size()); }

    const std::deque<CounterDefinitions::Id>& counter_ids() const {
      return counter_ids_;
    }

    const std::deque<int64_t>& timestamps() const { return timestamps_; }

    const std::deque<double>& values() const { return values_; }

    const std::deque<ArgSetId>& arg_set_ids() const { return arg_set_ids_; }

    const std::deque<std::vector<uint32_t>>& rows_for_counter_id() const {
      return rows_for_counter_id_;
    }

   private:
    std::deque<CounterDefinitions::Id> counter_ids_;
    std::deque<int64_t> timestamps_;
    std::deque<double> values_;
    std::deque<ArgSetId> arg_set_ids_;

    // Indexed by counter_id and contains the row numbers (in the deques
    // above) which refer to that counter definition.
    std::deque<std::vector<uint32_t>> rows_for_counter_id_;
  };

  class SqlStats {
   public:
    static constexpr size_t kMaxLogEntries = 100;
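    // The row returned by RecordQueryBegin() is passed back to
    // RecordQueryFirstNext() and RecordQueryEnd() as the query progresses.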
    uint32_t RecordQueryBegin(const std::string& query,
                              int64_t time_queued,
                              int64_t time_started);
    void RecordQueryFirstNext(uint32_t row, int64_t time_first_next);
    void RecordQueryEnd(uint32_t row, int64_t time_end);
    size_t size() const { return queries_.size(); }
    const std::deque<std::string>& queries() const { return queries_; }
    const std::deque<int64_t>& times_queued() const { return times_queued_; }
    const std::deque<int64_t>& times_started() const { return times_started_; }
    const std::deque<int64_t>& times_first_next() const {
      return times_first_next_;
    }
    const std::deque<int64_t>& times_ended() const { return times_ended_; }

   private:
    uint32_t popped_queries_ = 0;

    std::deque<std::string> queries_;
    std::deque<int64_t> times_queued_;
    std::deque<int64_t> times_started_;
    std::deque<int64_t> times_first_next_;
    std::deque<int64_t> times_ended_;
  };

  class Instants {
   public:
    inline uint32_t AddInstantEvent(int64_t timestamp,
                                    StringId name_id,
                                    double value,
                                    int64_t ref,
                                    RefType type) {
      timestamps_.emplace_back(timestamp);
      name_ids_.emplace_back(name_id);
      values_.emplace_back(value);
      refs_.emplace_back(ref);
      types_.emplace_back(type);
      arg_set_ids_.emplace_back(kInvalidArgSetId);
      return static_cast<uint32_t>(instant_count() - 1);
    }

    void set_ref(uint32_t row, int64_t ref) { refs_[row] = ref; }

    void set_arg_set_id(uint32_t row, ArgSetId id) { arg_set_ids_[row] = id; }

    size_t instant_count() const { return timestamps_.size(); }

    const std::deque<int64_t>& timestamps() const { return timestamps_; }

    const std::deque<StringId>& name_ids() const { return name_ids_; }

    const std::deque<double>& values() const { return values_; }

    const std::deque<int64_t>& refs() const { return refs_; }

    const std::deque<RefType>& types() const { return types_; }

    const std::deque<ArgSetId>& arg_set_ids() const { return arg_set_ids_; }

   private:
    std::deque<int64_t> timestamps_;
    std::deque<StringId> name_ids_;
    std::deque<double> values_;
    std::deque<int64_t> refs_;
    std::deque<RefType> types_;
    std::deque<ArgSetId> arg_set_ids_;
  };

  class RawEvents {
   public:
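    // Appends one raw ftrace event and returns a RowId which packs
    // (TableId::kRawEvents, row); see CreateRowId() below.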
    inline RowId AddRawEvent(int64_t timestamp,
                             StringId name_id,
                             uint32_t cpu,
                             UniqueTid utid) {
      timestamps_.emplace_back(timestamp);
      name_ids_.emplace_back(name_id);
      cpus_.emplace_back(cpu);
      utids_.emplace_back(utid);
      arg_set_ids_.emplace_back(kInvalidArgSetId);
      return CreateRowId(TableId::kRawEvents,
                         static_cast<uint32_t>(raw_event_count() - 1));
    }

    void set_arg_set_id(uint32_t row, ArgSetId id) { arg_set_ids_[row] = id; }

    size_t raw_event_count() const { return timestamps_.size(); }

    const std::deque<int64_t>& timestamps() const { return timestamps_; }

    const std::deque<StringId>& name_ids() const { return name_ids_; }

    const std::deque<uint32_t>& cpus() const { return cpus_; }

    const std::deque<UniqueTid>& utids() const { return utids_; }

    const std::deque<ArgSetId>& arg_set_ids() const { return arg_set_ids_; }

   private:
    std::deque<int64_t> timestamps_;
    std::deque<StringId> name_ids_;
    std::deque<uint32_t> cpus_;
    std::deque<UniqueTid> utids_;
    std::deque<ArgSetId> arg_set_ids_;
  };

  class AndroidLogs {
   public:
    inline size_t AddLogEvent(int64_t timestamp,
                              UniqueTid utid,
                              uint8_t prio,
                              StringId tag_id,
                              StringId msg_id) {
      timestamps_.emplace_back(timestamp);
      utids_.emplace_back(utid);
      prios_.emplace_back(prio);
      tag_ids_.emplace_back(tag_id);
      msg_ids_.emplace_back(msg_id);
      return size() - 1;
    }

    size_t size() const { return timestamps_.size(); }

    const std::deque<int64_t>& timestamps() const { return timestamps_; }
    const std::deque<UniqueTid>& utids() const { return utids_; }
    const std::deque<uint8_t>& prios() const { return prios_; }
    const std::deque<StringId>& tag_ids() const { return tag_ids_; }
    const std::deque<StringId>& msg_ids() const { return msg_ids_; }

   private:
    std::deque<int64_t> timestamps_;
    std::deque<UniqueTid> utids_;
    std::deque<uint8_t> prios_;
    std::deque<StringId> tag_ids_;
    std::deque<StringId> msg_ids_;
  };

  struct Stats {
    using IndexMap = std::map<int, int64_t>;
    int64_t value = 0;
    IndexMap indexed_values;
  };
  using StatsMap = std::array<Stats, stats::kNumKeys>;

  class HeapProfileFrames {
   public:
    struct Row {
      StringId name_id;
      int64_t mapping_row;
      int64_t rel_pc;

      bool operator==(const Row& other) const {
        return std::tie(name_id, mapping_row, rel_pc) ==
               std::tie(other.name_id, other.mapping_row, other.rel_pc);
      }
    };

    int64_t Insert(const Row& row) {
      names_.emplace_back(row.name_id);
      mappings_.emplace_back(row.mapping_row);
      rel_pcs_.emplace_back(row.rel_pc);
      return static_cast<int64_t>(names_.size()) - 1;
    }

    const std::deque<StringId>& names() const { return names_; }
    const std::deque<int64_t>& mappings() const { return mappings_; }
    const std::deque<int64_t>& rel_pcs() const { return rel_pcs_; }

   private:
    std::deque<StringId> names_;
    std::deque<int64_t> mappings_;
    std::deque<int64_t> rel_pcs_;
  };

  class HeapProfileCallsites {
   public:
    struct Row {
      int64_t depth;
      int64_t parent_id;
      int64_t frame_row;

      bool operator==(const Row& other) const {
        return std::tie(depth, parent_id, frame_row) ==
               std::tie(other.depth, other.parent_id, other.frame_row);
      }
    };

    int64_t Insert(const Row& row) {
      frame_depths_.emplace_back(row.depth);
      parent_callsite_ids_.emplace_back(row.parent_id);
      frame_ids_.emplace_back(row.frame_row);
      return static_cast<int64_t>(frame_depths_.size()) - 1;
    }

    const std::deque<int64_t>& frame_depths() const { return frame_depths_; }
    const std::deque<int64_t>& parent_callsite_ids() const {
      return parent_callsite_ids_;
    }
    const std::deque<int64_t>& frame_ids() const { return frame_ids_; }

   private:
    std::deque<int64_t> frame_depths_;
    std::deque<int64_t> parent_callsite_ids_;
    std::deque<int64_t> frame_ids_;
  };

  class HeapProfileMappings {
   public:
    struct Row {
      StringId build_id;
      int64_t offset;
      int64_t start;
      int64_t end;
      int64_t load_bias;
      StringId name_id;

      bool operator==(const Row& other) const {
        return std::tie(build_id, offset, start, end, load_bias, name_id) ==
               std::tie(other.build_id, other.offset, other.start, other.end,
                        other.load_bias, other.name_id);
      }
    };

    int64_t Insert(const Row& row) {
      build_ids_.emplace_back(row.build_id);
      offsets_.emplace_back(row.offset);
      starts_.emplace_back(row.start);
      ends_.emplace_back(row.end);
      load_biases_.emplace_back(row.load_bias);
      names_.emplace_back(row.name_id);
      return static_cast<int64_t>(build_ids_.size()) - 1;
    }

    const std::deque<StringId>& build_ids() const { return build_ids_; }
    const std::deque<int64_t>& offsets() const { return offsets_; }
    const std::deque<int64_t>& starts() const { return starts_; }
    const std::deque<int64_t>& ends() const { return ends_; }
    const std::deque<int64_t>& load_biases() const { return load_biases_; }
    const std::deque<StringId>& names() const { return names_; }

   private:
    std::deque<StringId> build_ids_;
    std::deque<int64_t> offsets_;
    std::deque<int64_t> starts_;
    std::deque<int64_t> ends_;
    std::deque<int64_t> load_biases_;
    std::deque<StringId> names_;
  };

  class HeapProfileAllocations {
   public:
    struct Row {
      int64_t timestamp;
      int64_t pid;
      int64_t callsite_id;
      int64_t count;
      int64_t size;
    };

    void Insert(const Row& row) {
      timestamps_.emplace_back(row.timestamp);
      pids_.emplace_back(row.pid);
      callsite_ids_.emplace_back(row.callsite_id);
      counts_.emplace_back(row.count);
      sizes_.emplace_back(row.size);
    }

    const std::deque<int64_t>& timestamps() const { return timestamps_; }
    const std::deque<int64_t>& pids() const { return pids_; }
    const std::deque<int64_t>& callsite_ids() const { return callsite_ids_; }
    const std::deque<int64_t>& counts() const { return counts_; }
    const std::deque<int64_t>& sizes() const { return sizes_; }

   private:
    std::deque<int64_t> timestamps_;
    std::deque<int64_t> pids_;
    std::deque<int64_t> callsite_ids_;
    std::deque<int64_t> counts_;
    std::deque<int64_t> sizes_;
  };

  void ResetStorage();

  UniqueTid AddEmptyThread(uint32_t tid) {
    unique_threads_.emplace_back(tid);
    return static_cast<UniqueTid>(unique_threads_.size() - 1);
  }

  UniquePid AddEmptyProcess(uint32_t pid) {
    unique_processes_.emplace_back(pid);
    return static_cast<UniquePid>(unique_processes_.size() - 1);
  }

  // Returns a unique identifier for the contents of each string.
  // The string is copied internally and can be destroyed after this call.
  // Virtual for testing.
  virtual StringId InternString(base::StringView str) {
    return string_pool_.InternString(str);
  }

  Process* GetMutableProcess(UniquePid upid) {
    PERFETTO_DCHECK(upid < unique_processes_.size());
    return &unique_processes_[upid];
  }

  Thread* GetMutableThread(UniqueTid utid) {
    PERFETTO_DCHECK(utid < unique_threads_.size());
    return &unique_threads_[utid];
  }

  // Example usage: SetStats(stats::android_log_num_failed, 42);
  void SetStats(size_t key, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value = value;
  }

  // Example usage: IncrementStats(stats::android_log_num_failed, -1);
  void IncrementStats(size_t key, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value += increment;
  }

  // Example usage: IncrementIndexedStats(stats::cpu_failure, 1);
  void IncrementIndexedStats(size_t key, int index, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] += increment;
  }

  // Example usage: SetIndexedStats(stats::cpu_failure, 1, 42);
  void SetIndexedStats(size_t key, int index, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] = value;
  }

  class ScopedStatsTracer {
   public:
    ScopedStatsTracer(TraceStorage* storage, size_t key)
        : storage_(storage), key_(key), start_ns_(base::GetWallTimeNs()) {}

    ~ScopedStatsTracer() {
      if (!storage_)
        return;
      auto delta_ns = base::GetWallTimeNs() - start_ns_;
      storage_->IncrementStats(key_, delta_ns.count());
    }

    ScopedStatsTracer(ScopedStatsTracer&& other) noexcept { MoveImpl(&other); }

    ScopedStatsTracer& operator=(ScopedStatsTracer&& other) {
      MoveImpl(&other);
      return *this;
    }

   private:
    ScopedStatsTracer(const ScopedStatsTracer&) = delete;
    ScopedStatsTracer& operator=(const ScopedStatsTracer&) = delete;

    void MoveImpl(ScopedStatsTracer* other) {
      storage_ = other->storage_;
      key_ = other->key_;
      start_ns_ = other->start_ns_;
      other->storage_ = nullptr;
    }

    TraceStorage* storage_;
    size_t key_;
    base::TimeNanos start_ns_;
  };

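  // Example (hypothetical stats key): the elapsed wall time, in ns, is added
  // to the stat when the returned tracer goes out of scope.
  //   {
  //     auto tracer = storage.TraceExecutionTimeIntoStats(stats::some_key);
  //     ...  // work being timed.
  //   }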
  ScopedStatsTracer TraceExecutionTimeIntoStats(size_t key) {
    return ScopedStatsTracer(this, key);
  }

  // Reading methods.
  NullTermStringView GetString(StringId id) const {
    return string_pool_.Get(id);
  }

  const Process& GetProcess(UniquePid upid) const {
    PERFETTO_DCHECK(upid < unique_processes_.size());
    return unique_processes_[upid];
  }

  const Thread& GetThread(UniqueTid utid) const {
    // Allow utid == 0 for idle thread retrieval.
    PERFETTO_DCHECK(utid < unique_threads_.size());
    return unique_threads_[utid];
  }

  static RowId CreateRowId(TableId table, uint32_t row) {
    return (static_cast<RowId>(table) << kRowIdTableShift) | row;
  }

  static std::pair<int8_t /*table*/, uint32_t /*row*/> ParseRowId(RowId rowid) {
    auto id = static_cast<uint64_t>(rowid);
    auto table_id = static_cast<uint8_t>(id >> kRowIdTableShift);
    auto row = static_cast<uint32_t>(id & ((1ull << kRowIdTableShift) - 1));
    return std::make_pair(table_id, row);
  }

  const Slices& slices() const { return slices_; }
  Slices* mutable_slices() { return &slices_; }

  const NestableSlices& nestable_slices() const { return nestable_slices_; }
  NestableSlices* mutable_nestable_slices() { return &nestable_slices_; }

  const CounterDefinitions& counter_definitions() const {
    return counter_definitions_;
  }
  CounterDefinitions* mutable_counter_definitions() {
    return &counter_definitions_;
  }

  const CounterValues& counter_values() const { return counter_values_; }
  CounterValues* mutable_counter_values() { return &counter_values_; }

  const SqlStats& sql_stats() const { return sql_stats_; }
  SqlStats* mutable_sql_stats() { return &sql_stats_; }

  const Instants& instants() const { return instants_; }
  Instants* mutable_instants() { return &instants_; }

  const AndroidLogs& android_logs() const { return android_log_; }
  AndroidLogs* mutable_android_log() { return &android_log_; }

  const StatsMap& stats() const { return stats_; }

  const Args& args() const { return args_; }
  Args* mutable_args() { return &args_; }

  const RawEvents& raw_events() const { return raw_events_; }
  RawEvents* mutable_raw_events() { return &raw_events_; }

  const HeapProfileMappings& heap_profile_mappings() const {
    return heap_profile_mappings_;
  }
  HeapProfileMappings* mutable_heap_profile_mappings() {
    return &heap_profile_mappings_;
  }

  const HeapProfileFrames& heap_profile_frames() const {
    return heap_profile_frames_;
  }
  HeapProfileFrames* mutable_heap_profile_frames() {
    return &heap_profile_frames_;
  }

  const HeapProfileCallsites& heap_profile_callsites() const {
    return heap_profile_callsites_;
  }
  HeapProfileCallsites* mutable_heap_profile_callsites() {
    return &heap_profile_callsites_;
  }

  const HeapProfileAllocations& heap_profile_allocations() const {
    return heap_profile_allocations_;
  }
  HeapProfileAllocations* mutable_heap_profile_allocations() {
    return &heap_profile_allocations_;
  }

  const StringPool& string_pool() const { return string_pool_; }

  // |unique_processes_| always contains at least 1 element because the 0th ID
  // is reserved to indicate an invalid process.
  size_t process_count() const { return unique_processes_.size(); }

  // |unique_threads_| always contains at least 1 element because the 0th ID
  // is reserved to indicate an invalid thread.
  size_t thread_count() const { return unique_threads_.size(); }

  // Number of interned strings in the pool. Includes the empty string w/ ID=0.
  size_t string_count() const { return string_pool_.size(); }

  // Start / end ts (in nanoseconds) across the parsed trace events.
  // Returns (0, 0) if the trace is empty.
  std::pair<int64_t, int64_t> GetTraceTimestampBoundsNs() const;

 private:
  static constexpr uint8_t kRowIdTableShift = 32;

  using StringHash = uint64_t;

  TraceStorage(const TraceStorage&) = delete;
  TraceStorage& operator=(const TraceStorage&) = delete;

  TraceStorage(TraceStorage&&) = default;
  TraceStorage& operator=(TraceStorage&&) = default;

  // Stats about parsing the trace.
  StatsMap stats_{};

  // Sched slices for each CPU in the trace.
  Slices slices_;

  // Args for all other tables.
  Args args_;

  // One entry for each unique string in the trace.
  StringPool string_pool_;

  // One entry for each UniquePid, with UniquePid as the index.
  // Never hold on to pointers to Process, as vector resize will
  // invalidate them.
  std::vector<Process> unique_processes_;

  // One entry for each UniqueTid, with UniqueTid as the index.
  std::deque<Thread> unique_threads_;

  // Slices coming from userspace events (e.g. Chromium TRACE_EVENT macros).
  NestableSlices nestable_slices_;

  // The type of counters in the trace. Can be thought of as the "metadata".
  CounterDefinitions counter_definitions_;

  // The values from the Counter events from the trace. This includes CPU
  // frequency events as well as systrace trace_marker counter events.
  CounterValues counter_values_;

  SqlStats sql_stats_;

  // These are instantaneous events in the trace. They have no duration
  // and do not have a value that makes sense to track over time,
  // e.g. signal events.
  Instants instants_;

  // Raw events are every ftrace event in the trace. The raw event includes
  // the timestamp and the pid. The args for the raw event will be in the
  // args table. This table can be used to generate a text version of the
  // trace.
  RawEvents raw_events_;
  AndroidLogs android_log_;

  HeapProfileMappings heap_profile_mappings_;
  HeapProfileFrames heap_profile_frames_;
  HeapProfileCallsites heap_profile_callsites_;
  HeapProfileAllocations heap_profile_allocations_;
};

}  // namespace trace_processor
}  // namespace perfetto

namespace std {

template <>
struct hash<::perfetto::trace_processor::TraceStorage::HeapProfileFrames::Row> {
  using argument_type =
      ::perfetto::trace_processor::TraceStorage::HeapProfileFrames::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.name_id) ^
           std::hash<int64_t>{}(r.mapping_row) ^ std::hash<int64_t>{}(r.rel_pc);
  }
};

template <>
struct hash<
    ::perfetto::trace_processor::TraceStorage::HeapProfileCallsites::Row> {
  using argument_type =
      ::perfetto::trace_processor::TraceStorage::HeapProfileCallsites::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<int64_t>{}(r.depth) ^ std::hash<int64_t>{}(r.parent_id) ^
           std::hash<int64_t>{}(r.frame_row);
  }
};

template <>
struct hash<
    ::perfetto::trace_processor::TraceStorage::HeapProfileMappings::Row> {
  using argument_type =
      ::perfetto::trace_processor::TraceStorage::HeapProfileMappings::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.build_id) ^
           std::hash<int64_t>{}(r.offset) ^ std::hash<int64_t>{}(r.start) ^
           std::hash<int64_t>{}(r.end) ^ std::hash<int64_t>{}(r.load_bias) ^
           std::hash<::perfetto::trace_processor::StringId>{}(r.name_id);
  }
};

}  // namespace std

#endif  // SRC_TRACE_PROCESSOR_TRACE_STORAGE_H_