// Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "perf_reader.h"

#include <byteswap.h>
#include <limits.h>

#include <bitset>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>

#define LOG_TAG "perf_reader"

#include "base/logging.h"

#include "quipper_string.h"
#include "perf_utils.h"

namespace quipper {

struct BufferWithSize {
  char* ptr;
  size_t size;
};

// If the buffer is read-only, it is not sufficient to mark the previous struct
// as const, as this only means that the pointer cannot be changed, and says
// nothing about the contents of the buffer. So, we need another struct.
struct ConstBufferWithSize {
  const char* ptr;
  size_t size;
};

namespace {

// The type of the number of string data, found in the command line metadata in
// the perf data file.
typedef u32 num_string_data_type;

// Types of the event desc fields that are not found in other structs.
typedef u32 event_desc_num_events;
typedef u32 event_desc_attr_size;
typedef u32 event_desc_num_unique_ids;

// The type of the number of nodes field in NUMA topology.
typedef u32 numa_topology_num_nodes_type;

// A mask that is applied to metadata_mask_ in order to get a mask for
// only the metadata supported by quipper.
const uint32_t kSupportedMetadataMask =
    1 << HEADER_TRACING_DATA |
    1 << HEADER_BUILD_ID |
    1 << HEADER_HOSTNAME |
    1 << HEADER_OSRELEASE |
    1 << HEADER_VERSION |
    1 << HEADER_ARCH |
    1 << HEADER_NRCPUS |
    1 << HEADER_CPUDESC |
    1 << HEADER_CPUID |
    1 << HEADER_TOTAL_MEM |
    1 << HEADER_CMDLINE |
    1 << HEADER_EVENT_DESC |
    1 << HEADER_CPU_TOPOLOGY |
    1 << HEADER_NUMA_TOPOLOGY |
    1 << HEADER_BRANCH_STACK;

// By default, the build ID event has PID = -1.
const uint32_t kDefaultBuildIDEventPid = static_cast<uint32_t>(-1);

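// Swaps the byte order of |input| in place. For example:
//   u32 x = 0x12345678;
//   ByteSwap(&x);  // x is now 0x78563412.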
template <class T>
void ByteSwap(T* input) {
  switch (sizeof(T)) {
    case sizeof(uint8_t):
      LOG(WARNING) << "Attempting to byte swap on a single byte.";
      break;
    case sizeof(uint16_t):
      *input = bswap_16(*input);
      break;
    case sizeof(uint32_t):
      *input = bswap_32(*input);
      break;
    case sizeof(uint64_t):
      *input = bswap_64(*input);
      break;
    default:
      LOG(FATAL) << "Invalid size for byte swap: " << sizeof(T) << " bytes";
      break;
  }
}

u64 MaybeSwap(u64 value, bool swap) {
  if (swap)
    return bswap_64(value);
  return value;
}

u32 MaybeSwap(u32 value, bool swap) {
  if (swap)
    return bswap_32(value);
  return value;
}

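// Reverses the bits of a byte, e.g. ReverseByte(0xb1) returns 0x8d:
// 0b10110001 becomes 0b10001101, with the bit at position i moving to
// position (7 - i).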
u8 ReverseByte(u8 x) {
  x = (x & 0xf0) >> 4 | (x & 0x0f) << 4;  // exchange nibbles
  x = (x & 0xcc) >> 2 | (x & 0x33) << 2;  // exchange pairs
  x = (x & 0xaa) >> 1 | (x & 0x55) << 1;  // exchange neighbors
  return x;
}

// If field points to the start of a bitfield padded to len bytes, this
// performs an endian swap of the bitfield, assuming the compiler that produced
// it conforms to the same ABI (bitfield layout is not completely specified by
// the language).
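// With GCC-style bitfield allocation, little-endian targets assign fields
// starting from the least significant bit of each byte and big-endian targets
// from the most significant bit, so reversing the bits within each byte is
// what remaps every field to its cross-endian position.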
void SwapBitfieldOfBits(u8* field, size_t len) {
  for (size_t i = 0; i < len; i++) {
    field[i] = ReverseByte(field[i]);
  }
}

// The code currently assumes that the compiler will not add any padding to the
// various structs. These CHECKs make sure that this is true.
void CheckNoEventHeaderPadding() {
  perf_event_header header;
  CHECK_EQ(sizeof(header),
           sizeof(header.type) + sizeof(header.misc) + sizeof(header.size));
}

void CheckNoPerfEventAttrPadding() {
  perf_event_attr attr;
  CHECK_EQ(sizeof(attr),
           (reinterpret_cast<u64>(&attr.__reserved_2) -
            reinterpret_cast<u64>(&attr)) +
           sizeof(attr.__reserved_2));
}

void CheckNoEventTypePadding() {
  perf_trace_event_type event_type;
  CHECK_EQ(sizeof(event_type),
           sizeof(event_type.event_id) + sizeof(event_type.name));
}

void CheckNoBuildIDEventPadding() {
  build_id_event event;
  CHECK_EQ(sizeof(event),
           sizeof(event.header.type) + sizeof(event.header.misc) +
           sizeof(event.header.size) + sizeof(event.pid) +
           sizeof(event.build_id));
}

// Creates/updates a build id event with |build_id| and |filename|.
// Passing "" to |build_id| or |filename| will leave the corresponding field
// unchanged (in which case |event| must be non-null).
// If |event| is null or is not large enough, a new event will be created.
// In this case, if |event| is non-null, it will be freed.
// Otherwise, updates the fields of the existing event.
// |new_misc| indicates kernel vs user space, and is only used to fill in the
// |header.misc| field of new events.
// In either case, returns a pointer to the event containing the updated data,
// or NULL in the case of a failure.
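// For example (illustrative values):
//   build_id_event* e = CreateOrUpdateBuildID("deadbeef", "/bin/ls", 0, NULL);
//   // Rename in place; the event may be reallocated and freed internally.
//   e = CreateOrUpdateBuildID("", "/usr/local/bin/ls", 0, e);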
build_id_event* CreateOrUpdateBuildID(const string& build_id,
                                      const string& filename,
                                      uint16_t new_misc,
                                      build_id_event* event) {
  // When creating an event from scratch, build id and filename must be
  // present.
  if (!event && (build_id.empty() || filename.empty()))
    return NULL;
  size_t new_len = GetUint64AlignedStringLength(
      filename.empty() ? event->filename : filename);

  // If event is null, or we don't have enough memory, allocate more memory,
  // and switch the new pointer with the existing pointer.
  size_t new_size = sizeof(*event) + new_len;
  if (!event || new_size > event->header.size) {
    build_id_event* new_event = CallocMemoryForBuildID(new_size);

    if (event) {
      // Copy over everything except the filename and free the event.
      // It is guaranteed that we are changing the filename - otherwise, the
      // old size and the new size would be equal.
      *new_event = *event;
      free(event);
    } else {
      // Fill in the fields appropriately.
      new_event->header.type = HEADER_BUILD_ID;
      new_event->header.misc = new_misc;
      new_event->pid = kDefaultBuildIDEventPid;
    }
    event = new_event;
  }

  // Here, event is the pointer to the build_id_event that we are keeping.
  // Update the event's size, build id, and filename.
  if (!build_id.empty() &&
      !StringToHex(build_id, event->build_id, arraysize(event->build_id))) {
    free(event);
    return NULL;
  }

  if (!filename.empty())
    CHECK_GT(snprintf(event->filename, new_len, "%s", filename.c_str()), 0);

  event->header.size = new_size;
  return event;
}

// Reads |size| bytes from |buffer| into |dest| and advances |src_offset|.
bool ReadDataFromBuffer(const ConstBufferWithSize& buffer,
                        size_t size,
                        const string& value_name,
                        size_t* src_offset,
                        void* dest) {
  size_t end_offset = *src_offset + size / sizeof(*buffer.ptr);
  if (buffer.size < end_offset) {
    LOG(ERROR) << "Not enough bytes to read " << value_name
               << ". Requested " << size << " bytes";
    return false;
  }
  memcpy(dest, buffer.ptr + *src_offset, size);
  *src_offset = end_offset;
  return true;
}

// Reads a CStringWithLength from |buffer| into |dest|, and advances the
// offset.
bool ReadStringFromBuffer(const ConstBufferWithSize& buffer,
                          bool is_cross_endian,
                          size_t* offset,
                          CStringWithLength* dest) {
  if (!ReadDataFromBuffer(buffer, sizeof(dest->len), "string length",
                          offset, &dest->len)) {
    return false;
  }
  if (is_cross_endian)
    ByteSwap(&dest->len);

  if (buffer.size < *offset + dest->len) {
    LOG(ERROR) << "Not enough bytes to read string";
    return false;
  }
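  // Note: |dest->len| is the stored on-disk length, which includes the
  // trailing NUL and any padding, so it can exceed the C-string length read
  // here; advancing by |dest->len| keeps |*offset| in sync with the writer.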
  dest->str = string(buffer.ptr + *offset);
  *offset += dest->len / sizeof(*buffer.ptr);
  return true;
}

// Read the read_format info from perf data. Corresponds to sample format type
// PERF_SAMPLE_READ.
const uint64_t* ReadReadInfo(const uint64_t* array,
                             bool swap_bytes,
                             uint64_t read_format,
                             struct perf_sample* sample) {
  if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
    sample->read.time_enabled = *array++;
  if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
    sample->read.time_running = *array++;
  if (read_format & PERF_FORMAT_ID)
    sample->read.one.id = *array++;

  if (swap_bytes) {
    ByteSwap(&sample->read.time_enabled);
    ByteSwap(&sample->read.time_running);
    ByteSwap(&sample->read.one.id);
  }

  return array;
}

// Read call chain info from perf data. Corresponds to sample format type
// PERF_SAMPLE_CALLCHAIN.
const uint64_t* ReadCallchain(const uint64_t* array,
                              bool swap_bytes,
                              struct perf_sample* sample) {
  // Make sure there is no existing allocated memory in |sample->callchain|.
  CHECK_EQ(static_cast<void*>(NULL), sample->callchain);

  // The callgraph data consists of a uint64_t value |nr| followed by |nr|
  // addresses.
  uint64_t callchain_size = *array++;
  if (swap_bytes)
    ByteSwap(&callchain_size);
  struct ip_callchain* callchain =
      reinterpret_cast<struct ip_callchain*>(new uint64_t[callchain_size + 1]);
  callchain->nr = callchain_size;
  for (size_t i = 0; i < callchain_size; ++i) {
    callchain->ips[i] = *array++;
    if (swap_bytes)
      ByteSwap(&callchain->ips[i]);
  }
  sample->callchain = callchain;

  return array;
}

// Read raw info from perf data. Corresponds to sample format type
// PERF_SAMPLE_RAW.
const uint64_t* ReadRawData(const uint64_t* array,
                            bool swap_bytes,
                            struct perf_sample* sample) {
  // First read the size.
  const uint32_t* ptr = reinterpret_cast<const uint32_t*>(array);
  sample->raw_size = *ptr++;
  if (swap_bytes)
    ByteSwap(&sample->raw_size);

  // Allocate space for and read the raw data bytes.
  sample->raw_data = new uint8_t[sample->raw_size];
  memcpy(sample->raw_data, ptr, sample->raw_size);

  // Determine the bytes that were read, and align to the next 64 bits.
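  // E.g. raw_size == 13 gives 4 + 13 == 17 bytes, which rounds up to 24, so
  // |array| advances by three u64 words.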
  int bytes_read = AlignSize(sizeof(sample->raw_size) + sample->raw_size,
                             sizeof(uint64_t));
  array += bytes_read / sizeof(uint64_t);

  return array;
}

// Read branch stack info from perf data. Corresponds to sample format type
// PERF_SAMPLE_BRANCH_STACK.
const uint64_t* ReadBranchStack(const uint64_t* array,
                                bool swap_bytes,
                                struct perf_sample* sample) {
  // Make sure there is no existing allocated memory in
  // |sample->branch_stack|.
  CHECK_EQ(static_cast<void*>(NULL), sample->branch_stack);

  // The branch stack data consists of a uint64_t value |nr| followed by |nr|
  // branch_entry structs.
  uint64_t branch_stack_size = *array++;
  if (swap_bytes)
    ByteSwap(&branch_stack_size);
  struct branch_stack* branch_stack =
      reinterpret_cast<struct branch_stack*>(
          new uint8_t[sizeof(uint64_t) +
                      branch_stack_size * sizeof(struct branch_entry)]);
  branch_stack->nr = branch_stack_size;
  for (size_t i = 0; i < branch_stack_size; ++i) {
    memcpy(&branch_stack->entries[i], array, sizeof(struct branch_entry));
    array += sizeof(struct branch_entry) / sizeof(*array);
    if (swap_bytes) {
      ByteSwap(&branch_stack->entries[i].from);
      ByteSwap(&branch_stack->entries[i].to);
    }
  }
  sample->branch_stack = branch_stack;

  return array;
}

size_t ReadPerfSampleFromData(const perf_event_type event_type,
                              const uint64_t* array,
                              const uint64_t sample_fields,
                              const uint64_t read_format,
                              bool swap_bytes,
                              const perf_event_attr& attr0,
                              size_t n_attrs,
                              struct perf_sample* sample) {
  const uint64_t* initial_array_ptr = array;

  union {
    uint32_t val32[sizeof(uint64_t) / sizeof(uint32_t)];
    uint64_t val64;
  };

  // See structure for PERF_RECORD_SAMPLE in kernel/perf_event.h
  // and compare sample_id when sample_id_all is set.

  // NB: For sample_id, sample_fields has already been masked to the set
  // of fields in that struct by GetSampleFieldsForEventType. That set
  // of fields is mostly in the same order as PERF_RECORD_SAMPLE, with
  // the exception of PERF_SAMPLE_IDENTIFIER.

  // PERF_SAMPLE_IDENTIFIER is in a different location depending on
  // if this is a SAMPLE event or the sample_id of another event.
  if (event_type == PERF_RECORD_SAMPLE) {
    // { u64 id; } && PERF_SAMPLE_IDENTIFIER
    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
      sample->id = MaybeSwap(*array++, swap_bytes);
    }
  }

  // { u64 ip; } && PERF_SAMPLE_IP
  if (sample_fields & PERF_SAMPLE_IP) {
    sample->ip = MaybeSwap(*array++, swap_bytes);
  }

  // { u32 pid, tid; } && PERF_SAMPLE_TID
  if (sample_fields & PERF_SAMPLE_TID) {
    val64 = *array++;
    sample->pid = MaybeSwap(val32[0], swap_bytes);
    sample->tid = MaybeSwap(val32[1], swap_bytes);
  }

  // { u64 time; } && PERF_SAMPLE_TIME
  if (sample_fields & PERF_SAMPLE_TIME) {
    sample->time = MaybeSwap(*array++, swap_bytes);
  }

  // { u64 addr; } && PERF_SAMPLE_ADDR
  if (sample_fields & PERF_SAMPLE_ADDR) {
    sample->addr = MaybeSwap(*array++, swap_bytes);
  }

  // { u64 id; } && PERF_SAMPLE_ID
  if (sample_fields & PERF_SAMPLE_ID) {
    sample->id = MaybeSwap(*array++, swap_bytes);
  }

  // { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
  if (sample_fields & PERF_SAMPLE_STREAM_ID) {
    sample->stream_id = MaybeSwap(*array++, swap_bytes);
  }

  // { u32 cpu, res; } && PERF_SAMPLE_CPU
  if (sample_fields & PERF_SAMPLE_CPU) {
    val64 = *array++;
    sample->cpu = MaybeSwap(val32[0], swap_bytes);
    // sample->res = MaybeSwap(*val32[1], swap_bytes);  // not implemented?
  }

  // This is the location of PERF_SAMPLE_IDENTIFIER in struct sample_id.
  if (event_type != PERF_RECORD_SAMPLE) {
    // { u64 id; } && PERF_SAMPLE_IDENTIFIER
    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
      sample->id = MaybeSwap(*array++, swap_bytes);
    }
  }

  //
  // The remaining fields are only in PERF_RECORD_SAMPLE
  //

  // { u64 period; } && PERF_SAMPLE_PERIOD
  if (sample_fields & PERF_SAMPLE_PERIOD) {
    sample->period = MaybeSwap(*array++, swap_bytes);
  }

  // { struct read_format values; } && PERF_SAMPLE_READ
  if (sample_fields & PERF_SAMPLE_READ) {
    // TODO(cwp-team): support grouped read info.
    if (read_format & PERF_FORMAT_GROUP)
      return 0;
    array = ReadReadInfo(array, swap_bytes, read_format, sample);
  }

  // { u64 nr,
  //   u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
  if (sample_fields & PERF_SAMPLE_CALLCHAIN) {
    array = ReadCallchain(array, swap_bytes, sample);
  }

  // { u32 size;
  //   char data[size]; } && PERF_SAMPLE_RAW
  if (sample_fields & PERF_SAMPLE_RAW) {
    array = ReadRawData(array, swap_bytes, sample);
  }

  // { u64 nr;
  //   { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
  if (sample_fields & PERF_SAMPLE_BRANCH_STACK) {
    array = ReadBranchStack(array, swap_bytes, sample);
  }

  // { u64 abi,
  //   u64 regs[nr]; } && PERF_SAMPLE_REGS_USER
  if (sample_fields & PERF_SAMPLE_REGS_USER) {
    uint64_t abi = MaybeSwap(*array++, swap_bytes);
    if (abi != 0) {
      assert(n_attrs == 1);
      uint64_t reg_mask = attr0.sample_regs_user;
      size_t bit_nr = 0;
      for (size_t i = 0; i < 64; ++i) {
        if ((reg_mask >> i) & 1) {
          bit_nr++;
        }
      }
      array += bit_nr;
    }
  }

  // { u64 size,
  //   u64 regs[nr]; } && PERF_SAMPLE_STACK_USER
  if (sample_fields & PERF_SAMPLE_STACK_USER) {
    uint64_t size = MaybeSwap(*array++, swap_bytes);
    if (size != 0) {
      array += (size / sizeof(uint64_t));
      array += 1;  // for dyn_size
    }
  }

  static const u64 kUnimplementedSampleFields =
      PERF_SAMPLE_WEIGHT |
      PERF_SAMPLE_DATA_SRC |
      PERF_SAMPLE_TRANSACTION;

  if (sample_fields & kUnimplementedSampleFields) {
    LOG(WARNING) << "Unimplemented sample fields 0x"
                 << std::hex << (sample_fields & kUnimplementedSampleFields);
  }

  if (sample_fields & ~(PERF_SAMPLE_MAX - 1)) {
    LOG(WARNING) << "Unrecognized sample fields 0x"
                 << std::hex << (sample_fields & ~(PERF_SAMPLE_MAX - 1));
  }

  return (array - initial_array_ptr) * sizeof(uint64_t);
}

size_t WritePerfSampleToData(const perf_event_type event_type,
                             const struct perf_sample& sample,
                             const uint64_t sample_fields,
                             const uint64_t read_format,
                             uint64_t* array) {
  const uint64_t* initial_array_ptr = array;

  union {
    uint32_t val32[sizeof(uint64_t) / sizeof(uint32_t)];
    uint64_t val64;
  };

  // See notes at the top of ReadPerfSampleFromData regarding the structure
  // of PERF_RECORD_SAMPLE, sample_id, and PERF_SAMPLE_IDENTIFIER, as they
  // all apply here as well.

  // PERF_SAMPLE_IDENTIFIER is in a different location depending on
  // if this is a SAMPLE event or the sample_id of another event.
  if (event_type == PERF_RECORD_SAMPLE) {
    // { u64 id; } && PERF_SAMPLE_IDENTIFIER
    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
      *array++ = sample.id;
    }
  }

  // { u64 ip; } && PERF_SAMPLE_IP
  if (sample_fields & PERF_SAMPLE_IP) {
    *array++ = sample.ip;
  }

  // { u32 pid, tid; } && PERF_SAMPLE_TID
  if (sample_fields & PERF_SAMPLE_TID) {
    val32[0] = sample.pid;
    val32[1] = sample.tid;
    *array++ = val64;
  }

  // { u64 time; } && PERF_SAMPLE_TIME
  if (sample_fields & PERF_SAMPLE_TIME) {
    *array++ = sample.time;
  }

  // { u64 addr; } && PERF_SAMPLE_ADDR
  if (sample_fields & PERF_SAMPLE_ADDR) {
    *array++ = sample.addr;
  }

  // { u64 id; } && PERF_SAMPLE_ID
  if (sample_fields & PERF_SAMPLE_ID) {
    *array++ = sample.id;
  }

  // { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
  if (sample_fields & PERF_SAMPLE_STREAM_ID) {
    *array++ = sample.stream_id;
  }

  // { u32 cpu, res; } && PERF_SAMPLE_CPU
  if (sample_fields & PERF_SAMPLE_CPU) {
    val32[0] = sample.cpu;
    // val32[1] = sample.res;  // not implemented?
    val32[1] = 0;
    *array++ = val64;
  }

  // This is the location of PERF_SAMPLE_IDENTIFIER in struct sample_id.
  if (event_type != PERF_RECORD_SAMPLE) {
    // { u64 id; } && PERF_SAMPLE_IDENTIFIER
    if (sample_fields & PERF_SAMPLE_IDENTIFIER) {
      *array++ = sample.id;
    }
  }

  //
  // The remaining fields are only in PERF_RECORD_SAMPLE
  //

  // { u64 period; } && PERF_SAMPLE_PERIOD
  if (sample_fields & PERF_SAMPLE_PERIOD) {
    *array++ = sample.period;
  }

  // { struct read_format values; } && PERF_SAMPLE_READ
  if (sample_fields & PERF_SAMPLE_READ) {
    // TODO(cwp-team): support grouped read info.
    if (read_format & PERF_FORMAT_GROUP)
      return 0;
    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
      *array++ = sample.read.time_enabled;
    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
      *array++ = sample.read.time_running;
    if (read_format & PERF_FORMAT_ID)
      *array++ = sample.read.one.id;
  }

  // { u64 nr,
  //   u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
  if (sample_fields & PERF_SAMPLE_CALLCHAIN) {
    if (!sample.callchain) {
      LOG(ERROR) << "Expecting callchain data, but none was found.";
    } else {
      *array++ = sample.callchain->nr;
      for (size_t i = 0; i < sample.callchain->nr; ++i)
        *array++ = sample.callchain->ips[i];
    }
  }

  // { u32 size;
  //   char data[size]; } && PERF_SAMPLE_RAW
  if (sample_fields & PERF_SAMPLE_RAW) {
    uint32_t* ptr = reinterpret_cast<uint32_t*>(array);
    *ptr++ = sample.raw_size;
    memcpy(ptr, sample.raw_data, sample.raw_size);

    // Update the data write pointer after aligning to the next 64 bits.
    int num_bytes = AlignSize(sizeof(sample.raw_size) + sample.raw_size,
                              sizeof(uint64_t));
    array += num_bytes / sizeof(uint64_t);
  }

  // { u64 nr;
  //   { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
  if (sample_fields & PERF_SAMPLE_BRANCH_STACK) {
    if (!sample.branch_stack) {
      LOG(ERROR) << "Expecting branch stack data, but none was found.";
    } else {
      *array++ = sample.branch_stack->nr;
      for (size_t i = 0; i < sample.branch_stack->nr; ++i) {
        *array++ = sample.branch_stack->entries[i].from;
        *array++ = sample.branch_stack->entries[i].to;
        memcpy(array++, &sample.branch_stack->entries[i].flags,
               sizeof(uint64_t));
      }
    }
  }

  //
  // Unsupported sample types.
  //
  CHECK(!(sample_fields & (PERF_SAMPLE_STACK_USER | PERF_SAMPLE_REGS_USER)));

  return (array - initial_array_ptr) * sizeof(uint64_t);
}

}  // namespace

PerfReader::~PerfReader() {
  // Free allocated memory.
  for (size_t i = 0; i < build_id_events_.size(); ++i)
    if (build_id_events_[i])
      free(build_id_events_[i]);
}

void PerfReader::PerfizeBuildIDString(string* build_id) {
  build_id->resize(kBuildIDStringLength, '0');
}

void PerfReader::UnperfizeBuildIDString(string* build_id) {
  const size_t kPaddingSize = 8;
  const string kBuildIDPadding = string(kPaddingSize, '0');

  // Remove kBuildIDPadding from the end of build_id until we cannot remove any
  // more, or removing more would cause the build id to be empty.
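  // E.g. (assuming kBuildIDStringLength == 40) a build id perfized from
  // "deadbeef" carries 32 trailing '0's, which are trimmed 8 at a time
  // (40 -> 32 -> 24 -> 16 -> 8) to recover "deadbeef".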
  while (build_id->size() > kPaddingSize &&
         build_id->substr(build_id->size() - kPaddingSize) ==
         kBuildIDPadding) {
    build_id->resize(build_id->size() - kPaddingSize);
  }
}

bool PerfReader::ReadFile(const string& filename) {
  std::vector<char> data;
  if (!ReadFileToData(filename, &data))
    return false;
  return ReadFromVector(data);
}

bool PerfReader::ReadFromVector(const std::vector<char>& data) {
  return ReadFromPointer(&data[0], data.size());
}

bool PerfReader::ReadFromString(const string& str) {
  return ReadFromPointer(str.c_str(), str.size());
}

bool PerfReader::ReadFromPointer(const char* perf_data, size_t size) {
  const ConstBufferWithSize data = { perf_data, size };

  if (data.size == 0)
    return false;
  if (!ReadHeader(data))
    return false;

  // Check if it is normal perf data.
  if (header_.size == sizeof(header_)) {
    DLOG(INFO) << "Perf data is in normal format.";
    metadata_mask_ = header_.adds_features[0];
    return (ReadAttrs(data) && ReadEventTypes(data) && ReadData(data)
            && ReadMetadata(data));
  }

  // Otherwise it is piped data.
  LOG(ERROR) << "Internal error: no support for piped data";
  return false;
}

bool PerfReader::Localize(
    const std::map<string, string>& build_ids_to_filenames) {
  std::map<string, string> perfized_build_ids_to_filenames;
  std::map<string, string>::const_iterator it;
  for (it = build_ids_to_filenames.begin();
       it != build_ids_to_filenames.end();
       ++it) {
    string build_id = it->first;
    PerfizeBuildIDString(&build_id);
    perfized_build_ids_to_filenames[build_id] = it->second;
  }

  std::map<string, string> filename_map;
  for (size_t i = 0; i < build_id_events_.size(); ++i) {
    build_id_event* event = build_id_events_[i];
    string build_id = HexToString(event->build_id, kBuildIDArraySize);
    if (perfized_build_ids_to_filenames.find(build_id) ==
        perfized_build_ids_to_filenames.end()) {
      continue;
    }

    string new_name = perfized_build_ids_to_filenames.at(build_id);
    filename_map[string(event->filename)] = new_name;
    build_id_event* new_event = CreateOrUpdateBuildID("", new_name, 0, event);
    CHECK(new_event);
    build_id_events_[i] = new_event;
  }

  LocalizeUsingFilenames(filename_map);
  return true;
}

bool PerfReader::LocalizeUsingFilenames(
    const std::map<string, string>& filename_map) {
  LocalizeMMapFilenames(filename_map);
  for (size_t i = 0; i < build_id_events_.size(); ++i) {
    build_id_event* event = build_id_events_[i];
    string old_name = event->filename;

    if (filename_map.find(event->filename) != filename_map.end()) {
      const string& new_name = filename_map.at(old_name);
      build_id_event* new_event =
          CreateOrUpdateBuildID("", new_name, 0, event);
      CHECK(new_event);
      build_id_events_[i] = new_event;
    }
  }
  return true;
}

void PerfReader::GetFilenames(std::vector<string>* filenames) const {
  std::set<string> filename_set;
  GetFilenamesAsSet(&filename_set);
  filenames->clear();
  filenames->insert(filenames->begin(), filename_set.begin(),
                    filename_set.end());
}

void PerfReader::GetFilenamesAsSet(std::set<string>* filenames) const {
  filenames->clear();
  for (size_t i = 0; i < events_.size(); ++i) {
    const event_t& event = *events_[i];
    if (event.header.type == PERF_RECORD_MMAP)
      filenames->insert(event.mmap.filename);
    if (event.header.type == PERF_RECORD_MMAP2)
      filenames->insert(event.mmap2.filename);
  }
}

void PerfReader::GetFilenamesToBuildIDs(
    std::map<string, string>* filenames_to_build_ids) const {
  filenames_to_build_ids->clear();
  for (size_t i = 0; i < build_id_events_.size(); ++i) {
    const build_id_event& event = *build_id_events_[i];
    string build_id = HexToString(event.build_id, kBuildIDArraySize);
    (*filenames_to_build_ids)[event.filename] = build_id;
  }
}

bool PerfReader::IsSupportedEventType(uint32_t type) {
  switch (type) {
    case PERF_RECORD_SAMPLE:
    case PERF_RECORD_MMAP:
    case PERF_RECORD_MMAP2:
    case PERF_RECORD_FORK:
    case PERF_RECORD_EXIT:
    case PERF_RECORD_COMM:
    case PERF_RECORD_LOST:
    case PERF_RECORD_THROTTLE:
    case PERF_RECORD_UNTHROTTLE:
    case SIMPLE_PERF_RECORD_KERNEL_SYMBOL:
    case SIMPLE_PERF_RECORD_DSO:
    case SIMPLE_PERF_RECORD_SYMBOL:
    case SIMPLE_PERF_RECORD_SPLIT:
    case SIMPLE_PERF_RECORD_SPLIT_END:
      return true;
    case PERF_RECORD_READ:
    case PERF_RECORD_MAX:
      return false;
    default:
      LOG(FATAL) << "Unknown event type " << type;
      return false;
  }
}

bool PerfReader::ReadPerfSampleInfo(const event_t& event,
                                    struct perf_sample* sample) const {
  CHECK(sample);

  if (!IsSupportedEventType(event.header.type)) {
    LOG(ERROR) << "Unsupported event type " << event.header.type;
    return false;
  }

  // We want to completely ignore these records.
  if (event.header.type == SIMPLE_PERF_RECORD_KERNEL_SYMBOL ||
      event.header.type == SIMPLE_PERF_RECORD_DSO ||
      event.header.type == SIMPLE_PERF_RECORD_SYMBOL ||
      event.header.type == SIMPLE_PERF_RECORD_SPLIT ||
      event.header.type == SIMPLE_PERF_RECORD_SPLIT_END)
    return true;

  uint64_t sample_format = GetSampleFieldsForEventType(event.header.type,
                                                       sample_type_);
  uint64_t offset = GetPerfSampleDataOffset(event);
  size_t size_read = ReadPerfSampleFromData(
      static_cast<perf_event_type>(event.header.type),
      reinterpret_cast<const uint64_t*>(&event) + offset / sizeof(uint64_t),
      sample_format,
      read_format_,
      is_cross_endian_,
      attrs_[0].attr,
      attrs_.size(),
      sample);

  size_t expected_size = event.header.size - offset;
  if (size_read != expected_size) {
    LOG(ERROR) << "Read " << size_read << " bytes, expected "
               << expected_size << " bytes.";
  }

  return (size_read == expected_size);
}

bool PerfReader::WritePerfSampleInfo(const perf_sample& sample,
                                     event_t* event) const {
  CHECK(event);

  if (!IsSupportedEventType(event->header.type)) {
    LOG(ERROR) << "Unsupported event type " << event->header.type;
    return false;
  }

  uint64_t sample_format = GetSampleFieldsForEventType(event->header.type,
                                                       sample_type_);
  uint64_t offset = GetPerfSampleDataOffset(*event);

  size_t expected_size = event->header.size - offset;
  memset(reinterpret_cast<uint8_t*>(event) + offset, 0, expected_size);
  size_t size_written = WritePerfSampleToData(
      static_cast<perf_event_type>(event->header.type),
      sample,
      sample_format,
      read_format_,
      reinterpret_cast<uint64_t*>(event) + offset / sizeof(uint64_t));
  if (size_written != expected_size) {
    LOG(ERROR) << "Wrote " << size_written << " bytes, expected "
               << expected_size << " bytes.";
  }

  return (size_written == expected_size);
}

bool PerfReader::ReadHeader(const ConstBufferWithSize& data) {
  CheckNoEventHeaderPadding();
  size_t offset = 0;
  if (!ReadDataFromBuffer(data, sizeof(piped_header_), "header magic",
                          &offset, &piped_header_)) {
    return false;
  }
  if (piped_header_.magic != kPerfMagic &&
      piped_header_.magic != bswap_64(kPerfMagic)) {
    LOG(ERROR) << "Read wrong magic. Expected: 0x" << std::hex << kPerfMagic
               << " or 0x" << std::hex << bswap_64(kPerfMagic)
               << " Got: 0x" << std::hex << piped_header_.magic;
    return false;
  }
  is_cross_endian_ = (piped_header_.magic != kPerfMagic);
  if (is_cross_endian_)
    ByteSwap(&piped_header_.size);

  // Header can be a piped header.
  if (piped_header_.size == sizeof(piped_header_))
    return true;

  // Re-read the full header.
  offset = 0;
  if (!ReadDataFromBuffer(data, sizeof(header_), "header data",
                          &offset, &header_)) {
    return false;
  }
  if (is_cross_endian_)
    ByteSwap(&header_.size);

  DLOG(INFO) << "event_types.size: " << header_.event_types.size;
  DLOG(INFO) << "event_types.offset: " << header_.event_types.offset;

  return true;
}

bool PerfReader::ReadAttrs(const ConstBufferWithSize& data) {
  size_t num_attrs = header_.attrs.size / header_.attr_size;
  size_t offset = header_.attrs.offset;
  for (size_t i = 0; i < num_attrs; i++) {
    if (!ReadAttr(data, &offset))
      return false;
  }
  return true;
}

bool PerfReader::ReadAttr(const ConstBufferWithSize& data, size_t* offset) {
  PerfFileAttr attr;
  if (!ReadEventAttr(data, offset, &attr.attr))
    return false;

  perf_file_section ids;
  if (!ReadDataFromBuffer(data, sizeof(ids), "ID section info", offset, &ids))
    return false;
  if (is_cross_endian_) {
    ByteSwap(&ids.offset);
    ByteSwap(&ids.size);
  }

  size_t num_ids = ids.size / sizeof(decltype(attr.ids)::value_type);
  // Convert the offset from u64 to size_t.
  size_t ids_offset = ids.offset;
  if (!ReadUniqueIDs(data, num_ids, &ids_offset, &attr.ids))
    return false;
  attrs_.push_back(attr);
  return true;
}

u32 PerfReader::ReadPerfEventAttrSize(const ConstBufferWithSize& data,
                                      size_t attr_offset) {
  static_assert(std::is_same<decltype(perf_event_attr::size), u32>::value,
                "ReadPerfEventAttrSize return type should match "
                "perf_event_attr.size");
  u32 attr_size;
  size_t attr_size_offset = attr_offset + offsetof(perf_event_attr, size);
  if (!ReadDataFromBuffer(data, sizeof(perf_event_attr::size),
                          "attr.size", &attr_size_offset, &attr_size)) {
    return kuint32max;
  }
  return MaybeSwap(attr_size, is_cross_endian_);
}

bool PerfReader::ReadEventAttr(const ConstBufferWithSize& data, size_t* offset,
                               perf_event_attr* attr) {
  CheckNoPerfEventAttrPadding();

  std::memset(attr, 0, sizeof(*attr));

  // Read just the size first.
  u32 attr_size = ReadPerfEventAttrSize(data, *offset);
  if (attr_size == kuint32max) {
    return false;
  }

  // Now read the whole struct.
  if (!ReadDataFromBuffer(data, attr_size, "attribute", offset,
                          reinterpret_cast<char*>(attr))) {
    return false;
  }

  if (is_cross_endian_) {
    // Depending on attr->size, some of these might not have actually been
    // read. This is okay: they are zero.
    ByteSwap(&attr->type);
    ByteSwap(&attr->size);
    ByteSwap(&attr->config);
    ByteSwap(&attr->sample_period);
    ByteSwap(&attr->sample_type);
    ByteSwap(&attr->read_format);

    // NB: This will also reverse precise_ip : 2 as if it was two fields:
    auto* const bitfield_start = &attr->read_format + 1;
    SwapBitfieldOfBits(reinterpret_cast<u8*>(bitfield_start),
                       sizeof(u64));
    // ... So swap it back:
    const auto tmp = attr->precise_ip;
    attr->precise_ip = (tmp & 0x2) >> 1 | (tmp & 0x1) << 1;
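    // E.g. a precise_ip of 0b10 reads back as 0b01 after the per-byte bit
    // reversal above; this swap restores 0b10.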

    ByteSwap(&attr->wakeup_events);  // union with wakeup_watermark
    ByteSwap(&attr->bp_type);
    ByteSwap(&attr->bp_addr);        // union with config1
    ByteSwap(&attr->bp_len);         // union with config2
    ByteSwap(&attr->branch_sample_type);
    ByteSwap(&attr->sample_regs_user);
    ByteSwap(&attr->sample_stack_user);
  }

  CHECK_EQ(attr_size, attr->size);
  // The actual perf_event_attr data size might be different from the size of
  // the struct definition. Check against perf_event_attr's |size| field.
  attr->size = sizeof(*attr);

  // Assign sample type if it hasn't been assigned, otherwise make sure all
  // subsequent attributes have the same sample type bits set.
  if (sample_type_ == 0) {
    sample_type_ = attr->sample_type;
  } else {
    CHECK_EQ(sample_type_, attr->sample_type)
        << "Event type sample format does not match sample format of other "
        << "event type.";
  }

  if (read_format_ == 0) {
    read_format_ = attr->read_format;
  } else {
    CHECK_EQ(read_format_, attr->read_format)
        << "Event type read format does not match read format of other event "
        << "types.";
  }

  return true;
}

bool PerfReader::ReadUniqueIDs(const ConstBufferWithSize& data, size_t num_ids,
                               size_t* offset, std::vector<u64>* ids) {
  ids->resize(num_ids);
  for (size_t j = 0; j < num_ids; j++) {
    if (!ReadDataFromBuffer(data, sizeof(ids->at(j)), "ID", offset,
                            &ids->at(j))) {
      return false;
    }
    if (is_cross_endian_)
      ByteSwap(&ids->at(j));
  }
  return true;
}

bool PerfReader::ReadEventTypes(const ConstBufferWithSize& data) {
  size_t num_event_types = header_.event_types.size /
      sizeof(struct perf_trace_event_type);
  CHECK_EQ(sizeof(perf_trace_event_type) * num_event_types,
           header_.event_types.size);
  size_t offset = header_.event_types.offset;
  for (size_t i = 0; i < num_event_types; ++i) {
    if (!ReadEventType(data, &offset))
      return false;
  }
  return true;
}

bool PerfReader::ReadEventType(const ConstBufferWithSize& data,
                               size_t* offset) {
  CheckNoEventTypePadding();
  perf_trace_event_type type;
  memset(&type, 0, sizeof(type));
  if (!ReadDataFromBuffer(data, sizeof(type.event_id), "event id",
                          offset, &type.event_id)) {
    return false;
  }
  const char* event_name = reinterpret_cast<const char*>(data.ptr + *offset);
  CHECK_GT(snprintf(type.name, sizeof(type.name), "%s", event_name), 0);
  *offset += sizeof(type.name);
  event_types_.push_back(type);
  return true;
}

bool PerfReader::ReadData(const ConstBufferWithSize& data) {
  u64 data_remaining_bytes = header_.data.size;
  size_t offset = header_.data.offset;
  while (data_remaining_bytes != 0) {
    if (data.size < offset) {
      LOG(ERROR) << "Not enough data to read a perf event.";
      return false;
    }

    const event_t* event = reinterpret_cast<const event_t*>(data.ptr + offset);
    if (!ReadPerfEventBlock(*event))
      return false;
    data_remaining_bytes -= event->header.size;
    offset += event->header.size;
  }

  DLOG(INFO) << "Number of events stored: " << events_.size();
  return true;
}

bool PerfReader::ReadMetadata(const ConstBufferWithSize& data) {
  size_t offset = header_.data.offset + header_.data.size;

  for (u32 type = HEADER_FIRST_FEATURE; type != HEADER_LAST_FEATURE; ++type) {
    if ((metadata_mask_ & (1 << type)) == 0)
      continue;

    if (data.size < offset) {
      LOG(ERROR) << "Not enough data to read offset and size of metadata.";
      return false;
    }

    u64 metadata_offset, metadata_size;
    if (!ReadDataFromBuffer(data, sizeof(metadata_offset), "metadata offset",
                            &offset, &metadata_offset) ||
        !ReadDataFromBuffer(data, sizeof(metadata_size), "metadata size",
                            &offset, &metadata_size)) {
      return false;
    }

    if (data.size < metadata_offset + metadata_size) {
      LOG(ERROR) << "Not enough data to read metadata.";
      return false;
    }

    switch (type) {
      case HEADER_TRACING_DATA:
        if (!ReadTracingMetadata(data, metadata_offset, metadata_size)) {
          return false;
        }
        break;
      case HEADER_BUILD_ID:
        if (!ReadBuildIDMetadata(data, type, metadata_offset, metadata_size))
          return false;
        break;
      case HEADER_HOSTNAME:
      case HEADER_OSRELEASE:
      case HEADER_VERSION:
      case HEADER_ARCH:
      case HEADER_CPUDESC:
      case HEADER_CPUID:
      case HEADER_CMDLINE:
        if (!ReadStringMetadata(data, type, metadata_offset, metadata_size))
          return false;
        break;
      case HEADER_NRCPUS:
        if (!ReadUint32Metadata(data, type, metadata_offset, metadata_size))
          return false;
        break;
      case HEADER_TOTAL_MEM:
        if (!ReadUint64Metadata(data, type, metadata_offset, metadata_size))
          return false;
        break;
      case HEADER_EVENT_DESC:
        break;
      case HEADER_CPU_TOPOLOGY:
        if (!ReadCPUTopologyMetadata(data, type, metadata_offset,
                                     metadata_size))
          return false;
        break;
      case HEADER_NUMA_TOPOLOGY:
        if (!ReadNUMATopologyMetadata(data, type, metadata_offset,
                                      metadata_size))
          return false;
        break;
      case HEADER_PMU_MAPPINGS:
        // ignore for now
        continue;
        break;
      case HEADER_BRANCH_STACK:
        continue;
      default:
        LOG(INFO) << "Unsupported metadata type: " << type;
        break;
    }
  }

  // Event type events are optional in some newer versions of perf. They
  // contain the same information that is already in |attrs_|. Make sure the
  // number of event types matches the number of attrs, but only if there are
  // event type events present.
  if (event_types_.size() > 0) {
    if (event_types_.size() != attrs_.size()) {
      LOG(ERROR) << "Mismatch between number of event type events and attr "
                 << "events: " << event_types_.size() << " vs "
                 << attrs_.size();
      return false;
    }
    metadata_mask_ |= (1 << HEADER_EVENT_DESC);
  }
  return true;
}

bool PerfReader::ReadBuildIDMetadata(const ConstBufferWithSize& data,
                                     u32 /*type*/,
                                     size_t offset, size_t size) {
  CheckNoBuildIDEventPadding();
  while (size > 0) {
    // Make sure there is enough data for everything but the filename.
    if (data.size < offset + sizeof(build_id_event) / sizeof(*data.ptr)) {
      LOG(ERROR) << "Not enough bytes to read build id event";
      return false;
    }

    const build_id_event* temp_ptr =
        reinterpret_cast<const build_id_event*>(data.ptr + offset);
    u16 event_size = temp_ptr->header.size;
    if (is_cross_endian_)
      ByteSwap(&event_size);

    // Make sure there is enough data for the rest of the event.
    if (data.size < offset + event_size / sizeof(*data.ptr)) {
      LOG(ERROR) << "Not enough bytes to read build id event";
      return false;
    }

    // Allocate memory for the event and copy over the bytes.
    build_id_event* event = CallocMemoryForBuildID(event_size);
    if (!ReadDataFromBuffer(data, event_size, "build id event",
                            &offset, event)) {
      return false;
    }
    if (is_cross_endian_) {
      ByteSwap(&event->header.type);
      ByteSwap(&event->header.misc);
      ByteSwap(&event->header.size);
      ByteSwap(&event->pid);
    }
    size -= event_size;

    // Perf tends to use more space than necessary, so fix the size.
    event->header.size =
        sizeof(*event) + GetUint64AlignedStringLength(event->filename);
    build_id_events_.push_back(event);
  }

  return true;
}

bool PerfReader::ReadStringMetadata(const ConstBufferWithSize& data, u32 type,
                                    size_t offset, size_t size) {
  PerfStringMetadata str_data;
  str_data.type = type;

  size_t start_offset = offset;
  // Skip the number of string data if it is present.
  if (NeedsNumberOfStringData(type))
    offset += sizeof(num_string_data_type) / sizeof(*data.ptr);

  while ((offset - start_offset) < size) {
    CStringWithLength single_string;
    if (!ReadStringFromBuffer(data, is_cross_endian_, &offset, &single_string))
      return false;
    str_data.data.push_back(single_string);
  }

  string_metadata_.push_back(str_data);
  return true;
}

bool PerfReader::ReadUint32Metadata(const ConstBufferWithSize& data, u32 type,
                                    size_t offset, size_t size) {
  PerfUint32Metadata uint32_data;
  uint32_data.type = type;

  size_t start_offset = offset;
  while (size > offset - start_offset) {
    uint32_t item;
    if (!ReadDataFromBuffer(data, sizeof(item), "uint32_t data", &offset,
                            &item))
      return false;

    if (is_cross_endian_)
      ByteSwap(&item);

    uint32_data.data.push_back(item);
  }

  uint32_metadata_.push_back(uint32_data);
  return true;
}

bool PerfReader::ReadUint64Metadata(const ConstBufferWithSize& data, u32 type,
                                    size_t offset, size_t size) {
  PerfUint64Metadata uint64_data;
  uint64_data.type = type;

  size_t start_offset = offset;
  while (size > offset - start_offset) {
    uint64_t item;
    if (!ReadDataFromBuffer(data, sizeof(item), "uint64_t data", &offset,
                            &item))
      return false;

    if (is_cross_endian_)
      ByteSwap(&item);

    uint64_data.data.push_back(item);
  }

  uint64_metadata_.push_back(uint64_data);
  return true;
}

bool PerfReader::ReadCPUTopologyMetadata(
    const ConstBufferWithSize& data, u32 /*type*/,
    size_t offset, size_t /*size*/) {
  num_siblings_type num_core_siblings;
  if (!ReadDataFromBuffer(data, sizeof(num_core_siblings), "num cores",
                          &offset, &num_core_siblings)) {
    return false;
  }
  if (is_cross_endian_)
    ByteSwap(&num_core_siblings);

  cpu_topology_.core_siblings.resize(num_core_siblings);
  for (size_t i = 0; i < num_core_siblings; ++i) {
    if (!ReadStringFromBuffer(data, is_cross_endian_, &offset,
                              &cpu_topology_.core_siblings[i])) {
      return false;
    }
  }

  num_siblings_type num_thread_siblings;
  if (!ReadDataFromBuffer(data, sizeof(num_thread_siblings), "num threads",
                          &offset, &num_thread_siblings)) {
    return false;
  }
  if (is_cross_endian_)
    ByteSwap(&num_thread_siblings);

  cpu_topology_.thread_siblings.resize(num_thread_siblings);
  for (size_t i = 0; i < num_thread_siblings; ++i) {
    if (!ReadStringFromBuffer(data, is_cross_endian_, &offset,
                              &cpu_topology_.thread_siblings[i])) {
      return false;
    }
  }

  return true;
}

bool PerfReader::ReadNUMATopologyMetadata(
    const ConstBufferWithSize& data, u32 /*type*/,
    size_t offset, size_t /*size*/) {
  numa_topology_num_nodes_type num_nodes;
  if (!ReadDataFromBuffer(data, sizeof(num_nodes), "num nodes",
                          &offset, &num_nodes)) {
    return false;
  }
  if (is_cross_endian_)
    ByteSwap(&num_nodes);

  for (size_t i = 0; i < num_nodes; ++i) {
    PerfNodeTopologyMetadata node;
    if (!ReadDataFromBuffer(data, sizeof(node.id), "node id",
                            &offset, &node.id) ||
        !ReadDataFromBuffer(data, sizeof(node.total_memory),
                            "node total memory", &offset,
                            &node.total_memory) ||
        !ReadDataFromBuffer(data, sizeof(node.free_memory),
                            "node free memory", &offset, &node.free_memory) ||
        !ReadStringFromBuffer(data, is_cross_endian_, &offset,
                              &node.cpu_list)) {
      return false;
    }
    if (is_cross_endian_) {
      ByteSwap(&node.id);
      ByteSwap(&node.total_memory);
      ByteSwap(&node.free_memory);
    }
    numa_topology_.push_back(node);
  }
  return true;
}

bool PerfReader::ReadTracingMetadata(
    const ConstBufferWithSize& data, size_t offset, size_t size) {
  size_t tracing_data_offset = offset;
  tracing_data_.resize(size);
  return ReadDataFromBuffer(data, tracing_data_.size(), "tracing_data",
                            &tracing_data_offset, tracing_data_.data());
}

bool PerfReader::ReadTracingMetadataEvent(
    const ConstBufferWithSize& data, size_t offset) {
  // TRACING_DATA's header.size is a lie. It is the size of only the event
  // struct. The size of the data is in the event struct, and followed
  // immediately by the tracing header data.

  // Make a copy of the event (but not the tracing data).
  tracing_data_event tracing_event =
      *reinterpret_cast<const tracing_data_event*>(data.ptr + offset);

  if (is_cross_endian_) {
    ByteSwap(&tracing_event.header.type);
    ByteSwap(&tracing_event.header.misc);
    ByteSwap(&tracing_event.header.size);
    ByteSwap(&tracing_event.size);
  }

  return ReadTracingMetadata(data, offset + tracing_event.header.size,
                             tracing_event.size);
}

bool PerfReader::ReadAttrEventBlock(const ConstBufferWithSize& data,
                                    size_t offset, size_t size) {
  const size_t initial_offset = offset;
  PerfFileAttr attr;
  if (!ReadEventAttr(data, &offset, &attr.attr))
    return false;

  // attr.attr.size has been upgraded to the current size of perf_event_attr.
  const size_t actual_attr_size = offset - initial_offset;

  const size_t num_ids =
      (size - actual_attr_size) / sizeof(decltype(attr.ids)::value_type);
  if (!ReadUniqueIDs(data, num_ids, &offset, &attr.ids))
    return false;

  // Event types are found many times in the perf data file.
  // Only add this event type if it is not already present.
  for (size_t i = 0; i < attrs_.size(); ++i) {
    if (attrs_[i].ids[0] == attr.ids[0])
      return true;
  }
  attrs_.push_back(attr);
  return true;
}

// When this method is called, |event| is a reference to the bytes in the data
// vector that contains the entire perf.data file. As a result, we need to be
// careful to only copy event.header.size bytes.
// In particular, something like
//   event_t event_copy = event;
// would be bad, because it would read past the end of the event, and possibly
// past the end of the data vector as well.
bool PerfReader::ReadPerfEventBlock(const event_t& event) {
  u16 size = event.header.size;
  if (is_cross_endian_)
    ByteSwap(&size);

  //
  // Upstream linux perf limits the size of an event record to 2^16 bytes,
  // however simpleperf includes extensions to support larger (2^32) record
  // sizes via a split record scheme (the larger records are split up
  // into chunks and then embedded into a series of SIMPLE_PERF_RECORD_SPLIT
  // records followed by a terminating SIMPLE_PERF_RECORD_SPLIT_END record).
  // At the moment none of the larger records are of interest to perfprofd, so
  // the main thing we're doing here is ignoring/bypassing them.
  //
  if (event.header.type == SIMPLE_PERF_RECORD_KERNEL_SYMBOL ||
      event.header.type == SIMPLE_PERF_RECORD_DSO ||
      event.header.type == SIMPLE_PERF_RECORD_SYMBOL ||
      event.header.type == SIMPLE_PERF_RECORD_SPLIT ||
      event.header.type == SIMPLE_PERF_RECORD_SPLIT_END)
    size = sizeof(event_t);
  else if (size > sizeof(event_t)) {
    LOG(INFO) << "Data size: " << size << " sizeof(event_t): "
              << sizeof(event_t);
    return false;
  }

  // Copy only the part of the event that is needed.
  malloced_unique_ptr<event_t> event_copy(CallocMemoryForEvent(size));
  memcpy(event_copy.get(), &event, size);
  if (is_cross_endian_) {
    ByteSwap(&event_copy->header.type);
    ByteSwap(&event_copy->header.misc);
    ByteSwap(&event_copy->header.size);
  }

  uint32_t type = event_copy->header.type;
  if (is_cross_endian_) {
    switch (type) {
      case PERF_RECORD_SAMPLE:
        break;
      case PERF_RECORD_MMAP:
        ByteSwap(&event_copy->mmap.pid);
        ByteSwap(&event_copy->mmap.tid);
        ByteSwap(&event_copy->mmap.start);
        ByteSwap(&event_copy->mmap.len);
        ByteSwap(&event_copy->mmap.pgoff);
        break;
      case PERF_RECORD_MMAP2:
        ByteSwap(&event_copy->mmap2.pid);
        ByteSwap(&event_copy->mmap2.tid);
        ByteSwap(&event_copy->mmap2.start);
        ByteSwap(&event_copy->mmap2.len);
        ByteSwap(&event_copy->mmap2.pgoff);
        ByteSwap(&event_copy->mmap2.maj);
        ByteSwap(&event_copy->mmap2.min);
        ByteSwap(&event_copy->mmap2.ino);
        ByteSwap(&event_copy->mmap2.ino_generation);
        break;
      case PERF_RECORD_FORK:
      case PERF_RECORD_EXIT:
        ByteSwap(&event_copy->fork.pid);
        ByteSwap(&event_copy->fork.tid);
        ByteSwap(&event_copy->fork.ppid);
        ByteSwap(&event_copy->fork.ptid);
        break;
      case PERF_RECORD_COMM:
        ByteSwap(&event_copy->comm.pid);
        ByteSwap(&event_copy->comm.tid);
        break;
      case PERF_RECORD_LOST:
        ByteSwap(&event_copy->lost.id);
        ByteSwap(&event_copy->lost.lost);
        break;
      case PERF_RECORD_READ:
        ByteSwap(&event_copy->read.pid);
        ByteSwap(&event_copy->read.tid);
        ByteSwap(&event_copy->read.value);
        ByteSwap(&event_copy->read.time_enabled);
        ByteSwap(&event_copy->read.time_running);
        ByteSwap(&event_copy->read.id);
        break;
      case SIMPLE_PERF_RECORD_KERNEL_SYMBOL:
        break;
      default:
        LOG(FATAL) << "Unknown event type: " << type;
    }
  }

  events_.push_back(std::move(event_copy));

  return true;
}

size_t PerfReader::GetNumMetadata() const {
  // This is just the number of 1s in the binary representation of the metadata
  // mask. However, make sure to only use supported metadata, and don't include
  // branch stack (since it doesn't have an entry in the metadata section).
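  // E.g. if only HEADER_BUILD_ID and HEADER_HOSTNAME survive the masking,
  // the bitset below has two set bits and this returns 2.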
  uint64_t new_mask = metadata_mask_;
  new_mask &= kSupportedMetadataMask & ~(1 << HEADER_BRANCH_STACK);
  std::bitset<sizeof(new_mask) * CHAR_BIT> bits(new_mask);
  return bits.count();
}

size_t PerfReader::GetEventDescMetadataSize() const {
  size_t size = 0;
  if (event_types_.empty()) {
    return size;
  }
  if (metadata_mask_ & (1 << HEADER_EVENT_DESC)) {
    if (event_types_.size() > 0 && event_types_.size() != attrs_.size()) {
      LOG(ERROR) << "Mismatch between number of event type events and attr "
                 << "events: " << event_types_.size() << " vs "
                 << attrs_.size();
      return size;
    }
    size += sizeof(event_desc_num_events) + sizeof(event_desc_attr_size);
    CStringWithLength dummy;
    for (size_t i = 0; i < attrs_.size(); ++i) {
      size += sizeof(perf_event_attr) + sizeof(dummy.len);
      size += sizeof(event_desc_num_unique_ids);
      size += GetUint64AlignedStringLength(event_types_[i].name) *
          sizeof(char);
      size += attrs_[i].ids.size() * sizeof(attrs_[i].ids[0]);
    }
  }
  return size;
}

size_t PerfReader::GetBuildIDMetadataSize() const {
  size_t size = 0;
  for (size_t i = 0; i < build_id_events_.size(); ++i)
    size += build_id_events_[i]->header.size;
  return size;
}

size_t PerfReader::GetStringMetadataSize() const {
  size_t size = 0;
  for (size_t i = 0; i < string_metadata_.size(); ++i) {
    const PerfStringMetadata& metadata = string_metadata_[i];
    if (NeedsNumberOfStringData(metadata.type))
      size += sizeof(num_string_data_type);

    for (size_t j = 0; j < metadata.data.size(); ++j) {
      const CStringWithLength& str = metadata.data[j];
      size += sizeof(str.len) + (str.len * sizeof(char));
    }
  }
  return size;
}

size_t PerfReader::GetUint32MetadataSize() const {
  size_t size = 0;
  for (size_t i = 0; i < uint32_metadata_.size(); ++i) {
    const PerfUint32Metadata& metadata = uint32_metadata_[i];
    size += metadata.data.size() * sizeof(metadata.data[0]);
  }
  return size;
}

size_t PerfReader::GetUint64MetadataSize() const {
  size_t size = 0;
  for (size_t i = 0; i < uint64_metadata_.size(); ++i) {
    const PerfUint64Metadata& metadata = uint64_metadata_[i];
    size += metadata.data.size() * sizeof(metadata.data[0]);
  }
  return size;
}

size_t PerfReader::GetCPUTopologyMetadataSize() const {
  // Core siblings.
  size_t size = sizeof(num_siblings_type);
  for (size_t i = 0; i < cpu_topology_.core_siblings.size(); ++i) {
    const CStringWithLength& str = cpu_topology_.core_siblings[i];
    size += sizeof(str.len) + (str.len * sizeof(char));
  }

  // Thread siblings.
  size += sizeof(num_siblings_type);
  for (size_t i = 0; i < cpu_topology_.thread_siblings.size(); ++i) {
    const CStringWithLength& str = cpu_topology_.thread_siblings[i];
    size += sizeof(str.len) + (str.len * sizeof(char));
  }

  return size;
}

size_t PerfReader::GetNUMATopologyMetadataSize() const {
  size_t size = sizeof(numa_topology_num_nodes_type);
  for (size_t i = 0; i < numa_topology_.size(); ++i) {
    const PerfNodeTopologyMetadata& node = numa_topology_[i];
    size += sizeof(node.id);
    size += sizeof(node.total_memory) + sizeof(node.free_memory);
    size += sizeof(node.cpu_list.len) + node.cpu_list.len * sizeof(char);
  }
  return size;
}

bool PerfReader::NeedsNumberOfStringData(u32 type) const {
  return type == HEADER_CMDLINE;
}

bool PerfReader::LocalizeMMapFilenames(
    const std::map<string, string>& filename_map) {
  // Search for mmap/mmap2 events for which the filename needs to be updated.
  for (size_t i = 0; i < events_.size(); ++i) {
    string filename;
    size_t size_of_fixed_event_parts;
    event_t* event = events_[i].get();
    if (event->header.type == PERF_RECORD_MMAP) {
      filename = string(event->mmap.filename);
      size_of_fixed_event_parts =
          sizeof(event->mmap) - sizeof(event->mmap.filename);
    } else if (event->header.type == PERF_RECORD_MMAP2) {
      filename = string(event->mmap2.filename);
      size_of_fixed_event_parts =
          sizeof(event->mmap2) - sizeof(event->mmap2.filename);
    } else {
      continue;
    }

    const auto it = filename_map.find(filename);
    if (it == filename_map.end())  // not found
      continue;

    const string& new_filename = it->second;
    size_t old_len = GetUint64AlignedStringLength(filename);
    size_t new_len = GetUint64AlignedStringLength(new_filename);
    size_t old_offset = GetPerfSampleDataOffset(*event);
    size_t sample_size = event->header.size - old_offset;

    int size_change = new_len - old_len;
    size_t new_size = event->header.size + size_change;
    size_t new_offset = old_offset + size_change;
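    // E.g. (taking GetUint64AlignedStringLength to round strlen()+1 up to a
    // multiple of 8) replacing "/a/b.so" (strlen 7 -> aligned length 8) with
    // "/data/app/b.so" (strlen 14 -> aligned length 16) gives a size_change
    // of 8: the event grows by 8 bytes and the trailing sample data shifts
    // forward by the same amount.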

    if (size_change > 0) {
      // Allocate memory for a new event.
      event_t* old_event = event;
      malloced_unique_ptr<event_t> new_event(CallocMemoryForEvent(new_size));

      // Copy over everything except filename and sample info.
      memcpy(new_event.get(), old_event, size_of_fixed_event_parts);

      // Copy over the sample info to the correct location.
      char* old_addr = reinterpret_cast<char*>(old_event);
      char* new_addr = reinterpret_cast<char*>(new_event.get());
      memcpy(new_addr + new_offset, old_addr + old_offset, sample_size);

      events_[i] = std::move(new_event);
      event = events_[i].get();
    } else if (size_change < 0) {
      // Move the perf sample data to its new location.
      // Since source and dest could overlap, use memmove instead of memcpy.
      char* start_addr = reinterpret_cast<char*>(event);
      memmove(start_addr + new_offset, start_addr + old_offset, sample_size);
    }

    // Copy over the new filename and fix the size of the event.
    char* event_filename = nullptr;
    if (event->header.type == PERF_RECORD_MMAP) {
      event_filename = event->mmap.filename;
    } else if (event->header.type == PERF_RECORD_MMAP2) {
      event_filename = event->mmap2.filename;
    } else {
      LOG(FATAL) << "Unexpected event type";  // Impossible
    }
    CHECK_GT(snprintf(event_filename, new_filename.size() + 1, "%s",
                      new_filename.c_str()),
             0);
    event->header.size = new_size;
  }

  return true;
}

}  // namespace quipper