/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "RecordReadThread.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "event_type.h"
#include "get_test_data.h"
#include "record.h"
#include "record_equal_test.h"
#include "record_file.h"

using ::testing::_;
using ::testing::Eq;
using ::testing::Return;
using ::testing::Truly;

using namespace simpleperf;

class RecordBufferTest : public ::testing::Test {
 protected:
  void PushRecord(uint32_t type, size_t size) {
    char* p = buffer_->AllocWriteSpace(size);
    ASSERT_NE(p, nullptr);
    perf_event_header header;
    header.type = type;
    header.size = size;
    memcpy(p, &header, sizeof(header));
    buffer_->FinishWrite();
  }

  void PopRecord(uint32_t type, uint32_t size) {
    char* p = buffer_->GetCurrentRecord();
    ASSERT_NE(p, nullptr);
    perf_event_header header;
    memcpy(&header, p, sizeof(header));
    ASSERT_EQ(header.type, type);
    ASSERT_EQ(header.size, size);
    buffer_->MoveToNextRecord();
  }

  std::unique_ptr<RecordBuffer> buffer_;
};

TEST_F(RecordBufferTest, fifo) {
  for (size_t loop = 0; loop < 10; ++loop) {
    buffer_.reset(new RecordBuffer(sizeof(perf_event_header) * 10));
    size_t record_size = sizeof(perf_event_header) + loop;
    size_t max_records_in_buffer = (buffer_->size() - 2 * record_size + 1) / record_size;
    uint32_t write_id = 0;
    uint32_t read_id = 0;
    while (read_id < 100) {
      while (write_id < 100 && write_id - read_id < max_records_in_buffer) {
        ASSERT_NO_FATAL_FAILURE(PushRecord(write_id++, record_size));
      }
      ASSERT_NO_FATAL_FAILURE(PopRecord(read_id++, record_size));
    }
  }
}

TEST(RecordParser, smoke) {
  std::unique_ptr<RecordFileReader> reader =
      RecordFileReader::CreateInstance(GetTestData(PERF_DATA_NO_UNWIND));
  ASSERT_TRUE(reader);
  RecordParser parser(*reader->AttrSection()[0].attr);
  auto process_record = [&](std::unique_ptr<Record> record) {
    if (record->type() == PERF_RECORD_MMAP || record->type() == PERF_RECORD_COMM ||
        record->type() == PERF_RECORD_FORK || record->type() == PERF_RECORD_SAMPLE) {
      perf_event_header header;
      memcpy(&header, record->Binary(), sizeof(header));
      auto read_record_fn = [&](size_t pos, size_t size, void* dest) {
        memcpy(dest, record->Binary() + pos, size);
      };
      size_t pos = parser.GetTimePos(header);
      ASSERT_NE(0u, pos);
      uint64_t time;
      read_record_fn(pos, sizeof(time), &time);
      ASSERT_EQ(record->Timestamp(), time);
      if (record->type() == PERF_RECORD_SAMPLE) {
        auto sr = static_cast<SampleRecord*>(record.get());
        pos = parser.GetStackSizePos(read_record_fn);
        ASSERT_NE(0u, pos);
        uint64_t stack_size;
        read_record_fn(pos, sizeof(stack_size), &stack_size);
        ASSERT_EQ(sr->stack_user_data.size, stack_size);

        // Test pid pos in sample records.
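        // Unlike the stack size, whose offset depends on variable-length fields
        // earlier in the record (hence GetStackSizePos() takes a read callback),
        // the pid presumably sits at a fixed offset once the event's sample_type
        // is known, which is why GetPidPosInSampleRecord() needs no record data.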
        pos = parser.GetPidPosInSampleRecord();
        uint32_t pid;
        read_record_fn(pos, sizeof(pid), &pid);
        ASSERT_EQ(sr->tid_data.pid, pid);
      }
    }
  };
  ASSERT_TRUE(reader->ReadDataSection([&](std::unique_ptr<Record> record) {
    process_record(std::move(record));
    return !HasFatalFailure();
  }));
}

struct MockEventFd : public EventFd {
  MockEventFd(const perf_event_attr& attr, int cpu, char* buffer, size_t buffer_size,
              bool mock_aux_buffer)
      : EventFd(attr, -1, "", 0, cpu) {
    mmap_data_buffer_ = buffer;
    mmap_data_buffer_size_ = buffer_size;
    if (mock_aux_buffer) {
      aux_buffer_size_ = 1;  // Make HasAuxBuffer() return true.
    }
  }

  MOCK_METHOD2(CreateMappedBuffer, bool(size_t, bool));
  MOCK_METHOD0(DestroyMappedBuffer, void());
  MOCK_METHOD2(StartPolling, bool(IOEventLoop&, const std::function<bool()>&));
  MOCK_METHOD0(StopPolling, bool());
  MOCK_METHOD1(GetAvailableMmapDataSize, size_t(size_t&));
  MOCK_METHOD1(DiscardMmapData, void(size_t));

  MOCK_METHOD2(CreateAuxBuffer, bool(size_t, bool));
  MOCK_METHOD0(DestroyAuxBuffer, void());
  MOCK_METHOD4(GetAvailableAuxData, uint64_t(char**, size_t*, char**, size_t*));
  MOCK_METHOD1(DiscardAuxData, void(size_t));
};

static perf_event_attr CreateFakeEventAttr() {
  const EventType* type = FindEventTypeByName("cpu-clock");
  CHECK(type != nullptr);
  return CreateDefaultPerfEventAttr(*type);
}

static std::vector<std::unique_ptr<Record>> CreateFakeRecords(
    const perf_event_attr& attr, size_t record_count, size_t stack_size, size_t dyn_stack_size) {
  std::vector<std::unique_ptr<Record>> records;
  for (size_t i = 0; i < record_count; ++i) {
    SampleRecord* r = new SampleRecord(attr, i, i + 1, i + 2, i + 3, i + 4, i + 5, i + 6, {},
                                       std::vector<char>(stack_size), dyn_stack_size);
    records.emplace_back(r);
  }
  return records;
}

static size_t AlignToPowerOfTwo(size_t value) {
  size_t result = 1;
  while (result < value) {
    result <<= 1;
  }
  return result;
}

static inline std::function<bool(size_t&)> SetArg(size_t value) {
  return [value](size_t& arg) {
    arg = value;
    return true;
  };
}

TEST(KernelRecordReader, smoke) {
  // 1. Create fake records.
  perf_event_attr attr = CreateFakeEventAttr();
  std::vector<std::unique_ptr<Record>> records = CreateFakeRecords(attr, 10, 0, 0);
  // 2. Create a buffer whose size is a power of two.
  size_t data_size = records.size() * records[0]->size();
  std::vector<char> buffer(AlignToPowerOfTwo(data_size));
  // 3. Copy record data into the buffer. Since a record in the kernel buffer can wrap around
  // to the beginning of the buffer, create that case with the first record.
  size_t data_pos = buffer.size() - 4;
  memcpy(&buffer[data_pos], records[0]->Binary(), 4);
  memcpy(&buffer[0], records[0]->Binary() + 4, records[0]->size() - 4);
  size_t pos = records[0]->size() - 4;
  for (size_t i = 1; i < records.size(); ++i) {
    memcpy(&buffer[pos], records[i]->Binary(), records[i]->size());
    pos += records[i]->size();
  }
  // 4. Read records using KernelRecordReader.
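  // GetAvailableMmapDataSize() is mocked to write the read offset (data_pos, deliberately
  // placed 4 bytes before the end of the buffer) through its out-parameter and to return
  // the number of readable bytes, so KernelRecordReader has to stitch the first record
  // back together across the buffer boundary.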
  MockEventFd event_fd(attr, 0, buffer.data(), buffer.size(), false);

  EXPECT_CALL(event_fd, GetAvailableMmapDataSize(Truly(SetArg(data_pos))))
      .Times(1)
      .WillOnce(Return(data_size));
  EXPECT_CALL(event_fd, DiscardMmapData(Eq(data_size))).Times(1);
  KernelRecordReader reader(&event_fd);
  RecordParser parser(attr);
  ASSERT_TRUE(reader.GetDataFromKernelBuffer());
  for (size_t i = 0; i < records.size(); ++i) {
    ASSERT_TRUE(reader.MoveToNextRecord(parser));
    ASSERT_EQ(reader.RecordHeader().type, records[i]->type());
    ASSERT_EQ(reader.RecordHeader().size, records[i]->size());
    ASSERT_EQ(reader.RecordTime(), records[i]->Timestamp());
    std::vector<char> data(reader.RecordHeader().size);
    reader.ReadRecord(0, data.size(), &data[0]);
    ASSERT_EQ(0, memcmp(&data[0], records[i]->Binary(), records[i]->size()));
  }
  ASSERT_FALSE(reader.MoveToNextRecord(parser));
}

class RecordReadThreadTest : public ::testing::Test {
 protected:
  std::vector<EventFd*> CreateFakeEventFds(const perf_event_attr& attr, size_t event_fd_count) {
    size_t records_per_fd = records_.size() / event_fd_count;
    buffers_.clear();
    buffers_.resize(event_fd_count);
    for (size_t i = 0; i < records_.size(); ++i) {
      std::vector<char>& buffer = buffers_[i % event_fd_count];
      buffer.insert(buffer.end(), records_[i]->Binary(),
                    records_[i]->Binary() + records_[i]->size());
    }
    size_t data_size = records_per_fd * records_[0]->size();
    size_t buffer_size = AlignToPowerOfTwo(data_size);
    for (auto& buffer : buffers_) {
      buffer.resize(buffer_size);
    }
    event_fds_.resize(event_fd_count);
    for (size_t i = 0; i < event_fd_count; ++i) {
      event_fds_[i].reset(new MockEventFd(attr, i, buffers_[i].data(), buffer_size, false));
      EXPECT_CALL(*event_fds_[i], CreateMappedBuffer(_, _)).Times(1).WillOnce(Return(true));
      EXPECT_CALL(*event_fds_[i], StartPolling(_, _)).Times(1).WillOnce(Return(true));
      EXPECT_CALL(*event_fds_[i], GetAvailableMmapDataSize(Truly(SetArg(0))))
          .Times(1)
          .WillOnce(Return(data_size));
      EXPECT_CALL(*event_fds_[i], DiscardMmapData(Eq(data_size))).Times(1);
      EXPECT_CALL(*event_fds_[i], StopPolling()).Times(1).WillOnce(Return(true));
      EXPECT_CALL(*event_fds_[i], DestroyMappedBuffer()).Times(1);
      EXPECT_CALL(*event_fds_[i], DestroyAuxBuffer()).Times(1);
    }
    std::vector<EventFd*> result;
    for (auto& fd : event_fds_) {
      result.push_back(fd.get());
    }
    return result;
  }

  std::vector<std::unique_ptr<Record>> records_;
  std::vector<std::vector<char>> buffers_;
  std::vector<std::unique_ptr<MockEventFd>> event_fds_;
};

TEST_F(RecordReadThreadTest, handle_cmds) {
  perf_event_attr attr = CreateFakeEventAttr();
  records_ = CreateFakeRecords(attr, 2, 0, 0);
  std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 2);
  RecordReadThread thread(128 * 1024, event_fds[0]->attr(), 1, 1, 0);
  IOEventLoop loop;
  bool has_notify = false;
  auto callback = [&]() {
    has_notify = true;
    return loop.ExitLoop();
  };
  ASSERT_TRUE(thread.RegisterDataCallback(loop, callback));
  ASSERT_TRUE(thread.AddEventFds(event_fds));
  ASSERT_TRUE(thread.SyncKernelBuffer());
  ASSERT_TRUE(loop.RunLoop());
  ASSERT_TRUE(has_notify);
  ASSERT_TRUE(thread.GetRecord());
  ASSERT_TRUE(thread.RemoveEventFds(event_fds));
  ASSERT_TRUE(thread.StopReadThread());
}
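// The lifecycle exercised above is the protocol the remaining tests rely on:
// RegisterDataCallback() arms a data notification on the caller's IOEventLoop,
// AddEventFds()/SyncKernelBuffer() hand kernel buffers to the read thread and ask it
// to drain them, the callback then pulls records with GetRecord() until it returns
// nullptr, and RemoveEventFds()/StopReadThread() tear everything down.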
TEST_F(RecordReadThreadTest, read_records) {
  perf_event_attr attr = CreateFakeEventAttr();
  RecordReadThread thread(128 * 1024, attr, 1, 1, 0);
  IOEventLoop loop;
  size_t record_index;
  auto callback = [&]() {
    while (true) {
      std::unique_ptr<Record> r = thread.GetRecord();
      if (!r) {
        break;
      }
      std::unique_ptr<Record>& expected = records_[record_index++];
      if (r->size() != expected->size() ||
          memcmp(r->Binary(), expected->Binary(), r->size()) != 0) {
        return false;
      }
    }
    return loop.ExitLoop();
  };
  ASSERT_TRUE(thread.RegisterDataCallback(loop, callback));
  for (size_t event_fd_count = 1; event_fd_count < 10; ++event_fd_count) {
    records_ = CreateFakeRecords(attr, event_fd_count * 10, 0, 0);
    std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, event_fd_count);
    record_index = 0;
    ASSERT_TRUE(thread.AddEventFds(event_fds));
    ASSERT_TRUE(thread.SyncKernelBuffer());
    ASSERT_TRUE(loop.RunLoop());
    ASSERT_EQ(record_index, records_.size());
    ASSERT_TRUE(thread.RemoveEventFds(event_fds));
  }
}

TEST_F(RecordReadThreadTest, process_sample_record) {
  perf_event_attr attr = CreateFakeEventAttr();
  attr.sample_type |= PERF_SAMPLE_STACK_USER;
  attr.sample_stack_user = 64 * 1024;
  size_t record_buffer_size = 128 * 1024;
  RecordReadThread thread(record_buffer_size, attr, 1, 1, 0);
  IOEventLoop loop;
  ASSERT_TRUE(thread.RegisterDataCallback(loop, []() { return true; }));

  auto read_record = [&](std::unique_ptr<Record>& r) {
    std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 1);
    ASSERT_TRUE(thread.AddEventFds(event_fds));
    ASSERT_TRUE(thread.SyncKernelBuffer());
    ASSERT_TRUE(thread.RemoveEventFds(event_fds));
    r = thread.GetRecord();
  };

  // When the free space in the record buffer is above the low level, only invalid stack data
  // in sample records is removed.
  thread.SetBufferLevels(0, 0);
  records_ = CreateFakeRecords(attr, 1, 8192, 8192);
  std::unique_ptr<Record> r;
  read_record(r);
  ASSERT_TRUE(r);
  SampleRecord* sr = static_cast<SampleRecord*>(r.get());
  ASSERT_EQ(sr->stack_user_data.size, 8192u);
  ASSERT_EQ(sr->stack_user_data.dyn_size, 8192u);
  records_ = CreateFakeRecords(attr, 1, 8192, 4096);
  read_record(r);
  ASSERT_TRUE(r);
  sr = static_cast<SampleRecord*>(r.get());
  ASSERT_EQ(sr->stack_user_data.size, 4096u);
  ASSERT_EQ(sr->stack_user_data.dyn_size, 4096u);

  // When the free space in the record buffer is below the low level but above the critical
  // level, only 1K of stack data is left in each sample record.
  thread.SetBufferLevels(record_buffer_size, 0);
  read_record(r);
  ASSERT_TRUE(r);
  sr = static_cast<SampleRecord*>(r.get());
  ASSERT_EQ(sr->stack_user_data.size, 1024u);
  ASSERT_EQ(sr->stack_user_data.dyn_size, 1024u);

  // When the free space in the record buffer is below the critical level, sample records
  // are dropped.
  thread.SetBufferLevels(record_buffer_size, record_buffer_size);
  read_record(r);
  ASSERT_FALSE(r);
  ASSERT_EQ(thread.GetStat().lost_samples, 1u);
  ASSERT_EQ(thread.GetStat().lost_non_samples, 0u);
  ASSERT_EQ(thread.GetStat().cut_stack_samples, 1u);
}
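// SetBufferLevels(low, critical) appears to take byte thresholds compared against the
// free space left in the record buffer: (0, 0) keeps the buffer permanently in the
// healthy regime, while passing the full buffer size forces the corresponding degraded
// regime. With record_buffer_size = 128 KB, levels of (128 KB, 0) above make every
// sample look low on space, so its 8 KB stack is cut to 1 KB and counted in
// cut_stack_samples; (128 KB, 128 KB) forces the critical regime, dropping the sample
// into lost_samples instead.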
// Test that the data notification persists until the record buffer is empty, so we can read
// all records even when reading one record at a time.
TEST_F(RecordReadThreadTest, has_data_notification_until_buffer_empty) {
  perf_event_attr attr = CreateFakeEventAttr();
  RecordReadThread thread(128 * 1024, attr, 1, 1, 0);
  IOEventLoop loop;
  size_t record_index = 0;
  auto read_one_record = [&]() {
    std::unique_ptr<Record> r = thread.GetRecord();
    if (!r) {
      return loop.ExitLoop();
    }
    std::unique_ptr<Record>& expected = records_[record_index++];
    if (r->size() != expected->size() ||
        memcmp(r->Binary(), expected->Binary(), r->size()) != 0) {
      return false;
    }
    return true;
  };
  ASSERT_TRUE(thread.RegisterDataCallback(loop, read_one_record));
  records_ = CreateFakeRecords(attr, 2, 0, 0);
  std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 1);
  ASSERT_TRUE(thread.AddEventFds(event_fds));
  ASSERT_TRUE(thread.SyncKernelBuffer());
  ASSERT_TRUE(loop.RunLoop());
  ASSERT_EQ(record_index, records_.size());
  ASSERT_TRUE(thread.RemoveEventFds(event_fds));
}

TEST_F(RecordReadThreadTest, no_cut_samples) {
  perf_event_attr attr = CreateFakeEventAttr();
  attr.sample_type |= PERF_SAMPLE_STACK_USER;
  attr.sample_stack_user = 64 * 1024;
  RecordReadThread thread(128 * 1024, attr, 1, 1, 0, false);
  IOEventLoop loop;
  ASSERT_TRUE(thread.RegisterDataCallback(loop, []() { return true; }));
  const size_t total_samples = 100;
  records_ = CreateFakeRecords(attr, total_samples, 8 * 1024, 8 * 1024);
  std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 1);
  ASSERT_TRUE(thread.AddEventFds(event_fds));
  ASSERT_TRUE(thread.SyncKernelBuffer());
  ASSERT_TRUE(thread.RemoveEventFds(event_fds));
  size_t received_samples = 0;
  while (thread.GetRecord()) {
    received_samples++;
  }
  ASSERT_GT(received_samples, 0u);
  ASSERT_GT(thread.GetStat().lost_samples, 0u);
  ASSERT_EQ(thread.GetStat().lost_samples, total_samples - received_samples);
  ASSERT_EQ(thread.GetStat().cut_stack_samples, 0u);
}
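// The trailing RecordReadThread constructor arguments are passed positionally in these
// tests, so their meaning is inferred from the behavior checked here: the sixth
// argument (false above) appears to control whether oversized sample stacks may be
// cut rather than dropped, which is why no_cut_samples sees lost_samples grow while
// cut_stack_samples stays zero; the seventh (true in exclude_perf below) appears to
// enable dropping samples from the profiler's own process, since the record created
// with getpid() is the one that goes missing.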
TEST_F(RecordReadThreadTest, exclude_perf) {
  perf_event_attr attr = CreateFakeEventAttr();
  attr.sample_type |= PERF_SAMPLE_STACK_USER;
  size_t stack_size = 1024;
  attr.sample_stack_user = stack_size;
  records_.emplace_back(new SampleRecord(attr, 0, 1, getpid(), 3, 4, 5, 6, {},
                                         std::vector<char>(stack_size), stack_size));
  records_.emplace_back(new SampleRecord(attr, 0, 1, getpid() + 1, 3, 4, 5, 6, {},
                                         std::vector<char>(stack_size), stack_size));

  auto read_records = [&](RecordReadThread& thread, std::vector<std::unique_ptr<Record>>& records) {
    records.clear();
    std::vector<EventFd*> event_fds = CreateFakeEventFds(attr, 1);
    ASSERT_TRUE(thread.AddEventFds(event_fds));
    ASSERT_TRUE(thread.SyncKernelBuffer());
    ASSERT_TRUE(thread.RemoveEventFds(event_fds));
    while (auto r = thread.GetRecord()) {
      records.emplace_back(std::move(r));
    }
  };

  // By default, no samples are excluded.
  RecordReadThread thread(128 * 1024, attr, 1, 1, 0);
  IOEventLoop loop;
  ASSERT_TRUE(thread.RegisterDataCallback(loop, []() { return true; }));
  std::vector<std::unique_ptr<Record>> received_records;
  read_records(thread, received_records);
  ASSERT_EQ(received_records.size(), 2u);
  CheckRecordEqual(*received_records[0], *records_[0]);
  CheckRecordEqual(*received_records[1], *records_[1]);

  // With exclude_perf, the first sample is excluded.
  RecordReadThread thread2(128 * 1024, attr, 1, 1, 0, true, true);
  ASSERT_TRUE(thread2.RegisterDataCallback(loop, []() { return true; }));
  read_records(thread2, received_records);
  ASSERT_EQ(received_records.size(), 1u);
  CheckRecordEqual(*received_records[0], *records_[1]);
}

struct FakeAuxData {
  std::vector<char> buf1;
  std::vector<char> buf2;
  std::vector<char> pad;
  bool lost;

  FakeAuxData(size_t buf1_size, size_t buf2_size, char c, size_t pad_size, bool lost)
      : buf1(buf1_size, c), buf2(buf2_size, c), pad(pad_size, 0), lost(lost) {}
};

TEST_F(RecordReadThreadTest, read_aux_data) {
  const EventType* type = FindEventTypeByName("cs-etm");
  if (type == nullptr) {
    GTEST_LOG_(INFO) << "Skip this test as the cs-etm event type isn't available";
    return;
  }
  std::vector<FakeAuxData> aux_data;
  aux_data.emplace_back(40, 0, '0', 0, false);   // one buffer
  aux_data.emplace_back(40, 40, '1', 0, false);  // two buffers
  aux_data.emplace_back(36, 0, '2', 4, false);   // one buffer needing padding to 8-byte alignment
  aux_data.emplace_back(1024, 0, '3', 0, true);  // one buffer too big to fit into RecordReadThread
  size_t test_index = 0;

  auto SetBuf1 = [&](char** buf1) {
    *buf1 = aux_data[test_index].buf1.data();
    return true;
  };
  auto SetSize1 = [&](size_t* size1) {
    *size1 = aux_data[test_index].buf1.size();
    return true;
  };
  auto SetBuf2 = [&](char** buf2) {
    *buf2 = aux_data[test_index].buf2.data();
    return true;
  };
  auto SetSize2 = [&](size_t* size2) {
    *size2 = aux_data[test_index].buf2.size();
    return true;
  };
  auto CheckDiscardSize = [&](size_t size) {
    return size == aux_data[test_index].buf1.size() + aux_data[test_index].buf2.size();
  };

  const size_t AUX_BUFFER_SIZE = 4096;

  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
  MockEventFd fd(attr, 0, nullptr, 1, true);
  EXPECT_CALL(fd, CreateMappedBuffer(_, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(fd, CreateAuxBuffer(Eq(AUX_BUFFER_SIZE), _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(fd, StartPolling(_, _)).Times(1).WillOnce(Return(true));
  EXPECT_CALL(fd, GetAvailableMmapDataSize(_)).Times(aux_data.size()).WillRepeatedly(Return(0));
  EXPECT_CALL(fd,
              GetAvailableAuxData(Truly(SetBuf1), Truly(SetSize1), Truly(SetBuf2), Truly(SetSize2)))
      .Times(aux_data.size());
  EXPECT_CALL(fd, DiscardAuxData(Truly(CheckDiscardSize))).Times(aux_data.size());
  EXPECT_CALL(fd, StopPolling()).Times(1).WillOnce(Return(true));
  EXPECT_CALL(fd, DestroyMappedBuffer()).Times(1);
  EXPECT_CALL(fd, DestroyAuxBuffer()).Times(1);

  RecordReadThread thread(1024, attr, 1, 1, AUX_BUFFER_SIZE);
  IOEventLoop loop;
  ASSERT_TRUE(thread.RegisterDataCallback(loop, []() { return true; }));
  ASSERT_TRUE(thread.AddEventFds({&fd}));
  for (; test_index < aux_data.size(); ++test_index) {
    ASSERT_TRUE(thread.SyncKernelBuffer());
    std::unique_ptr<Record> r = thread.GetRecord();
    if (aux_data[test_index].lost) {
      ASSERT_TRUE(r == nullptr);
      continue;
    }
    ASSERT_TRUE(r);
    ASSERT_EQ(r->type(), PERF_RECORD_AUXTRACE);
    auto auxtrace = static_cast<AuxTraceRecord*>(r.get());
    auto& expected = aux_data[test_index];
    ASSERT_EQ(auxtrace->data->aux_size,
              expected.buf1.size() + expected.buf2.size() + expected.pad.size());
    const char* p = auxtrace->location.addr;
    ASSERT_TRUE(p != nullptr);
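    // The AUXTRACE payload should be laid out contiguously as buf1, then buf2, then
    // zero padding, matching how the two halves of the (possibly wrapped) aux ring
    // buffer plus any alignment padding were copied out.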
    if (!expected.buf1.empty()) {
      ASSERT_EQ(memcmp(p, expected.buf1.data(), expected.buf1.size()), 0);
      p += expected.buf1.size();
    }
    if (!expected.buf2.empty()) {
      ASSERT_EQ(memcmp(p, expected.buf2.data(), expected.buf2.size()), 0);
      p += expected.buf2.size();
    }
    if (!expected.pad.empty()) {
      ASSERT_EQ(memcmp(p, expected.pad.data(), expected.pad.size()), 0);
    }
  }
  ASSERT_TRUE(thread.GetRecord() == nullptr);
  ASSERT_TRUE(thread.RemoveEventFds({&fd}));
  size_t aux_data_size = 0;
  size_t lost_aux_data_size = 0;
  for (auto& aux : aux_data) {
    if (aux.lost) {
      lost_aux_data_size += aux.buf1.size() + aux.buf2.size();
    } else {
      aux_data_size += aux.buf1.size() + aux.buf2.size();
    }
  }
  ASSERT_EQ(aux_data_size, thread.GetStat().aux_data_size);
  ASSERT_EQ(lost_aux_data_size, thread.GetStat().lost_aux_data_size);
}