/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <array>
#include <atomic>
#include <chrono>
#include <deque>
#include <thread>

#include "perfetto/ext/base/metatrace.h"
#include "perfetto/ext/base/thread_annotations.h"
#include "src/base/test/test_task_runner.h"
#include "test/gtest_and_gmock.h"

namespace perfetto {
namespace {

namespace m = ::perfetto::metatrace;
using ::testing::Invoke;

class MetatraceTest : public ::testing::Test {
 public:
  void SetUp() override { m::Disable(); }

  void TearDown() override {
    task_runner_.RunUntilIdle();
    m::Disable();
  }

  void Enable(uint32_t tags) {
    m::Enable([this] { ReadCallback(); }, &task_runner_, tags);
  }

  MOCK_METHOD0(ReadCallback, void());
  base::TestTaskRunner task_runner_;
};

TEST_F(MetatraceTest, TagEnablingLogic) {
  EXPECT_CALL(*this, ReadCallback()).Times(0);
  for (int iteration = 0; iteration < 3; iteration++) {
    ASSERT_EQ(m::RingBuffer::GetSizeForTesting(), 0u);

    // No events should be traced before enabling.
    m::TraceCounter(m::TAG_ANY, /*id=*/1, /*value=*/42);
    { m::ScopedEvent evt(m::TAG_ANY, /*id=*/1); }
    ASSERT_EQ(m::RingBuffer::GetSizeForTesting(), 0u);

    // Enable tag bits 1 (=2) and 2 (=4) and verify that only events carrying
    // those tags are added.
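    // The matching rule, as inferred from the Yes/No expectations below: an
    // event is recorded iff (event_tag & enabled_tags) != 0, and TAG_ANY acts
    // as the catch-all mask that always matches. E.g., with tags 2|4 enabled:
    //   (1 | 8) & (2 | 4) == 0  -> dropped ("No").
    //   (4 | 8) & (2 | 4) != 0  -> recorded ("Yes").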
    auto t_start = metatrace::TraceTimeNowNs();
    Enable(/*tags=*/2 | 4);
    m::TraceCounter(/*tag=*/1, /*id=*/42, /*value=*/10);      // No.
    m::TraceCounter(/*tag=*/2, /*id=*/42, /*value=*/11);      // Yes.
    m::TraceCounter(/*tag=*/4, /*id=*/42, /*value=*/12);      // Yes.
    m::TraceCounter(/*tag=*/1 | 2, /*id=*/42, /*value=*/13);  // Yes.
    m::TraceCounter(/*tag=*/1 | 4, /*id=*/42, /*value=*/14);  // Yes.
    m::TraceCounter(/*tag=*/2 | 4, /*id=*/42, /*value=*/15);  // Yes.
    m::TraceCounter(/*tag=*/4 | 8, /*id=*/42, /*value=*/16);  // Yes.
    m::TraceCounter(/*tag=*/1 | 8, /*id=*/42, /*value=*/17);  // No.
    m::TraceCounter(m::TAG_ANY, /*id=*/42, /*value=*/18);     // Yes.
    { m::ScopedEvent evt(/*tag=*/1, /*id=*/20); }      // No.
    { m::ScopedEvent evt(/*tag=*/8, /*id=*/21); }      // No.
    { m::ScopedEvent evt(/*tag=*/2, /*id=*/22); }      // Yes.
    { m::ScopedEvent evt(/*tag=*/4 | 8, /*id=*/23); }  // Yes.
    { m::ScopedEvent evt(m::TAG_ANY, /*id=*/24); }     // Yes.

    {
      auto it = m::RingBuffer::GetReadIterator();
      ASSERT_TRUE(it);
      ASSERT_EQ(it->counter_value, 11);
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->counter_value, 12);
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->counter_value, 13);
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->counter_value, 14);
    }

    // Test that destroying and re-creating the iterator resumes reading from
    // the right place.
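    // (A fresh iterator resumes where the previous one stopped because, as
    // also noted in the InterleavedReadWrites test below, the ring buffer's
    // read pointer is advanced only when the previous iterator is destroyed.)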
    {
      auto it = m::RingBuffer::GetReadIterator();
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->counter_value, 15);
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->counter_value, 16);
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->counter_value, 18);
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->type_and_id, 22);
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->type_and_id, 23);
      ASSERT_TRUE(++it);
      ASSERT_EQ(it->type_and_id, 24);
      ASSERT_FALSE(++it);
    }

    // Test that the record can hold 32-bit thread IDs (values of up to 262144
    // have been observed in /proc/sys/kernel/pid_max) and timestamps spanning
    // up to 2 days.
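    // A note on the 2-days figure (an inference about the bound, not
    // something the metatrace API states explicitly): 48 h = 48 * 3600 * 1e9
    // ns = 172,800,000,000,000 ns, which falls between 2^47 and 2^48, so the
    // round-trip below exercises a timestamp encoding narrower than 64 bits.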
    {
      auto* record = m::RingBuffer::AppendNewRecord();
      record->counter_value = 42;
      constexpr uint64_t kTwoDays = 48ULL * 3600 * 1000 * 1000 * 1000;
      record->set_timestamp(t_start + kTwoDays);
      record->thread_id = 0xbabaf00d;
      record->type_and_id = m::Record::kTypeCounter;

      auto it = m::RingBuffer::GetReadIterator();
      ASSERT_TRUE(it);
      ASSERT_EQ(it->timestamp_ns(), t_start + kTwoDays);
      ASSERT_EQ(it->thread_id, 0xbabaf00d);
      ASSERT_FALSE(++it);
    }

    m::Disable();
  }
}

// Test that overruns are handled properly and that the writer re-synchronizes
// after the reader catches up.
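// The contract exercised below: once kCapacity records are buffered, further
// writes are dropped (the buffer never wraps over unread records) and
// has_overruns() latches true; draining the buffer via a read iterator lets
// the next cycle start writing again from a consistent state.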
TEST_F(MetatraceTest, HandleOverruns) {
  int cnt = 0;
  int exp_cnt = 0;
  for (size_t iteration = 0; iteration < 3; iteration++) {
    Enable(m::TAG_ANY);
    std::string checkpoint_name = "ReadTask " + std::to_string(iteration);
    auto checkpoint = task_runner_.CreateCheckpoint(checkpoint_name);
    EXPECT_CALL(*this, ReadCallback()).WillOnce(Invoke(checkpoint));

    for (size_t i = 0; i < m::RingBuffer::kCapacity; i++)
      m::TraceCounter(/*tag=*/1, /*id=*/42, /*value=*/cnt++);
    ASSERT_EQ(m::RingBuffer::GetSizeForTesting(), m::RingBuffer::kCapacity);
    ASSERT_FALSE(m::RingBuffer::has_overruns());

    for (int n = 0; n < 3; n++)
      m::TraceCounter(/*tag=*/1, /*id=*/42, /*value=*/-1);  // Will overrun.

    ASSERT_TRUE(m::RingBuffer::has_overruns());
    ASSERT_EQ(m::RingBuffer::GetSizeForTesting(), m::RingBuffer::kCapacity);

    for (auto it = m::RingBuffer::GetReadIterator(); it; ++it)
      ASSERT_EQ(it->counter_value, exp_cnt++);

    ASSERT_EQ(m::RingBuffer::GetSizeForTesting(), 0u);

    task_runner_.RunUntilCheckpoint(checkpoint_name);
    m::Disable();
  }
}

// Sets up a scenario where the writer writes continuously (while guaranteeing
// not to overrun) and the reader periodically catches up. Tests that all
// events are seen without gaps.
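// The writer below avoids overruns with a simple backpressure loop: it
// stalls, sleeping with exponential backoff, while it is within one slot of
// filling the buffer (i.e. while i - last_value_read >= kCapacity - 1), and
// resumes once the reader publishes a newer last_value_read.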
TEST_F(MetatraceTest, InterleavedReadWrites) {
  Enable(m::TAG_ANY);
  constexpr int kMaxValue = m::RingBuffer::kCapacity * 3;

  std::atomic<int> last_value_read{-1};
  auto read_task = [&last_value_read] {
    int last = last_value_read;
    for (auto it = m::RingBuffer::GetReadIterator(); it; ++it) {
      if (it->type_and_id.load(std::memory_order_acquire) == 0)
        break;
      // TSan doesn't know about the happens-before relationship between the
      // type_and_id marker and the value being valid. Fixing this properly
      // would require making all fields of the metatrace record std::atomic
      // and reading them with memory_order_relaxed, which is overkill.
      PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&it->counter_value, sizeof(int), "")
      int32_t counter_value = it->counter_value;
      EXPECT_EQ(counter_value, last + 1);
      last = counter_value;
    }
    // The read pointer is incremented only after destroying the iterator.
    // Publish the last read value after the loop.
    last_value_read = last;
  };

  EXPECT_CALL(*this, ReadCallback()).WillRepeatedly(Invoke(read_task));

  // The writer continuously writes counter values from 0 to kMaxValue - 1.
  auto writer_done = task_runner_.CreateCheckpoint("writer_done");
  std::thread writer_thread([this, &writer_done, &last_value_read] {
    for (int i = 0; i < kMaxValue; i++) {
      m::TraceCounter(/*tag=*/1, /*id=*/1, i);
      const int kCapacity = static_cast<int>(m::RingBuffer::kCapacity);

      // Wait for the reader to avoid overruns.
      // Using memory_order_relaxed because the QEMU arm emulator seems to
      // incur very high costs when dealing with full barriers, causing
      // timeouts.
      for (int sleep_us = 1;
           i - last_value_read.load(std::memory_order_relaxed) >=
           kCapacity - 1;
           sleep_us = std::min(sleep_us * 10, 1000)) {
        std::this_thread::sleep_for(std::chrono::microseconds(sleep_us));
      }
    }
    task_runner_.PostTask(writer_done);
  });

  task_runner_.RunUntilCheckpoint("writer_done");
  writer_thread.join();

  read_task();  // Do a final read pass.
  EXPECT_FALSE(m::RingBuffer::has_overruns());
  EXPECT_EQ(last_value_read, kMaxValue - 1);
}

// Try to hit potential thread races:
// - Test that the read callback is posted only once per cycle.
// - Test that the final size of the ring buffer is sane.
// - Test that event records are consistent within each thread's event stream.
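// Decoding note, based on how this test unpacks records: type_and_id packs
// the record type into the bits of Record::kTypeMask and the event id (here,
// the writing thread's index) into the remaining bits, so masking with
// ~kTypeMask recovers which thread wrote each record.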
TEST_F(MetatraceTest, ThreadRaces) {
  for (size_t iteration = 0; iteration < 10; iteration++) {
    Enable(m::TAG_ANY);

    std::string checkpoint_name = "ReadTask " + std::to_string(iteration);
    auto checkpoint = task_runner_.CreateCheckpoint(checkpoint_name);
    EXPECT_CALL(*this, ReadCallback()).WillOnce(Invoke(checkpoint));

    auto thread_main = [](uint16_t thd_idx) {
      for (size_t i = 0; i < m::RingBuffer::kCapacity + 500; i++)
        m::TraceCounter(/*tag=*/1, thd_idx, static_cast<int>(i));
    };

    constexpr size_t kNumThreads = 8;
    std::array<std::thread, kNumThreads> threads;
    for (size_t thd_idx = 0; thd_idx < kNumThreads; thd_idx++)
      threads[thd_idx] = std::thread(thread_main, thd_idx);

    for (auto& t : threads)
      t.join();

    task_runner_.RunUntilCheckpoint(checkpoint_name);
    ASSERT_EQ(m::RingBuffer::GetSizeForTesting(), m::RingBuffer::kCapacity);

    std::array<int, kNumThreads> last_val{};  // Last value for each thread.
    for (auto it = m::RingBuffer::GetReadIterator(); it; ++it) {
      if (it->type_and_id.load(std::memory_order_acquire) == 0)
        break;
      using Record = m::Record;
      ASSERT_EQ(it->type_and_id & Record::kTypeMask, Record::kTypeCounter);
      auto thd_idx = static_cast<size_t>(it->type_and_id & ~Record::kTypeMask);
      ASSERT_EQ(it->counter_value, last_val[thd_idx]);
      last_val[thd_idx]++;
    }

    m::Disable();
  }
}

}  // namespace
}  // namespace perfetto