1 /*
2  * Copyright 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "IoPerfCollection.h"
18 
19 #include <WatchdogProperties.sysprop.h>
20 #include <android-base/file.h>
21 #include <cutils/android_filesystem_config.h>
22 
23 #include <algorithm>
24 #include <future>
25 #include <queue>
26 #include <string>
27 #include <vector>
28 
29 #include "LooperStub.h"
30 #include "ProcPidDir.h"
31 #include "ProcPidStat.h"
32 #include "ProcStat.h"
33 #include "UidIoStats.h"
34 #include "gmock/gmock.h"
35 
36 namespace android {
37 namespace automotive {
38 namespace watchdog {
39 
40 using android::base::Error;
41 using android::base::Result;
42 using android::base::WriteStringToFile;
43 using testing::LooperStub;
44 using testing::populateProcPidDir;
45 
46 namespace {
47 
// Collection intervals/durations used by the tests below; kept short so the
// LooperStub-driven tests run quickly. `constexpr` (instead of `const`) makes
// these true compile-time constants, and the local literals-using directive
// makes the block self-contained.
using namespace std::chrono_literals;
constexpr std::chrono::seconds kTestBootInterval = 1s;
constexpr std::chrono::seconds kTestPeriodicInterval = 2s;
constexpr std::chrono::seconds kTestCustomInterval = 3s;
constexpr std::chrono::seconds kTestCustomCollectionDuration = 11s;
52 
53 class UidIoStatsStub : public UidIoStats {
54 public:
UidIoStatsStub(bool enabled=false)55     explicit UidIoStatsStub(bool enabled = false) : mEnabled(enabled) {}
collect()56     Result<std::unordered_map<uint32_t, UidIoUsage>> collect() override {
57         if (mCache.empty()) {
58             return Error() << "Cache is empty";
59         }
60         const auto entry = mCache.front();
61         mCache.pop();
62         return entry;
63     }
enabled()64     bool enabled() override { return mEnabled; }
filePath()65     std::string filePath() override { return kUidIoStatsPath; }
push(const std::unordered_map<uint32_t,UidIoUsage> & entry)66     void push(const std::unordered_map<uint32_t, UidIoUsage>& entry) { mCache.push(entry); }
67 
68 private:
69     bool mEnabled;
70     std::queue<std::unordered_map<uint32_t, UidIoUsage>> mCache;
71 };
72 
73 class ProcStatStub : public ProcStat {
74 public:
ProcStatStub(bool enabled=false)75     explicit ProcStatStub(bool enabled = false) : mEnabled(enabled) {}
collect()76     Result<ProcStatInfo> collect() override {
77         if (mCache.empty()) {
78             return Error() << "Cache is empty";
79         }
80         const auto entry = mCache.front();
81         mCache.pop();
82         return entry;
83     }
enabled()84     bool enabled() override { return mEnabled; }
filePath()85     std::string filePath() override { return kProcStatPath; }
push(const ProcStatInfo & entry)86     void push(const ProcStatInfo& entry) { mCache.push(entry); }
87 
88 private:
89     bool mEnabled;
90     std::queue<ProcStatInfo> mCache;
91 };
92 
93 class ProcPidStatStub : public ProcPidStat {
94 public:
ProcPidStatStub(bool enabled=false)95     explicit ProcPidStatStub(bool enabled = false) : mEnabled(enabled) {}
collect()96     Result<std::vector<ProcessStats>> collect() override {
97         if (mCache.empty()) {
98             return Error() << "Cache is empty";
99         }
100         const auto entry = mCache.front();
101         mCache.pop();
102         return entry;
103     }
enabled()104     bool enabled() override { return mEnabled; }
dirPath()105     std::string dirPath() override { return kProcDirPath; }
push(const std::vector<ProcessStats> & entry)106     void push(const std::vector<ProcessStats>& entry) { mCache.push(entry); }
107 
108 private:
109     bool mEnabled;
110     std::queue<std::vector<ProcessStats>> mCache;
111 };
112 
isEqual(const UidIoPerfData & lhs,const UidIoPerfData & rhs)113 bool isEqual(const UidIoPerfData& lhs, const UidIoPerfData& rhs) {
114     if (lhs.topNReads.size() != rhs.topNReads.size() ||
115         lhs.topNWrites.size() != rhs.topNWrites.size()) {
116         return false;
117     }
118     for (int i = 0; i < METRIC_TYPES; ++i) {
119         for (int j = 0; j < UID_STATES; ++j) {
120             if (lhs.total[i][j] != rhs.total[i][j]) {
121                 return false;
122             }
123         }
124     }
125     auto comp = [&](const UidIoPerfData::Stats& l, const UidIoPerfData::Stats& r) -> bool {
126         bool isEqual = l.userId == r.userId && l.packageName == r.packageName;
127         for (int i = 0; i < UID_STATES; ++i) {
128             isEqual &= l.bytes[i] == r.bytes[i] && l.fsync[i] == r.fsync[i];
129         }
130         return isEqual;
131     };
132     return lhs.topNReads.size() == rhs.topNReads.size() &&
133             std::equal(lhs.topNReads.begin(), lhs.topNReads.end(), rhs.topNReads.begin(), comp) &&
134             lhs.topNWrites.size() == rhs.topNWrites.size() &&
135             std::equal(lhs.topNWrites.begin(), lhs.topNWrites.end(), rhs.topNWrites.begin(), comp);
136 }
137 
isEqual(const SystemIoPerfData & lhs,const SystemIoPerfData & rhs)138 bool isEqual(const SystemIoPerfData& lhs, const SystemIoPerfData& rhs) {
139     return lhs.cpuIoWaitTime == rhs.cpuIoWaitTime && lhs.totalCpuTime == rhs.totalCpuTime &&
140             lhs.ioBlockedProcessesCnt == rhs.ioBlockedProcessesCnt &&
141             lhs.totalProcessesCnt == rhs.totalProcessesCnt;
142 }
143 
isEqual(const ProcessIoPerfData & lhs,const ProcessIoPerfData & rhs)144 bool isEqual(const ProcessIoPerfData& lhs, const ProcessIoPerfData& rhs) {
145     if (lhs.topNIoBlockedUids.size() != rhs.topNIoBlockedUids.size() ||
146         lhs.topNMajorFaultUids.size() != rhs.topNMajorFaultUids.size() ||
147         lhs.totalMajorFaults != rhs.totalMajorFaults ||
148         lhs.majorFaultsPercentChange != rhs.majorFaultsPercentChange) {
149         return false;
150     }
151     auto comp = [&](const ProcessIoPerfData::UidStats& l,
152                     const ProcessIoPerfData::UidStats& r) -> bool {
153         auto comp = [&](const ProcessIoPerfData::UidStats::ProcessStats& l,
154                         const ProcessIoPerfData::UidStats::ProcessStats& r) -> bool {
155             return l.comm == r.comm && l.count == r.count;
156         };
157         return l.userId == r.userId && l.packageName == r.packageName && l.count == r.count &&
158                 l.topNProcesses.size() == r.topNProcesses.size() &&
159                 std::equal(l.topNProcesses.begin(), l.topNProcesses.end(), r.topNProcesses.begin(),
160                            comp);
161     };
162     return lhs.topNIoBlockedUids.size() == lhs.topNIoBlockedUids.size() &&
163             std::equal(lhs.topNIoBlockedUids.begin(), lhs.topNIoBlockedUids.end(),
164                        rhs.topNIoBlockedUids.begin(), comp) &&
165             lhs.topNIoBlockedUidsTotalTaskCnt.size() == rhs.topNIoBlockedUidsTotalTaskCnt.size() &&
166             std::equal(lhs.topNIoBlockedUidsTotalTaskCnt.begin(),
167                        lhs.topNIoBlockedUidsTotalTaskCnt.end(),
168                        rhs.topNIoBlockedUidsTotalTaskCnt.begin()) &&
169             lhs.topNMajorFaultUids.size() == rhs.topNMajorFaultUids.size() &&
170             std::equal(lhs.topNMajorFaultUids.begin(), lhs.topNMajorFaultUids.end(),
171                        rhs.topNMajorFaultUids.begin(), comp);
172 }
173 
isEqual(const IoPerfRecord & lhs,const IoPerfRecord & rhs)174 bool isEqual(const IoPerfRecord& lhs, const IoPerfRecord& rhs) {
175     return isEqual(lhs.uidIoPerfData, rhs.uidIoPerfData) &&
176             isEqual(lhs.systemIoPerfData, rhs.systemIoPerfData) &&
177             isEqual(lhs.processIoPerfData, rhs.processIoPerfData);
178 }
179 
180 }  // namespace
181 
// Verifies that start() spawns the collection thread exactly once, that the
// collector's tunables are initialized from the corresponding sysprops, and
// that terminate() joins the thread.
// BUG FIX: the guard before the periodic-interval assertion checked
// sysprop::topNStatsPerCategory().has_value() (a copy-paste of the earlier
// guard) while the ASSERT_EQ below reads periodicCollectionInterval().value();
// the guard now checks the sysprop that is actually dereferenced.
TEST(IoPerfCollectionTest, TestCollectionStartAndTerminate) {
    sp<IoPerfCollection> collector = new IoPerfCollection();
    const auto& ret = collector->start();
    ASSERT_TRUE(ret) << ret.error().message();
    ASSERT_TRUE(collector->mCollectionThread.joinable()) << "Collection thread not created";
    ASSERT_FALSE(collector->start())
            << "No error returned when collector was started more than once";
    ASSERT_TRUE(sysprop::topNStatsPerCategory().has_value());
    ASSERT_EQ(collector->mTopNStatsPerCategory, sysprop::topNStatsPerCategory().value());

    ASSERT_TRUE(sysprop::topNStatsPerSubcategory().has_value());
    ASSERT_EQ(collector->mTopNStatsPerSubcategory, sysprop::topNStatsPerSubcategory().value());

    ASSERT_TRUE(sysprop::boottimeCollectionInterval().has_value());
    ASSERT_EQ(std::chrono::duration_cast<std::chrono::seconds>(
                      collector->mBoottimeCollection.interval)
                      .count(),
              sysprop::boottimeCollectionInterval().value());

    ASSERT_TRUE(sysprop::periodicCollectionInterval().has_value());
    ASSERT_EQ(std::chrono::duration_cast<std::chrono::seconds>(
                      collector->mPeriodicCollection.interval)
                      .count(),
              sysprop::periodicCollectionInterval().value());

    ASSERT_TRUE(sysprop::periodicCollectionBufferSize().has_value());
    ASSERT_EQ(collector->mPeriodicCollection.maxCacheSize,
              sysprop::periodicCollectionBufferSize().value());

    collector->terminate();
    ASSERT_FALSE(collector->mCollectionThread.joinable()) << "Collection thread did not terminate";
}
214 
TEST(IoPerfCollectionTest,TestValidCollectionSequence)215 TEST(IoPerfCollectionTest, TestValidCollectionSequence) {
216     sp<UidIoStatsStub> uidIoStatsStub = new UidIoStatsStub(true);
217     sp<ProcStatStub> procStatStub = new ProcStatStub(true);
218     sp<ProcPidStatStub> procPidStatStub = new ProcPidStatStub(true);
219     sp<LooperStub> looperStub = new LooperStub();
220 
221     sp<IoPerfCollection> collector = new IoPerfCollection();
222     collector->mUidIoStats = uidIoStatsStub;
223     collector->mProcStat = procStatStub;
224     collector->mProcPidStat = procPidStatStub;
225     collector->mHandlerLooper = looperStub;
226 
227     auto ret = collector->start();
228     ASSERT_TRUE(ret) << ret.error().message();
229 
230     collector->mBoottimeCollection.interval = kTestBootInterval;
231     collector->mPeriodicCollection.interval = kTestPeriodicInterval;
232     collector->mPeriodicCollection.maxCacheSize = 1;
233 
234     // #1 Boot-time collection
235     uidIoStatsStub->push({{1009, {.uid = 1009, .ios = {0, 20000, 0, 30000, 0, 300}}}});
236     procStatStub->push(ProcStatInfo{
237             /*stats=*/{6200, 5700, 1700, 3100, /*ioWaitTime=*/1100, 5200, 3900, 0, 0, 0},
238             /*runnableCnt=*/17,
239             /*ioBlockedCnt=*/5,
240     });
241     procPidStatStub->push({{.tgid = 100,
242                             .uid = 1009,
243                             .process = {.pid = 100,
244                                         .comm = "disk I/O",
245                                         .state = "D",
246                                         .ppid = 1,
247                                         .majorFaults = 5000,
248                                         .numThreads = 1,
249                                         .startTime = 234},
250                             .threads = {{100,
251                                          {.pid = 100,
252                                           .comm = "disk I/O",
253                                           .state = "D",
254                                           .ppid = 1,
255                                           .majorFaults = 5000,
256                                           .numThreads = 1,
257                                           .startTime = 234}}}}});
258     IoPerfRecord bootExpectedFirst = {
259             .uidIoPerfData = {.topNReads = {{.userId = 0,
260                                              .packageName = "mount",
261                                              .bytes = {0, 20000},
262                                              .fsync{0, 300}}},
263                               .topNWrites = {{.userId = 0,
264                                               .packageName = "mount",
265                                               .bytes = {0, 30000},
266                                               .fsync{0, 300}}},
267                               .total = {{0, 20000}, {0, 30000}, {0, 300}}},
268             .systemIoPerfData = {.cpuIoWaitTime = 1100,
269                                  .totalCpuTime = 26900,
270                                  .ioBlockedProcessesCnt = 5,
271                                  .totalProcessesCnt = 22},
272             .processIoPerfData = {.topNIoBlockedUids = {{0, "mount", 1, {{"disk I/O", 1}}}},
273                                   .topNIoBlockedUidsTotalTaskCnt = {1},
274                                   .topNMajorFaultUids = {{0, "mount", 5000, {{"disk I/O", 5000}}}},
275                                   .totalMajorFaults = 5000,
276                                   .majorFaultsPercentChange = 0.0},
277     };
278     ret = looperStub->pollCache();
279     ASSERT_TRUE(ret) << ret.error().message();
280     ASSERT_EQ(looperStub->numSecondsElapsed(), 0)
281             << "Boot-time collection didn't start immediately";
282 
283     // #2 Boot-time collection
284     uidIoStatsStub->push({
285             {1009, {.uid = 1009, .ios = {0, 2000, 0, 3000, 0, 100}}},
286     });
287     procStatStub->push(ProcStatInfo{
288             /*stats=*/{1200, 1700, 2700, 7800, /*ioWaitTime=*/5500, 500, 300, 0, 0, 100},
289             /*runnableCnt=*/8,
290             /*ioBlockedCnt=*/6,
291     });
292     procPidStatStub->push({{.tgid = 100,
293                             .uid = 1009,
294                             .process = {.pid = 100,
295                                         .comm = "disk I/O",
296                                         .state = "D",
297                                         .ppid = 1,
298                                         .majorFaults = 11000,
299                                         .numThreads = 1,
300                                         .startTime = 234},
301                             .threads = {{100,
302                                          {.pid = 100,
303                                           .comm = "disk I/O",
304                                           .state = "D",
305                                           .ppid = 1,
306                                           .majorFaults = 10000,
307                                           .numThreads = 1,
308                                           .startTime = 234}},
309                                         {200,
310                                          {.pid = 200,
311                                           .comm = "disk I/O",
312                                           .state = "D",
313                                           .ppid = 1,
314                                           .majorFaults = 1000,
315                                           .numThreads = 1,
316                                           .startTime = 1234}}}}});
317     IoPerfRecord bootExpectedSecond = {
318             .uidIoPerfData = {.topNReads = {{.userId = 0,
319                                              .packageName = "mount",
320                                              .bytes = {0, 2000},
321                                              .fsync{0, 100}}},
322                               .topNWrites = {{.userId = 0,
323                                               .packageName = "mount",
324                                               .bytes = {0, 3000},
325                                               .fsync{0, 100}}},
326                               .total = {{0, 2000}, {0, 3000}, {0, 100}}},
327             .systemIoPerfData = {.cpuIoWaitTime = 5500,
328                                  .totalCpuTime = 19800,
329                                  .ioBlockedProcessesCnt = 6,
330                                  .totalProcessesCnt = 14},
331             .processIoPerfData =
332                     {.topNIoBlockedUids = {{0, "mount", 2, {{"disk I/O", 2}}}},
333                      .topNIoBlockedUidsTotalTaskCnt = {2},
334                      .topNMajorFaultUids = {{0, "mount", 11000, {{"disk I/O", 11000}}}},
335                      .totalMajorFaults = 11000,
336                      .majorFaultsPercentChange = ((11000.0 - 5000.0) / 5000.0) * 100},
337     };
338     ret = looperStub->pollCache();
339     ASSERT_TRUE(ret) << ret.error().message();
340     ASSERT_EQ(looperStub->numSecondsElapsed(), kTestBootInterval.count())
341             << "Subsequent boot-time collection didn't happen at " << kTestBootInterval.count()
342             << " seconds interval";
343 
344     // #3 Last boot-time collection
345     ret = collector->onBootFinished();
346     ASSERT_TRUE(ret) << ret.error().message();
347     uidIoStatsStub->push({
348             {1009, {.uid = 1009, .ios = {0, 7000, 0, 8000, 0, 50}}},
349     });
350     procStatStub->push(ProcStatInfo{
351             /*stats=*/{1400, 1900, 2900, 8000, /*ioWaitTime=*/5700, 700, 500, 0, 0, 300},
352             /*runnableCnt=*/10,
353             /*ioBlockedCnt=*/8,
354     });
355     procPidStatStub->push({{.tgid = 100,
356                             .uid = 1009,
357                             .process = {.pid = 100,
358                                         .comm = "disk I/O",
359                                         .state = "D",
360                                         .ppid = 1,
361                                         .majorFaults = 5000,
362                                         .numThreads = 1,
363                                         .startTime = 234},
364                             .threads = {{100,
365                                          {.pid = 100,
366                                           .comm = "disk I/O",
367                                           .state = "D",
368                                           .ppid = 1,
369                                           .majorFaults = 3000,
370                                           .numThreads = 1,
371                                           .startTime = 234}},
372                                         {200,
373                                          {.pid = 200,
374                                           .comm = "disk I/O",
375                                           .state = "D",
376                                           .ppid = 1,
377                                           .majorFaults = 2000,
378                                           .numThreads = 1,
379                                           .startTime = 1234}}}}});
380     IoPerfRecord bootExpectedThird = {
381             .uidIoPerfData = {.topNReads = {{.userId = 0,
382                                              .packageName = "mount",
383                                              .bytes = {0, 7000},
384                                              .fsync{0, 50}}},
385                               .topNWrites = {{.userId = 0,
386                                               .packageName = "mount",
387                                               .bytes = {0, 8000},
388                                               .fsync{0, 50}}},
389                               .total = {{0, 7000}, {0, 8000}, {0, 50}}},
390             .systemIoPerfData = {.cpuIoWaitTime = 5700,
391                                  .totalCpuTime = 21400,
392                                  .ioBlockedProcessesCnt = 8,
393                                  .totalProcessesCnt = 18},
394             .processIoPerfData = {.topNIoBlockedUids = {{0, "mount", 2, {{"disk I/O", 2}}}},
395                                   .topNIoBlockedUidsTotalTaskCnt = {2},
396                                   .topNMajorFaultUids = {{0, "mount", 5000, {{"disk I/O", 5000}}}},
397                                   .totalMajorFaults = 5000,
398                                   .majorFaultsPercentChange = ((5000.0 - 11000.0) / 11000.0) * 100},
399     };
400     ret = looperStub->pollCache();
401     ASSERT_TRUE(ret) << ret.error().message();
402     ASSERT_EQ(looperStub->numSecondsElapsed(), 0)
403             << "Last boot-time collection didn't happen immediately after receiving boot complete "
404             << "notification";
405 
406     ASSERT_EQ(collector->mBoottimeCollection.records.size(), 3);
407     ASSERT_TRUE(isEqual(collector->mBoottimeCollection.records[0], bootExpectedFirst))
408             << "Boot-time collection record 1 doesn't match.\nExpected:\n"
409             << toString(bootExpectedFirst) << "\nActual:\n"
410             << toString(collector->mBoottimeCollection.records[0]);
411     ASSERT_TRUE(isEqual(collector->mBoottimeCollection.records[1], bootExpectedSecond))
412             << "Boot-time collection record 2 doesn't match.\nExpected:\n"
413             << toString(bootExpectedSecond) << "\nActual:\n"
414             << toString(collector->mBoottimeCollection.records[1]);
415     ASSERT_TRUE(isEqual(collector->mBoottimeCollection.records[2], bootExpectedThird))
416             << "Boot-time collection record 3 doesn't match.\nExpected:\n"
417             << toString(bootExpectedSecond) << "\nActual:\n"
418             << toString(collector->mBoottimeCollection.records[2]);
419 
420     // #4 Periodic collection
421     uidIoStatsStub->push({
422             {1009, {.uid = 1009, .ios = {0, 4000, 0, 6000, 0, 100}}},
423     });
424     procStatStub->push(ProcStatInfo{
425             /*stats=*/{200, 700, 400, 800, /*ioWaitTime=*/500, 666, 780, 0, 0, 230},
426             /*runnableCnt=*/12,
427             /*ioBlockedCnt=*/3,
428     });
429     procPidStatStub->push({{.tgid = 100,
430                             .uid = 1009,
431                             .process = {.pid = 100,
432                                         .comm = "disk I/O",
433                                         .state = "D",
434                                         .ppid = 1,
435                                         .majorFaults = 4100,
436                                         .numThreads = 1,
437                                         .startTime = 234},
438                             .threads = {{100,
439                                          {.pid = 100,
440                                           .comm = "disk I/O",
441                                           .state = "D",
442                                           .ppid = 1,
443                                           .majorFaults = 100,
444                                           .numThreads = 1,
445                                           .startTime = 234}},
446                                         {1200,
447                                          {.pid = 1200,
448                                           .comm = "disk I/O",
449                                           .state = "S",
450                                           .ppid = 1,
451                                           .majorFaults = 4000,
452                                           .numThreads = 1,
453                                           .startTime = 567890}}}}});
454     IoPerfRecord periodicExpectedFirst = {
455             .uidIoPerfData = {.topNReads = {{.userId = 0,
456                                              .packageName = "mount",
457                                              .bytes = {0, 4000},
458                                              .fsync{0, 100}}},
459                               .topNWrites = {{.userId = 0,
460                                               .packageName = "mount",
461                                               .bytes = {0, 6000},
462                                               .fsync{0, 100}}},
463                               .total = {{0, 4000}, {0, 6000}, {0, 100}}},
464             .systemIoPerfData = {.cpuIoWaitTime = 500,
465                                  .totalCpuTime = 4276,
466                                  .ioBlockedProcessesCnt = 3,
467                                  .totalProcessesCnt = 15},
468             .processIoPerfData = {.topNIoBlockedUids = {{0, "mount", 1, {{"disk I/O", 1}}}},
469                                   .topNIoBlockedUidsTotalTaskCnt = {2},
470                                   .topNMajorFaultUids = {{0, "mount", 4100, {{"disk I/O", 4100}}}},
471                                   .totalMajorFaults = 4100,
472                                   .majorFaultsPercentChange = ((4100.0 - 5000.0) / 5000.0) * 100},
473     };
474     ret = looperStub->pollCache();
475     ASSERT_TRUE(ret) << ret.error().message();
476     ASSERT_EQ(looperStub->numSecondsElapsed(), kTestPeriodicInterval.count())
477             << "First periodic collection didn't happen at " << kTestPeriodicInterval.count()
478             << " seconds interval";
479 
480     // #5 Periodic collection
481     uidIoStatsStub->push({
482             {1009, {.uid = 1009, .ios = {0, 3000, 0, 5000, 0, 800}}},
483     });
484     procStatStub->push(ProcStatInfo{
485             /*stats=*/{2300, 7300, 4300, 8300, /*ioWaitTime=*/5300, 6366, 7380, 0, 0, 2330},
486             /*runnableCnt=*/2,
487             /*ioBlockedCnt=*/4,
488     });
489     procPidStatStub->push({{.tgid = 100,
490                             .uid = 1009,
491                             .process = {.pid = 100,
492                                         .comm = "disk I/O",
493                                         .state = "D",
494                                         .ppid = 1,
495                                         .majorFaults = 44300,
496                                         .numThreads = 1,
497                                         .startTime = 234},
498                             .threads = {{100,
499                                          {.pid = 100,
500                                           .comm = "disk I/O",
501                                           .state = "D",
502                                           .ppid = 1,
503                                           .majorFaults = 1300,
504                                           .numThreads = 1,
505                                           .startTime = 234}},
506                                         {1200,
507                                          {.pid = 1200,
508                                           .comm = "disk I/O",
509                                           .state = "D",
510                                           .ppid = 1,
511                                           .majorFaults = 43000,
512                                           .numThreads = 1,
513                                           .startTime = 567890}}}}});
514     IoPerfRecord periodicExpectedSecond = {
515             .uidIoPerfData = {.topNReads = {{.userId = 0,
516                                              .packageName = "mount",
517                                              .bytes = {0, 3000},
518                                              .fsync{0, 800}}},
519                               .topNWrites = {{.userId = 0,
520                                               .packageName = "mount",
521                                               .bytes = {0, 5000},
522                                               .fsync{0, 800}}},
523                               .total = {{0, 3000}, {0, 5000}, {0, 800}}},
524             .systemIoPerfData = {.cpuIoWaitTime = 5300,
525                                  .totalCpuTime = 43576,
526                                  .ioBlockedProcessesCnt = 4,
527                                  .totalProcessesCnt = 6},
528             .processIoPerfData =
529                     {.topNIoBlockedUids = {{0, "mount", 2, {{"disk I/O", 2}}}},
530                      .topNIoBlockedUidsTotalTaskCnt = {2},
531                      .topNMajorFaultUids = {{0, "mount", 44300, {{"disk I/O", 44300}}}},
532                      .totalMajorFaults = 44300,
533                      .majorFaultsPercentChange = ((44300.0 - 4100.0) / 4100.0) * 100},
534     };
535     ret = looperStub->pollCache();
536     ASSERT_TRUE(ret) << ret.error().message();
537     ASSERT_EQ(looperStub->numSecondsElapsed(), kTestPeriodicInterval.count())
538             << "Subsequent periodic collection didn't happen at " << kTestPeriodicInterval.count()
539             << " seconds interval";
540 
541     ASSERT_EQ(collector->mPeriodicCollection.records.size(), 2);
542     ASSERT_TRUE(isEqual(collector->mPeriodicCollection.records[0], periodicExpectedFirst))
543             << "Periodic collection snapshot 1, record 1 doesn't match.\nExpected:\n"
544             << toString(periodicExpectedFirst) << "\nActual:\n"
545             << toString(collector->mPeriodicCollection.records[0]);
546     ASSERT_TRUE(isEqual(collector->mPeriodicCollection.records[1], periodicExpectedSecond))
547             << "Periodic collection snapshot 1, record 2 doesn't match.\nExpected:\n"
548             << toString(periodicExpectedSecond) << "\nActual:\n"
549             << toString(collector->mPeriodicCollection.records[1]);
550 
551     // #6 Custom collection
552     Vector<String16> args;
553     args.push_back(String16(kStartCustomCollectionFlag));
554     args.push_back(String16(kIntervalFlag));
555     args.push_back(String16(std::to_string(kTestCustomInterval.count()).c_str()));
556     args.push_back(String16(kMaxDurationFlag));
557     args.push_back(String16(std::to_string(kTestCustomCollectionDuration.count()).c_str()));
558 
559     ret = collector->dump(-1, args);
560     ASSERT_TRUE(ret.ok()) << ret.error().message();
561     uidIoStatsStub->push({
562             {1009, {.uid = 1009, .ios = {0, 13000, 0, 15000, 0, 100}}},
563     });
564     procStatStub->push(ProcStatInfo{
565             /*stats=*/{2800, 7800, 4800, 8800, /*ioWaitTime=*/5800, 6866, 7880, 0, 0, 2830},
566             /*runnableCnt=*/200,
567             /*ioBlockedCnt=*/13,
568     });
569     procPidStatStub->push({{.tgid = 100,
570                             .uid = 1009,
571                             .process = {.pid = 100,
572                                         .comm = "disk I/O",
573                                         .state = "D",
574                                         .ppid = 1,
575                                         .majorFaults = 49800,
576                                         .numThreads = 1,
577                                         .startTime = 234},
578                             .threads = {{100,
579                                          {.pid = 100,
580                                           .comm = "disk I/O",
581                                           .state = "D",
582                                           .ppid = 1,
583                                           .majorFaults = 1800,
584                                           .numThreads = 1,
585                                           .startTime = 234}},
586                                         {1200,
587                                          {.pid = 1200,
588                                           .comm = "disk I/O",
589                                           .state = "D",
590                                           .ppid = 1,
591                                           .majorFaults = 48000,
592                                           .numThreads = 1,
593                                           .startTime = 567890}}}}});
594     IoPerfRecord customExpectedFirst = {
595             .uidIoPerfData = {.topNReads = {{.userId = 0,
596                                              .packageName = "mount",
597                                              .bytes = {0, 13000},
598                                              .fsync{0, 100}}},
599                               .topNWrites = {{.userId = 0,
600                                               .packageName = "mount",
601                                               .bytes = {0, 15000},
602                                               .fsync{0, 100}}},
603                               .total = {{0, 13000}, {0, 15000}, {0, 100}}},
604             .systemIoPerfData = {.cpuIoWaitTime = 5800,
605                                  .totalCpuTime = 47576,
606                                  .ioBlockedProcessesCnt = 13,
607                                  .totalProcessesCnt = 213},
608             .processIoPerfData =
609                     {.topNIoBlockedUids = {{0, "mount", 2, {{"disk I/O", 2}}}},
610                      .topNIoBlockedUidsTotalTaskCnt = {2},
611                      .topNMajorFaultUids = {{0, "mount", 49800, {{"disk I/O", 49800}}}},
612                      .totalMajorFaults = 49800,
613                      .majorFaultsPercentChange = ((49800.0 - 44300.0) / 44300.0) * 100},
614     };
615     ret = looperStub->pollCache();
616     ASSERT_TRUE(ret) << ret.error().message();
617     ASSERT_EQ(looperStub->numSecondsElapsed(), 0) << "Custom collection didn't start immediately";
618 
619     // #7 Custom collection
620     uidIoStatsStub->push({
621             {1009, {.uid = 1009, .ios = {0, 14000, 0, 16000, 0, 100}}},
622     });
623     procStatStub->push(ProcStatInfo{
624             /*stats=*/{2900, 7900, 4900, 8900, /*ioWaitTime=*/5900, 6966, 7980, 0, 0, 2930},
625             /*runnableCnt=*/100,
626             /*ioBlockedCnt=*/57,
627     });
628     procPidStatStub->push({{.tgid = 100,
629                             .uid = 1009,
630                             .process = {.pid = 100,
631                                         .comm = "disk I/O",
632                                         .state = "D",
633                                         .ppid = 1,
634                                         .majorFaults = 50900,
635                                         .numThreads = 1,
636                                         .startTime = 234},
637                             .threads = {{100,
638                                          {.pid = 100,
639                                           .comm = "disk I/O",
640                                           .state = "D",
641                                           .ppid = 1,
642                                           .majorFaults = 1900,
643                                           .numThreads = 1,
644                                           .startTime = 234}},
645                                         {1200,
646                                          {.pid = 1200,
647                                           .comm = "disk I/O",
648                                           .state = "D",
649                                           .ppid = 1,
650                                           .majorFaults = 49000,
651                                           .numThreads = 1,
652                                           .startTime = 567890}}}}});
653     IoPerfRecord customExpectedSecond = {
654             .uidIoPerfData = {.topNReads = {{.userId = 0,
655                                              .packageName = "mount",
656                                              .bytes = {0, 14000},
657                                              .fsync{0, 100}}},
658                               .topNWrites = {{.userId = 0,
659                                               .packageName = "mount",
660                                               .bytes = {0, 16000},
661                                               .fsync{0, 100}}},
662                               .total = {{0, 14000}, {0, 16000}, {0, 100}}},
663             .systemIoPerfData = {.cpuIoWaitTime = 5900,
664                                  .totalCpuTime = 48376,
665                                  .ioBlockedProcessesCnt = 57,
666                                  .totalProcessesCnt = 157},
667             .processIoPerfData =
668                     {.topNIoBlockedUids = {{0, "mount", 2, {{"disk I/O", 2}}}},
669                      .topNIoBlockedUidsTotalTaskCnt = {2},
670                      .topNMajorFaultUids = {{0, "mount", 50900, {{"disk I/O", 50900}}}},
671                      .totalMajorFaults = 50900,
672                      .majorFaultsPercentChange = ((50900.0 - 49800.0) / 49800.0) * 100},
673     };
674     ret = looperStub->pollCache();
675     ASSERT_TRUE(ret) << ret.error().message();
676     ASSERT_EQ(looperStub->numSecondsElapsed(), kTestCustomInterval.count())
677             << "Subsequent custom collection didn't happen at " << kTestCustomInterval.count()
678             << " seconds interval";
679 
680     ASSERT_EQ(collector->mCustomCollection.records.size(), 2);
681     ASSERT_TRUE(isEqual(collector->mCustomCollection.records[0], customExpectedFirst))
682             << "Custom collection record 1 doesn't match.\nExpected:\n"
683             << toString(customExpectedFirst) << "\nActual:\n"
684             << toString(collector->mCustomCollection.records[0]);
685     ASSERT_TRUE(isEqual(collector->mCustomCollection.records[1], customExpectedSecond))
686             << "Custom collection record 2 doesn't match.\nExpected:\n"
687             << toString(customExpectedSecond) << "\nActual:\n"
688             << toString(collector->mCustomCollection.records[1]);
689 
690     // #8 Switch to periodic collection
691     args.clear();
692     args.push_back(String16(kEndCustomCollectionFlag));
693     TemporaryFile customDump;
694     ret = collector->dump(customDump.fd, args);
695     ASSERT_TRUE(ret.ok()) << ret.error().message();
696     ret = looperStub->pollCache();
697     ASSERT_TRUE(ret) << ret.error().message();
698 
699     // Custom collection cache should be emptied on ending the collection.
700     ASSERT_EQ(collector->mCustomCollection.records.size(), 0);
701 
    // #9 periodic collection
703     uidIoStatsStub->push({
704             {1009, {.uid = 1009, .ios = {0, 123, 0, 456, 0, 25}}},
705     });
706     procStatStub->push(ProcStatInfo{
707             /*stats=*/{3400, 2300, 5600, 7800, /*ioWaitTime=*/1100, 166, 180, 0, 0, 130},
708             /*runnableCnt=*/3,
709             /*ioBlockedCnt=*/1,
710     });
711     procPidStatStub->push({{.tgid = 100,
712                             .uid = 1009,
713                             .process = {.pid = 100,
714                                         .comm = "disk I/O",
715                                         .state = "D",
716                                         .ppid = 1,
717                                         .majorFaults = 5701,
718                                         .numThreads = 1,
719                                         .startTime = 234},
720                             .threads = {{100,
721                                          {.pid = 100,
722                                           .comm = "disk I/O",
723                                           .state = "D",
724                                           .ppid = 1,
725                                           .majorFaults = 23,
726                                           .numThreads = 1,
727                                           .startTime = 234}},
728                                         {1200,
729                                          {.pid = 1200,
730                                           .comm = "disk I/O",
731                                           .state = "D",
732                                           .ppid = 1,
733                                           .majorFaults = 5678,
734                                           .numThreads = 1,
735                                           .startTime = 567890}}}}});
736     IoPerfRecord periodicExpectedThird = {
737             .uidIoPerfData = {.topNReads = {{.userId = 0,
738                                              .packageName = "mount",
739                                              .bytes = {0, 123},
740                                              .fsync{0, 25}}},
741                               .topNWrites = {{.userId = 0,
742                                               .packageName = "mount",
743                                               .bytes = {0, 456},
744                                               .fsync{0, 25}}},
745                               .total = {{0, 123}, {0, 456}, {0, 25}}},
746             .systemIoPerfData = {.cpuIoWaitTime = 1100,
747                                  .totalCpuTime = 20676,
748                                  .ioBlockedProcessesCnt = 1,
749                                  .totalProcessesCnt = 4},
750             .processIoPerfData = {.topNIoBlockedUids = {{0, "mount", 2, {{"disk I/O", 2}}}},
751                                   .topNIoBlockedUidsTotalTaskCnt = {2},
752                                   .topNMajorFaultUids = {{0, "mount", 5701, {{"disk I/O", 5701}}}},
753                                   .totalMajorFaults = 5701,
754                                   .majorFaultsPercentChange = ((5701.0 - 50900.0) / 50900.0) * 100},
755     };
756     ret = looperStub->pollCache();
757     ASSERT_TRUE(ret) << ret.error().message();
758     ASSERT_EQ(looperStub->numSecondsElapsed(), 0)
759             << "Periodic collection didn't start immediately after ending custom collection";
760 
761     // Maximum periodic collection buffer size is 2.
762     ASSERT_EQ(collector->mPeriodicCollection.records.size(), 2);
763     ASSERT_TRUE(isEqual(collector->mPeriodicCollection.records[0], periodicExpectedSecond))
764             << "Periodic collection snapshot 2, record 1 doesn't match.\nExpected:\n"
765             << toString(periodicExpectedSecond) << "\nActual:\n"
766             << toString(collector->mPeriodicCollection.records[0]);
767     ASSERT_TRUE(isEqual(collector->mPeriodicCollection.records[1], periodicExpectedThird))
768             << "Periodic collection snapshot 2, record 2 doesn't match.\nExpected:\n"
769             << toString(periodicExpectedThird) << "\nActual:\n"
770             << toString(collector->mPeriodicCollection.records[1]);
771 
772     ASSERT_EQ(collector->mBoottimeCollection.records.size(), 3)
773             << "Boot-time records not persisted until collector termination";
774 
775     TemporaryFile bugreportDump;
776     ret = collector->dump(bugreportDump.fd, {});
777     ASSERT_TRUE(ret.ok()) << ret.error().message();
778 
779     collector->terminate();
780 }
781 
// Verifies that the collection thread exits on its own when none of the
// underlying stat collectors is enabled.
TEST(IoPerfCollectionTest, TestCollectionTerminatesOnZeroEnabledCollectors) {
    sp<IoPerfCollection> collector = new IoPerfCollection();
    // Stubs are constructed with the default |enabled = false|, so the
    // collection has no enabled collector to poll.
    collector->mUidIoStats = new UidIoStatsStub();
    collector->mProcStat = new ProcStatStub();
    collector->mProcPidStat = new ProcPidStatStub();

    const auto& ret = collector->start();
    ASSERT_TRUE(ret) << ret.error().message();

    // Join the collection thread from a helper task so the test can bound the
    // wait instead of blocking indefinitely.
    auto joinFuture = std::async([&]() {
        if (collector->mCollectionThread.joinable()) {
            collector->mCollectionThread.join();
        }
    });
    ASSERT_EQ(joinFuture.wait_for(1s), std::future_status::ready)
            << "Collection thread didn't terminate within 1 second.";
    ASSERT_EQ(collector->mCurrCollectionEvent, CollectionEvent::TERMINATED);

    // If the collection doesn't auto-terminate, the test would hang unless the
    // collector is terminated explicitly. Thus always call terminate here.
    collector->terminate();
}
804 
// Verifies that a collection error (empty stub caches) causes the collection
// thread to terminate itself.
TEST(IoPerfCollectionTest, TestCollectionTerminatesOnError) {
    sp<IoPerfCollection> collector = new IoPerfCollection();
    collector->mUidIoStats = new UidIoStatsStub(true);
    collector->mProcStat = new ProcStatStub(true);
    collector->mProcPidStat = new ProcPidStatStub(true);

    // Stub caches are empty so polling them should trigger error.
    const auto& ret = collector->start();
    ASSERT_TRUE(ret) << ret.error().message();

    // Join from a helper task so the wait can be bounded to one second.
    auto joinFuture = std::async([&]() {
        if (collector->mCollectionThread.joinable()) {
            collector->mCollectionThread.join();
        }
    });
    ASSERT_EQ(joinFuture.wait_for(1s), std::future_status::ready)
            << "Collection thread didn't terminate within 1 second.";
    ASSERT_EQ(collector->mCurrCollectionEvent, CollectionEvent::TERMINATED);

    // When the collection doesn't auto-terminate on error, the test will hang if the collector is
    // not terminated explicitly. Thus call terminate to avoid this.
    collector->terminate();
}
828 
// Verifies that a custom collection started with |kFilterPackagesFlag| reports only the
// listed packages in the top-N categories, and that |mTopNStatsPerCategory| is ignored
// while the filter is active (limit is 1 but two packages are expected below).
TEST(IoPerfCollectionTest, TestCustomCollectionFiltersPackageNames) {
    sp<UidIoStatsStub> uidIoStatsStub = new UidIoStatsStub(true);
    sp<ProcStatStub> procStatStub = new ProcStatStub(true);
    sp<ProcPidStatStub> procPidStatStub = new ProcPidStatStub(true);
    sp<LooperStub> looperStub = new LooperStub();

    sp<IoPerfCollection> collector = new IoPerfCollection();
    collector->mUidIoStats = uidIoStatsStub;
    collector->mProcStat = procStatStub;
    collector->mProcPidStat = procPidStatStub;
    collector->mHandlerLooper = looperStub;
    // Filter by package name should ignore this limit.
    collector->mTopNStatsPerCategory = 1;

    auto ret = collector->start();
    ASSERT_TRUE(ret) << ret.error().message();

    // Dummy boot-time collection
    uidIoStatsStub->push({});
    procStatStub->push(ProcStatInfo{});
    procPidStatStub->push({});
    ret = looperStub->pollCache();
    ASSERT_TRUE(ret) << ret.error().message();

    // Dummy Periodic collection
    ret = collector->onBootFinished();
    ASSERT_TRUE(ret) << ret.error().message();
    uidIoStatsStub->push({});
    procStatStub->push(ProcStatInfo{});
    procPidStatStub->push({});
    ret = looperStub->pollCache();
    ASSERT_TRUE(ret) << ret.error().message();

    // Start custom Collection
    Vector<String16> args;
    args.push_back(String16(kStartCustomCollectionFlag));
    args.push_back(String16(kIntervalFlag));
    args.push_back(String16(std::to_string(kTestCustomInterval.count()).c_str()));
    args.push_back(String16(kMaxDurationFlag));
    args.push_back(String16(std::to_string(kTestCustomCollectionDuration.count()).c_str()));
    args.push_back(String16(kFilterPackagesFlag));
    args.push_back(String16("android.car.cts,system_server"));

    ret = collector->dump(-1, args);
    ASSERT_TRUE(ret.ok()) << ret.error().message();

    // Custom collection
    // Pre-populate the uid -> package name cache so the filter can match by name.
    // uid 3456 maps to "random_process", which is NOT in the filter and so must be
    // absent from the expected top-N entries below.
    collector->mUidToPackageNameMapping[1009] = "android.car.cts";
    collector->mUidToPackageNameMapping[2001] = "system_server";
    collector->mUidToPackageNameMapping[3456] = "random_process";
    uidIoStatsStub->push({
            {1009, {.uid = 1009, .ios = {0, 14000, 0, 16000, 0, 100}}},
            {2001, {.uid = 2001, .ios = {0, 3400, 0, 6700, 0, 200}}},
            {3456, {.uid = 3456, .ios = {0, 4200, 0, 5600, 0, 300}}},
    });
    procStatStub->push(ProcStatInfo{
            /*stats=*/{2900, 7900, 4900, 8900, /*ioWaitTime=*/5900, 6966, 7980, 0, 0, 2930},
            /*runnableCnt=*/100,
            /*ioBlockedCnt=*/57,
    });
    procPidStatStub->push({{.tgid = 100,
                            .uid = 1009,
                            .process = {.pid = 100,
                                        .comm = "cts_test",
                                        .state = "D",
                                        .ppid = 1,
                                        .majorFaults = 50900,
                                        .numThreads = 2,
                                        .startTime = 234},
                            .threads = {{100,
                                         {.pid = 100,
                                          .comm = "cts_test",
                                          .state = "D",
                                          .ppid = 1,
                                          .majorFaults = 50900,
                                          .numThreads = 1,
                                          .startTime = 234}},
                                        {200,
                                         {.pid = 200,
                                          .comm = "cts_test_2",
                                          .state = "D",
                                          .ppid = 1,
                                          .majorFaults = 0,
                                          .numThreads = 1,
                                          .startTime = 290}}}},
                           {.tgid = 1000,
                            .uid = 2001,
                            .process = {.pid = 1000,
                                        .comm = "system_server",
                                        .state = "D",
                                        .ppid = 1,
                                        .majorFaults = 1234,
                                        .numThreads = 1,
                                        .startTime = 345},
                            .threads = {{1000,
                                         {.pid = 1000,
                                          .comm = "system_server",
                                          .state = "D",
                                          .ppid = 1,
                                          .majorFaults = 1234,
                                          .numThreads = 1,
                                          .startTime = 345}}}},
                           {.tgid = 4000,
                            .uid = 3456,
                            .process = {.pid = 4000,
                                        .comm = "random_process",
                                        .state = "D",
                                        .ppid = 1,
                                        .majorFaults = 3456,
                                        .numThreads = 1,
                                        .startTime = 890},
                            .threads = {{4000,
                                         {.pid = 4000,
                                          .comm = "random_process",
                                          .state = "D",
                                          .ppid = 1,
                                          .majorFaults = 50900,
                                          .numThreads = 1,
                                          .startTime = 890}}}}});
    // Both filtered packages appear in each top-N list (limit of 1 ignored), while
    // the totals still include the filtered-out uid 3456:
    // reads 14000 + 3400 + 4200 = 21600, writes 16000 + 6700 + 5600 = 28300,
    // fsync 100 + 200 + 300 = 600, major faults 50900 + 1234 + 3456 = 55590.
    IoPerfRecord expected = {
            .uidIoPerfData = {.topNReads = {{.userId = 0,
                                             .packageName = "android.car.cts",
                                             .bytes = {0, 14000},
                                             .fsync{0, 100}},
                                            {.userId = 0,
                                             .packageName = "system_server",
                                             .bytes = {0, 3400},
                                             .fsync{0, 200}}},
                              .topNWrites = {{.userId = 0,
                                              .packageName = "android.car.cts",
                                              .bytes = {0, 16000},
                                              .fsync{0, 100}},
                                             {.userId = 0,
                                              .packageName = "system_server",
                                              .bytes = {0, 6700},
                                              .fsync{0, 200}}},
                              .total = {{0, 21600}, {0, 28300}, {0, 600}}},
            .systemIoPerfData = {.cpuIoWaitTime = 5900,
                                 .totalCpuTime = 48376,
                                 .ioBlockedProcessesCnt = 57,
                                 .totalProcessesCnt = 157},
            .processIoPerfData =
                    {.topNIoBlockedUids = {{0, "android.car.cts", 2, {{"cts_test", 2}}},
                                           {0, "system_server", 1, {{"system_server", 1}}}},
                     .topNIoBlockedUidsTotalTaskCnt = {2, 1},
                     .topNMajorFaultUids = {{0, "android.car.cts", 50900, {{"cts_test", 50900}}},
                                            {0, "system_server", 1234, {{"system_server", 1234}}}},
                     .totalMajorFaults = 55590,
                     .majorFaultsPercentChange = 0},
    };
    ret = looperStub->pollCache();
    ASSERT_TRUE(ret) << ret.error().message();
    ASSERT_EQ(looperStub->numSecondsElapsed(), 0) << "Custom collection didn't start immediately";

    ASSERT_EQ(collector->mCurrCollectionEvent, CollectionEvent::CUSTOM);
    ASSERT_EQ(collector->mCustomCollection.records.size(), 1);
    ASSERT_TRUE(isEqual(collector->mCustomCollection.records[0], expected))
            << "Custom collection record doesn't match.\nExpected:\n"
            << toString(expected) << "\nActual:\n"
            << toString(collector->mCustomCollection.records[0]);
    collector->terminate();
}
991 
TEST(IoPerfCollectionTest,TestCustomCollectionTerminatesAfterMaxDuration)992 TEST(IoPerfCollectionTest, TestCustomCollectionTerminatesAfterMaxDuration) {
993     sp<UidIoStatsStub> uidIoStatsStub = new UidIoStatsStub(true);
994     sp<ProcStatStub> procStatStub = new ProcStatStub(true);
995     sp<ProcPidStatStub> procPidStatStub = new ProcPidStatStub(true);
996     sp<LooperStub> looperStub = new LooperStub();
997 
998     sp<IoPerfCollection> collector = new IoPerfCollection();
999     collector->mUidIoStats = uidIoStatsStub;
1000     collector->mProcStat = procStatStub;
1001     collector->mProcPidStat = procPidStatStub;
1002     collector->mHandlerLooper = looperStub;
1003 
1004     auto ret = collector->start();
1005     ASSERT_TRUE(ret) << ret.error().message();
1006 
1007     // Dummy boot-time collection
1008     uidIoStatsStub->push({});
1009     procStatStub->push(ProcStatInfo{});
1010     procPidStatStub->push({});
1011     ret = looperStub->pollCache();
1012     ASSERT_TRUE(ret) << ret.error().message();
1013 
1014     // Dummy Periodic collection
1015     ret = collector->onBootFinished();
1016     ASSERT_TRUE(ret) << ret.error().message();
1017     uidIoStatsStub->push({});
1018     procStatStub->push(ProcStatInfo{});
1019     procPidStatStub->push({});
1020     ret = looperStub->pollCache();
1021     ASSERT_TRUE(ret) << ret.error().message();
1022 
1023     // Start custom Collection
1024     Vector<String16> args;
1025     args.push_back(String16(kStartCustomCollectionFlag));
1026     args.push_back(String16(kIntervalFlag));
1027     args.push_back(String16(std::to_string(kTestCustomInterval.count()).c_str()));
1028     args.push_back(String16(kMaxDurationFlag));
1029     args.push_back(String16(std::to_string(kTestCustomCollectionDuration.count()).c_str()));
1030 
1031     ret = collector->dump(-1, args);
1032     ASSERT_TRUE(ret.ok()) << ret.error().message();
1033     // Maximum custom collection iterations during |kTestCustomCollectionDuration|.
1034     int maxIterations =
1035             static_cast<int>(kTestCustomCollectionDuration.count() / kTestCustomInterval.count());
1036     for (int i = 0; i < maxIterations; ++i) {
1037         ASSERT_TRUE(ret) << ret.error().message();
1038         uidIoStatsStub->push({});
1039         procStatStub->push(ProcStatInfo{});
1040         procPidStatStub->push({});
1041         ret = looperStub->pollCache();
1042         ASSERT_TRUE(ret) << ret.error().message();
1043         int secondsElapsed = (i == 0 ? 0 : kTestCustomInterval.count());
1044         ASSERT_EQ(looperStub->numSecondsElapsed(), secondsElapsed)
1045                 << "Custom collection didn't happen at " << secondsElapsed
1046                 << " seconds interval in iteration " << i;
1047     }
1048 
1049     ASSERT_EQ(collector->mCurrCollectionEvent, CollectionEvent::CUSTOM);
1050     ASSERT_GT(collector->mCustomCollection.records.size(), 0);
1051     // Next looper message was injected during startCustomCollection to end the custom collection
1052     // after |kTestCustomCollectionDuration|. Thus on processing this message the custom collection
1053     // should terminate.
1054     ret = looperStub->pollCache();
1055     ASSERT_TRUE(ret) << ret.error().message();
1056     ASSERT_EQ(looperStub->numSecondsElapsed(),
1057               kTestCustomCollectionDuration.count() % kTestCustomInterval.count())
1058             << "Custom collection did't end after " << kTestCustomCollectionDuration.count()
1059             << " seconds";
1060     ASSERT_EQ(collector->mCurrCollectionEvent, CollectionEvent::PERIODIC);
1061     ASSERT_EQ(collector->mCustomCollection.records.size(), 0)
1062             << "Custom collection records not discarded at the end of the collection";
1063     collector->terminate();
1064 }
1065 
// Verifies that collectUidIoPerfDataLocked parses a valid per-uid I/O stats file,
// computes foreground/background totals, and ranks the top-N readers and writers.
// The second collection reports per-uid deltas relative to the first snapshot.
TEST(IoPerfCollectionTest, TestValidUidIoStatFile) {
    // Format: uid fgRdChar fgWrChar fgRdBytes fgWrBytes bgRdChar bgWrChar bgRdBytes bgWrBytes
    // fgFsync bgFsync
    constexpr char firstSnapshot[] =
        "1001234 5000 1000 3000 500 0 0 0 0 20 0\n"
        "1005678 500 100 30 50 300 400 100 200 45 60\n"
        "1009 0 0 0 0 40000 50000 20000 30000 0 300\n"
        "1001000 4000 3000 2000 1000 400 300 200 100 50 10\n";

    // Totals sum the read/write bytes and fsync columns over all four uids,
    // e.g. fg read bytes: 3000 + 30 + 0 + 2000 = 5030.
    struct UidIoPerfData expectedUidIoPerfData = {};
    expectedUidIoPerfData.total[READ_BYTES][FOREGROUND] = 5030;
    expectedUidIoPerfData.total[READ_BYTES][BACKGROUND] = 20300;
    expectedUidIoPerfData.total[WRITE_BYTES][FOREGROUND] = 1550;
    expectedUidIoPerfData.total[WRITE_BYTES][BACKGROUND] = 30300;
    expectedUidIoPerfData.total[FSYNC_COUNT][FOREGROUND] = 115;
    expectedUidIoPerfData.total[FSYNC_COUNT][BACKGROUND] = 370;
    // Only the top 2 (mTopNStatsPerCategory below) uids per category are expected.
    // Package names: "mount"/"shared:android.uid.system" presumably come from the
    // native uid -> name resolution (android_filesystem_config AIDs); other uids
    // fall back to the uid string — TODO confirm against IoPerfCollection.cpp.
    expectedUidIoPerfData.topNReads.push_back({
            // uid: 1009
            .userId = 0,
            .packageName = "mount",
            .bytes = {0, 20000},
            .fsync = {0, 300},
    });
    expectedUidIoPerfData.topNReads.push_back({
            // uid: 1001234
            .userId = 10,
            .packageName = "1001234",
            .bytes = {3000, 0},
            .fsync = {20, 0},
    });
    expectedUidIoPerfData.topNWrites.push_back({
            // uid: 1009
            .userId = 0,
            .packageName = "mount",
            .bytes = {0, 30000},
            .fsync = {0, 300},
    });
    expectedUidIoPerfData.topNWrites.push_back({
            // uid: 1001000
            .userId = 10,
            .packageName = "shared:android.uid.system",
            .bytes = {1000, 100},
            .fsync = {50, 10},
    });

    TemporaryFile tf;
    ASSERT_NE(tf.fd, -1);
    ASSERT_TRUE(WriteStringToFile(firstSnapshot, tf.path));

    IoPerfCollection collector;
    collector.mUidIoStats = new UidIoStats(tf.path);
    collector.mTopNStatsPerCategory = 2;
    ASSERT_TRUE(collector.mUidIoStats->enabled()) << "Temporary file is inaccessible";

    struct UidIoPerfData actualUidIoPerfData = {};
    auto ret = collector.collectUidIoPerfDataLocked(CollectionInfo{}, &actualUidIoPerfData);
    ASSERT_RESULT_OK(ret);
    EXPECT_TRUE(isEqual(expectedUidIoPerfData, actualUidIoPerfData))
        << "First snapshot doesn't match.\nExpected:\n"
        << toString(expectedUidIoPerfData) << "\nActual:\n"
        << toString(actualUidIoPerfData);

    // Second snapshot overwrites the same file; uid 1009 is gone and uid 1003456
    // is new. Expected values below are deltas against the first snapshot,
    // e.g. uid 1001234 fg read bytes: 7000 - 3000 = 4000.
    constexpr char secondSnapshot[] =
        "1001234 10000 2000 7000 950 0 0 0 0 45 0\n"
        "1005678 600 100 40 50 1000 1000 1000 600 50 70\n"
        "1003456 300 500 200 300 0 0 0 0 50 0\n"
        "1001000 400 300 200 100 40 30 20 10 5 1\n";

    expectedUidIoPerfData = {};
    expectedUidIoPerfData.total[READ_BYTES][FOREGROUND] = 4210;
    expectedUidIoPerfData.total[READ_BYTES][BACKGROUND] = 900;
    expectedUidIoPerfData.total[WRITE_BYTES][FOREGROUND] = 750;
    expectedUidIoPerfData.total[WRITE_BYTES][BACKGROUND] = 400;
    expectedUidIoPerfData.total[FSYNC_COUNT][FOREGROUND] = 80;
    expectedUidIoPerfData.total[FSYNC_COUNT][BACKGROUND] = 10;
    expectedUidIoPerfData.topNReads.push_back({
            // uid: 1001234
            .userId = 10,
            .packageName = "1001234",
            .bytes = {4000, 0},
            .fsync = {25, 0},
    });
    expectedUidIoPerfData.topNReads.push_back({
            // uid: 1005678
            .userId = 10,
            .packageName = "1005678",
            .bytes = {10, 900},
            .fsync = {5, 10},
    });
    expectedUidIoPerfData.topNWrites.push_back({
            // uid: 1001234
            .userId = 10,
            .packageName = "1001234",
            .bytes = {450, 0},
            .fsync = {25, 0},
    });
    expectedUidIoPerfData.topNWrites.push_back({
            // uid: 1005678
            .userId = 10,
            .packageName = "1005678",
            .bytes = {0, 400},
            .fsync = {5, 10},
    });
    ASSERT_TRUE(WriteStringToFile(secondSnapshot, tf.path));
    actualUidIoPerfData = {};
    ret = collector.collectUidIoPerfDataLocked(CollectionInfo{}, &actualUidIoPerfData);
    ASSERT_RESULT_OK(ret);
    EXPECT_TRUE(isEqual(expectedUidIoPerfData, actualUidIoPerfData))
        << "Second snapshot doesn't match.\nExpected:\n"
        << toString(expectedUidIoPerfData) << "\nActual:\n"
        << toString(actualUidIoPerfData);
}
1178 
// Verifies that when the stats file contains fewer uids than the top-N limit,
// each top-N list contains exactly the uids present in the snapshot.
TEST(IoPerfCollectionTest, TestUidIOStatsLessThanTopNStatsLimit) {
    // Format: uid fgRdChar fgWrChar fgRdBytes fgWrBytes bgRdChar bgWrChar bgRdBytes bgWrBytes
    // fgFsync bgFsync
    constexpr char contents[] = "1001234 5000 1000 3000 500 0 0 0 0 20 0\n";

    TemporaryFile statsFile;
    ASSERT_NE(statsFile.fd, -1);
    ASSERT_TRUE(WriteStringToFile(contents, statsFile.path));

    IoPerfCollection collector;
    collector.mUidIoStats = new UidIoStats(statsFile.path);
    // Limit is far larger than the single uid in the snapshot.
    collector.mTopNStatsPerCategory = 10;
    ASSERT_TRUE(collector.mUidIoStats->enabled()) << "Temporary file is inaccessible";

    // Only uid 1001234 is present, so each top-N list has exactly one entry.
    struct UidIoPerfData expected = {};
    expected.total[READ_BYTES][FOREGROUND] = 3000;
    expected.total[READ_BYTES][BACKGROUND] = 0;
    expected.total[WRITE_BYTES][FOREGROUND] = 500;
    expected.total[WRITE_BYTES][BACKGROUND] = 0;
    expected.total[FSYNC_COUNT][FOREGROUND] = 20;
    expected.total[FSYNC_COUNT][BACKGROUND] = 0;
    expected.topNReads.push_back({
            // uid: 1001234
            .userId = 10,
            .packageName = "1001234",
            .bytes = {3000, 0},
            .fsync = {20, 0},
    });
    expected.topNWrites.push_back({
            // uid: 1001234
            .userId = 10,
            .packageName = "1001234",
            .bytes = {500, 0},
            .fsync = {20, 0},
    });

    struct UidIoPerfData actual = {};
    const auto& ret = collector.collectUidIoPerfDataLocked(CollectionInfo{}, &actual);
    ASSERT_RESULT_OK(ret);
    EXPECT_TRUE(isEqual(expected, actual))
        << "Collected data doesn't match.\nExpected:\n"
        << toString(expected) << "\nActual:\n"
        << toString(actual);
}
1223 
TEST(IoPerfCollectionTest,TestValidProcStatFile)1224 TEST(IoPerfCollectionTest, TestValidProcStatFile) {
1225     constexpr char firstSnapshot[] =
1226             "cpu  6200 5700 1700 3100 1100 5200 3900 0 0 0\n"
1227             "cpu0 2400 2900 600 690 340 4300 2100 0 0 0\n"
1228             "cpu1 1900 2380 510 760 51 370 1500 0 0 0\n"
1229             "cpu2 900 400 400 1000 600 400 160 0 0 0\n"
1230             "cpu3 1000 20 190 650 109 130 140 0 0 0\n"
1231             "intr 694351583 0 0 0 297062868 0 5922464 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 "
1232             "0 0\n"
1233             // Skipped most of the intr line as it is not important for testing the ProcStat parsing
1234             // logic.
1235             "ctxt 579020168\n"
1236             "btime 1579718450\n"
1237             "processes 113804\n"
1238             "procs_running 17\n"
1239             "procs_blocked 5\n"
1240             "softirq 33275060 934664 11958403 5111 516325 200333 0 341482 10651335 0 8667407\n";
1241     struct SystemIoPerfData expectedSystemIoPerfData = {
1242             .cpuIoWaitTime = 1100,
1243             .totalCpuTime = 26900,
1244             .ioBlockedProcessesCnt = 5,
1245             .totalProcessesCnt = 22,
1246     };
1247 
1248     TemporaryFile tf;
1249     ASSERT_NE(tf.fd, -1);
1250     ASSERT_TRUE(WriteStringToFile(firstSnapshot, tf.path));
1251 
1252     IoPerfCollection collector;
1253     collector.mProcStat = new ProcStat(tf.path);
1254     ASSERT_TRUE(collector.mProcStat->enabled()) << "Temporary file is inaccessible";
1255 
1256     struct SystemIoPerfData actualSystemIoPerfData = {};
1257     auto ret = collector.collectSystemIoPerfDataLocked(&actualSystemIoPerfData);
1258     ASSERT_RESULT_OK(ret);
1259     EXPECT_TRUE(isEqual(expectedSystemIoPerfData, actualSystemIoPerfData))
1260             << "First snapshot doesn't match.\nExpected:\n"
1261             << toString(expectedSystemIoPerfData) << "\nActual:\n"
1262             << toString(actualSystemIoPerfData);
1263 
1264     constexpr char secondSnapshot[] =
1265             "cpu  16200 8700 2000 4100 2200 6200 5900 0 0 0\n"
1266             "cpu0 4400 3400 700 890 800 4500 3100 0 0 0\n"
1267             "cpu1 5900 3380 610 960 100 670 2000 0 0 0\n"
1268             "cpu2 2900 1000 450 1400 800 600 460 0 0 0\n"
1269             "cpu3 3000 920 240 850 500 430 340 0 0 0\n"
1270             "intr 694351583 0 0 0 297062868 0 5922464 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 "
1271             "0 0\n"
1272             "ctxt 579020168\n"
1273             "btime 1579718450\n"
1274             "processes 113804\n"
1275             "procs_running 10\n"
1276             "procs_blocked 2\n"
1277             "softirq 33275060 934664 11958403 5111 516325 200333 0 341482 10651335 0 8667407\n";
1278     expectedSystemIoPerfData = {
1279             .cpuIoWaitTime = 1100,
1280             .totalCpuTime = 18400,
1281             .ioBlockedProcessesCnt = 2,
1282             .totalProcessesCnt = 12,
1283     };
1284 
1285     ASSERT_TRUE(WriteStringToFile(secondSnapshot, tf.path));
1286     actualSystemIoPerfData = {};
1287     ret = collector.collectSystemIoPerfDataLocked(&actualSystemIoPerfData);
1288     ASSERT_RESULT_OK(ret);
1289     EXPECT_TRUE(isEqual(expectedSystemIoPerfData, actualSystemIoPerfData))
1290             << "Second snapshot doesn't match.\nExpected:\n"
1291             << toString(expectedSystemIoPerfData) << "\nActual:\n"
1292             << toString(actualSystemIoPerfData);
1293 }
1294 
TEST(IoPerfCollectionTest,TestValidProcPidContents)1295 TEST(IoPerfCollectionTest, TestValidProcPidContents) {
1296     std::unordered_map<uint32_t, std::vector<uint32_t>> pidToTids = {
1297             {1, {1, 453}},
1298             {2546, {2546, 3456, 4789}},
1299             {7890, {7890, 8978, 12890}},
1300             {18902, {18902, 21345, 32452}},
1301             {28900, {28900}},
1302     };
1303     std::unordered_map<uint32_t, std::string> perProcessStat = {
1304             {1, "1 (init) S 0 0 0 0 0 0 0 0 220 0 0 0 0 0 0 0 2 0 0\n"},
1305             {2546, "2546 (system_server) R 1 0 0 0 0 0 0 0 6000 0 0 0 0 0 0 0 3 0 1000\n"},
1306             {7890, "7890 (logd) D 1 0 0 0 0 0 0 0 15000 0 0 0 0 0 0 0 3 0 2345\n"},
1307             {18902, "18902 (disk I/O) D 1 0 0 0 0 0 0 0 45678 0 0 0 0 0 0 0 3 0 897654\n"},
1308             {28900, "28900 (tombstoned) D 1 0 0 0 0 0 0 0 89765 0 0 0 0 0 0 0 3 0 2345671\n"},
1309     };
1310     std::unordered_map<uint32_t, std::string> perProcessStatus = {
1311             {1, "Pid:\t1\nTgid:\t1\nUid:\t0\t0\t0\t0\n"},
1312             {2546, "Pid:\t2546\nTgid:\t2546\nUid:\t1001000\t1001000\t1001000\t1001000\n"},
1313             {7890, "Pid:\t7890\nTgid:\t7890\nUid:\t1001000\t1001000\t1001000\t1001000\n"},
1314             {18902, "Pid:\t18902\nTgid:\t18902\nUid:\t1009\t1009\t1009\t1009\n"},
1315             {28900, "Pid:\t28900\nTgid:\t28900\nUid:\t1001234\t1001234\t1001234\t1001234\n"},
1316     };
1317     std::unordered_map<uint32_t, std::string> perThreadStat = {
1318             {1, "1 (init) S 0 0 0 0 0 0 0 0 200 0 0 0 0 0 0 0 2 0 0\n"},
1319             {453, "453 (init) S 0 0 0 0 0 0 0 0 20 0 0 0 0 0 0 0 2 0 275\n"},
1320             {2546, "2546 (system_server) R 1 0 0 0 0 0 0 0 1000 0 0 0 0 0 0 0 3 0 1000\n"},
1321             {3456, "3456 (system_server) S 1 0 0 0 0 0 0 0 3000 0 0 0 0 0 0 0 3 0 2300\n"},
1322             {4789, "4789 (system_server) D 1 0 0 0 0 0 0 0 2000 0 0 0 0 0 0 0 3 0 4500\n"},
1323             {7890, "7890 (logd) D 1 0 0 0 0 0 0 0 10000 0 0 0 0 0 0 0 3 0 2345\n"},
1324             {8978, "8978 (logd) D 1 0 0 0 0 0 0 0 1000 0 0 0 0 0 0 0 3 0 2500\n"},
1325             {12890, "12890 (logd) D 1 0 0 0 0 0 0 0 500 0 0 0 0 0 0 0 3 0 2900\n"},
1326             {18902, "18902 (disk I/O) D 1 0 0 0 0 0 0 0 30000 0 0 0 0 0 0 0 3 0 897654\n"},
1327             {21345, "21345 (disk I/O) D 1 0 0 0 0 0 0 0 15000 0 0 0 0 0 0 0 3 0 904000\n"},
1328             {32452, "32452 (disk I/O) D 1 0 0 0 0 0 0 0 678 0 0 0 0 0 0 0 3 0 1007000\n"},
1329             {28900, "28900 (tombstoned) D 1 0 0 0 0 0 0 0 89765 0 0 0 0 0 0 0 3 0 2345671\n"},
1330     };
1331     struct ProcessIoPerfData expectedProcessIoPerfData = {};
1332     expectedProcessIoPerfData.topNIoBlockedUids.push_back({
1333             // uid: 1001000
1334             .userId = 10,
1335             .packageName = "shared:android.uid.system",
1336             .count = 4,
1337             .topNProcesses = {{"logd", 3}, {"system_server", 1}},
1338     });
1339     expectedProcessIoPerfData.topNIoBlockedUidsTotalTaskCnt.push_back(6);
1340     expectedProcessIoPerfData.topNIoBlockedUids.push_back({
1341             // uid: 1009
1342             .userId = 0,
1343             .packageName = "mount",
1344             .count = 3,
1345             .topNProcesses = {{"disk I/O", 3}},
1346     });
1347     expectedProcessIoPerfData.topNIoBlockedUidsTotalTaskCnt.push_back(3);
1348     expectedProcessIoPerfData.topNMajorFaultUids.push_back({
1349             // uid: 1001234
1350             .userId = 10,
1351             .packageName = "1001234",
1352             .count = 89765,
1353             .topNProcesses = {{"tombstoned", 89765}},
1354     });
1355     expectedProcessIoPerfData.topNMajorFaultUids.push_back({
1356             // uid: 1009
1357             .userId = 0,
1358             .packageName = "mount",
1359             .count = 45678,
1360             .topNProcesses = {{"disk I/O", 45678}},
1361     });
1362     expectedProcessIoPerfData.totalMajorFaults = 156663;
1363     expectedProcessIoPerfData.majorFaultsPercentChange = 0;
1364 
1365     TemporaryDir firstSnapshot;
1366     auto ret = populateProcPidDir(firstSnapshot.path, pidToTids, perProcessStat, perProcessStatus,
1367                                   perThreadStat);
1368     ASSERT_TRUE(ret) << "Failed to populate proc pid dir: " << ret.error();
1369 
1370     IoPerfCollection collector;
1371     collector.mProcPidStat = new ProcPidStat(firstSnapshot.path);
1372     collector.mTopNStatsPerCategory = 2;
1373     collector.mTopNStatsPerSubcategory = 2;
1374     ASSERT_TRUE(collector.mProcPidStat->enabled())
1375             << "Files under the temporary proc directory are inaccessible";
1376 
1377     struct ProcessIoPerfData actualProcessIoPerfData = {};
1378     ret = collector.collectProcessIoPerfDataLocked(CollectionInfo{}, &actualProcessIoPerfData);
1379     ASSERT_TRUE(ret) << "Failed to collect first snapshot: " << ret.error();
1380     EXPECT_TRUE(isEqual(expectedProcessIoPerfData, actualProcessIoPerfData))
1381             << "First snapshot doesn't match.\nExpected:\n"
1382             << toString(expectedProcessIoPerfData) << "\nActual:\n"
1383             << toString(actualProcessIoPerfData);
1384 
1385     pidToTids = {
1386             {1, {1, 453}},
1387             {2546, {2546, 3456, 4789}},
1388     };
1389     perProcessStat = {
1390             {1, "1 (init) S 0 0 0 0 0 0 0 0 880 0 0 0 0 0 0 0 2 0 0\n"},
1391             {2546, "2546 (system_server) R 1 0 0 0 0 0 0 0 18000 0 0 0 0 0 0 0 3 0 1000\n"},
1392     };
1393     perProcessStatus = {
1394             {1, "Pid:\t1\nTgid:\t1\nUid:\t0\t0\t0\t0\n"},
1395             {2546, "Pid:\t2546\nTgid:\t2546\nUid:\t1001000\t1001000\t1001000\t1001000\n"},
1396     };
1397     perThreadStat = {
1398             {1, "1 (init) S 0 0 0 0 0 0 0 0 800 0 0 0 0 0 0 0 2 0 0\n"},
1399             {453, "453 (init) S 0 0 0 0 0 0 0 0 80 0 0 0 0 0 0 0 2 0 275\n"},
1400             {2546, "2546 (system_server) R 1 0 0 0 0 0 0 0 3000 0 0 0 0 0 0 0 3 0 1000\n"},
1401             {3456, "3456 (system_server) S 1 0 0 0 0 0 0 0 9000 0 0 0 0 0 0 0 3 0 2300\n"},
1402             {4789, "4789 (system_server) D 1 0 0 0 0 0 0 0 6000 0 0 0 0 0 0 0 3 0 4500\n"},
1403     };
1404     expectedProcessIoPerfData = {};
1405     expectedProcessIoPerfData.topNIoBlockedUids.push_back({
1406             // uid: 1001000
1407             .userId = 10,
1408             .packageName = "shared:android.uid.system",
1409             .count = 1,
1410             .topNProcesses = {{"system_server", 1}},
1411     });
1412     expectedProcessIoPerfData.topNIoBlockedUidsTotalTaskCnt.push_back(3);
1413     expectedProcessIoPerfData.topNMajorFaultUids.push_back({
1414             // uid: 1001000
1415             .userId = 10,
1416             .packageName = "shared:android.uid.system",
1417             .count = 12000,
1418             .topNProcesses = {{"system_server", 12000}},
1419     });
1420     expectedProcessIoPerfData.topNMajorFaultUids.push_back({
1421             // uid: 0
1422             .userId = 0,
1423             .packageName = "root",
1424             .count = 660,
1425             .topNProcesses = {{"init", 660}},
1426     });
1427     expectedProcessIoPerfData.totalMajorFaults = 12660;
1428     expectedProcessIoPerfData.majorFaultsPercentChange = ((12660.0 - 156663.0) / 156663.0) * 100;
1429 
1430     TemporaryDir secondSnapshot;
1431     ret = populateProcPidDir(secondSnapshot.path, pidToTids, perProcessStat, perProcessStatus,
1432                              perThreadStat);
1433     ASSERT_TRUE(ret) << "Failed to populate proc pid dir: " << ret.error();
1434 
1435     collector.mProcPidStat->mPath = secondSnapshot.path;
1436 
1437     actualProcessIoPerfData = {};
1438     ret = collector.collectProcessIoPerfDataLocked(CollectionInfo{}, &actualProcessIoPerfData);
1439     ASSERT_TRUE(ret) << "Failed to collect second snapshot: " << ret.error();
1440     EXPECT_TRUE(isEqual(expectedProcessIoPerfData, actualProcessIoPerfData))
1441             << "Second snapshot doesn't match.\nExpected:\n"
1442             << toString(expectedProcessIoPerfData) << "\nActual:\n"
1443             << toString(actualProcessIoPerfData);
1444 }
1445 
TEST(IoPerfCollectionTest, TestProcPidContentsLessThanTopNStatsLimit) {
    // A single process/UID with top-N limits larger than the available stats: the lone UID
    // should still be reported in the major-fault list.
    std::unordered_map<uint32_t, std::vector<uint32_t>> pidToTids = {
            {1, {1, 453}},
    };
    std::unordered_map<uint32_t, std::string> perProcessStat = {
            {1, "1 (init) S 0 0 0 0 0 0 0 0 880 0 0 0 0 0 0 0 2 0 0\n"},
    };
    std::unordered_map<uint32_t, std::string> perProcessStatus = {
            {1, "Pid:\t1\nTgid:\t1\nUid:\t0\t0\t0\t0\n"},
    };
    std::unordered_map<uint32_t, std::string> perThreadStat = {
            {1, "1 (init) S 0 0 0 0 0 0 0 0 800 0 0 0 0 0 0 0 2 0 0\n"},
            {453, "453 (init) S 0 0 0 0 0 0 0 0 80 0 0 0 0 0 0 0 2 0 275\n"},
    };

    TemporaryDir prodDir;
    auto result = populateProcPidDir(prodDir.path, pidToTids, perProcessStat, perProcessStatus,
                                     perThreadStat);
    ASSERT_TRUE(result) << "Failed to populate proc pid dir: " << result.error();

    struct ProcessIoPerfData expectedProcessIoPerfData = {};
    expectedProcessIoPerfData.topNMajorFaultUids.push_back({
            // uid: 0
            .userId = 0,
            .packageName = "root",
            .count = 880,
            .topNProcesses = {{"init", 880}},
    });
    expectedProcessIoPerfData.totalMajorFaults = 880;
    expectedProcessIoPerfData.majorFaultsPercentChange = 0.0;

    IoPerfCollection collector;
    // Limits intentionally exceed the single available UID and its one process.
    collector.mTopNStatsPerCategory = 5;
    collector.mTopNStatsPerSubcategory = 3;
    collector.mProcPidStat = new ProcPidStat(prodDir.path);

    struct ProcessIoPerfData actualProcessIoPerfData = {};
    result = collector.collectProcessIoPerfDataLocked(CollectionInfo{}, &actualProcessIoPerfData);
    ASSERT_TRUE(result) << "Failed to collect proc pid contents: " << result.error();
    EXPECT_TRUE(isEqual(expectedProcessIoPerfData, actualProcessIoPerfData))
            << "proc pid contents don't match.\nExpected:\n"
            << toString(expectedProcessIoPerfData) << "\nActual:\n"
            << toString(actualProcessIoPerfData);
}
1488 
TEST(IoPerfCollectionTest, TestHandlesInvalidDumpArguments) {
    // dump() must reject each of the malformed custom-collection argument lists below.
    sp<IoPerfCollection> collector = new IoPerfCollection();
    collector->start();

    // Converts an initializer-style list into the Vector<String16> that dump() expects.
    const auto makeArgs = [](const std::vector<String16>& rawArgs) {
        Vector<String16> args;
        for (const auto& arg : rawArgs) {
            args.push_back(arg);
        }
        return args;
    };

    // Unrecognized flag/value pair after the start-collection flag.
    ASSERT_FALSE(collector->dump(-1,
                                 makeArgs({String16(kStartCustomCollectionFlag),
                                           String16("Invalid flag"), String16("Invalid value")}))
                         .ok());

    // Non-numeric interval value.
    ASSERT_FALSE(collector->dump(-1,
                                 makeArgs({String16(kStartCustomCollectionFlag),
                                           String16(kIntervalFlag), String16("Invalid interval")}))
                         .ok());

    // Non-numeric max duration value.
    ASSERT_FALSE(collector->dump(-1,
                                 makeArgs({String16(kStartCustomCollectionFlag),
                                           String16(kMaxDurationFlag),
                                           String16("Invalid duration")}))
                         .ok());

    // End-collection flag doesn't accept extra arguments.
    ASSERT_FALSE(
            collector->dump(-1,
                            makeArgs({String16(kEndCustomCollectionFlag),
                                      String16(kMaxDurationFlag),
                                      String16(std::to_string(kTestCustomCollectionDuration.count())
                                                       .c_str())}))
                    .ok());

    // Completely unknown flag.
    ASSERT_FALSE(collector->dump(-1, makeArgs({String16("Invalid flag")})).ok());

    collector->terminate();
}
1521 
1522 }  // namespace watchdog
1523 }  // namespace automotive
1524 }  // namespace android
1525