1 // Copyright 2017 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //#define LOG_NDEBUG 0
6 #define LOG_TAG "C2VDAComponent_test"
7
8 #include <C2VDAComponent.h>
9
10 #include <C2Buffer.h>
11 #include <C2BufferPriv.h>
12 #include <C2Component.h>
13 #include <C2PlatformSupport.h>
14 #include <C2Work.h>
15 #include <SimpleC2Interface.h>
16
17 #include <base/files/file.h>
18 #include <base/files/file_path.h>
19 #include <base/md5.h>
20 #include <base/strings/string_piece.h>
21 #include <base/strings/string_split.h>
22
23 #include <gtest/gtest.h>
24 #include <media/DataSource.h>
25 #include <media/IMediaHTTPService.h>
26 #include <media/MediaExtractor.h>
27 #include <media/MediaSource.h>
28 #include <media/stagefright/DataSourceFactory.h>
29 #include <media/stagefright/MediaDefs.h>
30 #include <media/stagefright/MediaErrors.h>
31 #include <media/stagefright/MediaExtractorFactory.h>
32 #include <media/stagefright/MetaData.h>
33 #include <media/stagefright/Utils.h>
34 #include <media/stagefright/foundation/ABuffer.h>
35 #include <media/stagefright/foundation/ALooper.h>
36 #include <media/stagefright/foundation/AMessage.h>
37 #include <media/stagefright/foundation/AUtils.h>
38 #include <utils/Log.h>
39
40 #include <fcntl.h>
41 #include <inttypes.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <sys/stat.h>
46 #include <sys/time.h>
47 #include <sys/types.h>
48 #include <algorithm>
49 #include <chrono>
50 #include <thread>
51
52 using namespace std::chrono_literals;
53
54 namespace {
55
56 const int kMD5StringLength = 32;
57
58 // Read in golden MD5s for the sanity play-through check of this video
readGoldenMD5s(const std::string & videoFile,std::vector<std::string> * md5Strings)59 void readGoldenMD5s(const std::string& videoFile, std::vector<std::string>* md5Strings) {
60 base::FilePath filepath(videoFile + ".md5");
61 std::string allMD5s;
62 base::ReadFileToString(filepath, &allMD5s);
63 *md5Strings = base::SplitString(allMD5s, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
64 // Check these are legitimate MD5s.
65 for (const std::string& md5String : *md5Strings) {
66 // Ignore the empty string added by SplitString. Ignore comments.
67 if (!md5String.length() || md5String.at(0) == '#') {
68 continue;
69 }
70 if (static_cast<int>(md5String.length()) != kMD5StringLength) {
71 fprintf(stderr, "MD5 length error: %s\n", md5String.c_str());
72 }
73 if (std::count_if(md5String.begin(), md5String.end(), isxdigit) != kMD5StringLength) {
74 fprintf(stderr, "MD5 includes non-hex char: %s\n", md5String.c_str());
75 }
76 }
77 if (md5Strings->empty()) {
78 fprintf(stderr, "MD5 checksum file (%s) missing or empty.\n",
79 filepath.MaybeAsASCII().c_str());
80 }
81 }
82
83 // Get file path name of recording raw YUV
getRecordOutputPath(const std::string & videoFile,int width,int height)84 base::FilePath getRecordOutputPath(const std::string& videoFile, int width, int height) {
85 base::FilePath filepath(videoFile);
86 filepath = filepath.RemoveExtension();
87 std::string suffix = "_output_" + std::to_string(width) + "x" + std::to_string(height) + ".yuv";
88 return base::FilePath(filepath.value() + suffix);
89 }
90 } // namespace
91
92 namespace android {
93
// Input video data parameters. This could be overwritten by user argument [-i].
// The syntax of each column is:
// filename:componentName:width:height:numFrames:numFragments
// - |filename| is the file path to mp4 (h264) or webm (VP8/9) video.
// - |componentName| specifies the name of decoder component.
// - |width| and |height| are for video size (in pixels).
// - |numFrames| is the number of picture frames.
// - |numFragments| is the NALU (h264) or frame (VP8/9) count by MediaExtractor.
const char* gTestVideoData = "bear.mp4:c2.vda.avc.decoder:640:360:82:84";
//const char* gTestVideoData = "bear-vp8.webm:c2.vda.vp8.decoder:640:360:82:82";
//const char* gTestVideoData = "bear-vp9.webm:c2.vda.vp9.decoder:320:240:82:82";

// Record decoded output frames as raw YUV format.
// The recorded file will be named as "<video_name>_output_<width>x<height>.yuv" under the same
// folder of input video file.
bool gRecordOutputYUV = false;

// Component names under test; parseTestVideoData() maps these to
// TestVideoFile::CodecType values.
const std::string kH264DecoderName = "c2.vda.avc.decoder";
const std::string kVP8DecoderName = "c2.vda.vp8.decoder";
const std::string kVP9DecoderName = "c2.vda.vp9.decoder";

// Magic constants for indicating the timing of flush being called. These use
// negative values so they cannot collide with real (non-negative) work indices.
enum FlushPoint : int { END_OF_STREAM_FLUSH = -3, MID_STREAM_FLUSH = -2, NO_FLUSH = -1 };
117
// Aggregates the parsed fields of gTestVideoData plus the media source used to
// feed input bitstream fragments to the component under test.
struct TestVideoFile {
    enum class CodecType { UNKNOWN, H264, VP8, VP9 };

    std::string mFilename;       // path of the input video file
    std::string mComponentName;  // name of the decoder component under test
    CodecType mCodec = CodecType::UNKNOWN;
    int mWidth = -1;             // visible width in pixels
    int mHeight = -1;            // visible height in pixels
    int mNumFrames = -1;         // expected number of decoded picture frames
    int mNumFragments = -1;      // NALU (h264) or frame (VP8/9) count from extractor
    sp<IMediaSource> mData;      // media track providing the input bitstream
};
130
// C2Buffer wrapper sharing the whole valid range (offset..offset+size) of a
// linear block, used for real input bitstream data.
class C2VDALinearBuffer : public C2Buffer {
public:
    explicit C2VDALinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
          : C2Buffer({block->share(block->offset(), block->size(), C2Fence())}) {}
};
136
// C2Buffer wrapper sharing a zero-length range of a linear block; used as the
// payload-less input buffer of the dummy end-of-stream work.
class C2VDADummyLinearBuffer : public C2Buffer {
public:
    explicit C2VDADummyLinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
          : C2Buffer({block->share(0, 0, C2Fence())}) {}
};
142
class Listener;

// Test fixture for C2VDAComponent decode tests. Owns the component listener,
// the input buffer allocator/pool, and the queues and counters shared between
// the main (input producer) thread and the listener (output consumer) thread.
class C2VDAComponentTest : public ::testing::Test {
public:
    // Listener-thread entry: collects works returned by the component.
    void onWorkDone(std::weak_ptr<C2Component> component,
                    std::list<std::unique_ptr<C2Work>> workItems);
    // Tripped callback; unused by these tests (no-op).
    void onTripped(std::weak_ptr<C2Component> component,
                   std::vector<std::shared_ptr<C2SettingResult>> settingResult);
    // Error callback; fails the running test immediately.
    void onError(std::weak_ptr<C2Component> component, uint32_t errorCode);

protected:
    C2VDAComponentTest();
    void SetUp() override;

    // Parses |testVideoData| (see gTestVideoData syntax) into |mTestVideoFile|.
    void parseTestVideoData(const char* testVideoData);

protected:
    using ULock = std::unique_lock<std::mutex>;

    enum {
        kWorkCount = 16,  // number of C2Work objects circulating in the test
    };

    std::shared_ptr<Listener> mListener;

    // Allocators
    std::shared_ptr<C2Allocator> mLinearAlloc;
    std::shared_ptr<C2BlockPool> mLinearBlockPool;

    // The array of output video frame counters which will be counted in listenerThread. The array
    // length equals to iteration time of stream play.
    std::vector<int> mOutputFrameCounts;
    // The array of work counters returned from component which will be counted in listenerThread.
    // The array length equals to iteration time of stream play.
    std::vector<int> mFinishedWorkCounts;
    // The array of output frame MD5Sum which will be computed in listenerThread. The array length
    // equals to iteration time of stream play.
    std::vector<std::string> mMD5Strings;

    // Mutex for |mWorkQueue| among main and listenerThread.
    std::mutex mQueueLock;
    std::condition_variable mQueueCondition;
    std::list<std::unique_ptr<C2Work>> mWorkQueue;

    // Mutex for |mProcessedWork| among main and listenerThread.
    std::mutex mProcessedLock;
    std::condition_variable mProcessedCondition;
    std::list<std::unique_ptr<C2Work>> mProcessedWork;

    // Mutex for |mFlushDone| among main and listenerThread.
    std::mutex mFlushDoneLock;
    std::condition_variable mFlushDoneCondition;
    // Set by listenerThread when the last flushed work has been received.
    bool mFlushDone;

    std::unique_ptr<TestVideoFile> mTestVideoFile;
};
199
// Thin adapter forwarding C2Component::Listener callbacks to the test fixture.
class Listener : public C2Component::Listener {
public:
    explicit Listener(C2VDAComponentTest* thiz) : mThis(thiz) {}
    virtual ~Listener() = default;

    virtual void onWorkDone_nb(std::weak_ptr<C2Component> component,
                               std::list<std::unique_ptr<C2Work>> workItems) override {
        mThis->onWorkDone(component, std::move(workItems));
    }

    virtual void onTripped_nb(
            std::weak_ptr<C2Component> component,
            std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
        mThis->onTripped(component, settingResult);
    }

    virtual void onError_nb(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
        mThis->onError(component, errorCode);
    }

private:
    C2VDAComponentTest* const mThis;  // not owned; the fixture outlives this listener
};
223
// Fetches the platform default linear allocator and wraps it in a basic block
// pool used to allocate input bitstream buffers.
C2VDAComponentTest::C2VDAComponentTest() : mListener(new Listener(this)) {
    std::shared_ptr<C2AllocatorStore> store = GetCodec2PlatformAllocatorStore();
    CHECK_EQ(store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &mLinearAlloc), C2_OK);

    mLinearBlockPool = std::make_shared<C2BasicLinearBlockPool>(mLinearAlloc);
}
230
onWorkDone(std::weak_ptr<C2Component> component,std::list<std::unique_ptr<C2Work>> workItems)231 void C2VDAComponentTest::onWorkDone(std::weak_ptr<C2Component> component,
232 std::list<std::unique_ptr<C2Work>> workItems) {
233 (void)component;
234 ULock l(mProcessedLock);
235 for (auto& item : workItems) {
236 mProcessedWork.emplace_back(std::move(item));
237 }
238 mProcessedCondition.notify_all();
239 }
240
// Tripped callback; config tripping is not exercised by these tests, so this
// intentionally does nothing.
void C2VDAComponentTest::onTripped(std::weak_ptr<C2Component> component,
                                   std::vector<std::shared_ptr<C2SettingResult>> settingResult) {
    (void)component;
    (void)settingResult;
    // no-ops
}
247
// Error callback: any component error immediately fails the running test.
void C2VDAComponentTest::onError(std::weak_ptr<C2Component> component, uint32_t errorCode) {
    (void)component;
    // fail the test
    FAIL() << "Get error code from component: " << errorCode;
}
253
SetUp()254 void C2VDAComponentTest::SetUp() {
255 parseTestVideoData(gTestVideoData);
256
257 mWorkQueue.clear();
258 for (int i = 0; i < kWorkCount; ++i) {
259 mWorkQueue.emplace_back(new C2Work);
260 }
261 mProcessedWork.clear();
262 mFlushDone = false;
263 }
264
getMediaSourceFromFile(const std::string & filename,const TestVideoFile::CodecType codec,sp<IMediaSource> * source)265 static bool getMediaSourceFromFile(const std::string& filename,
266 const TestVideoFile::CodecType codec, sp<IMediaSource>* source) {
267 source->clear();
268
269 sp<DataSource> dataSource =
270 DataSourceFactory::CreateFromURI(nullptr /* httpService */, filename.c_str());
271
272 if (dataSource == nullptr) {
273 fprintf(stderr, "Unable to create data source.\n");
274 return false;
275 }
276
277 sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(dataSource);
278 if (extractor == nullptr) {
279 fprintf(stderr, "could not create extractor.\n");
280 return false;
281 }
282
283 std::string expectedMime;
284 if (codec == TestVideoFile::CodecType::H264) {
285 expectedMime = "video/avc";
286 } else if (codec == TestVideoFile::CodecType::VP8) {
287 expectedMime = "video/x-vnd.on2.vp8";
288 } else if (codec == TestVideoFile::CodecType::VP9) {
289 expectedMime = "video/x-vnd.on2.vp9";
290 } else {
291 fprintf(stderr, "unsupported codec type.\n");
292 return false;
293 }
294
295 for (size_t i = 0, numTracks = extractor->countTracks(); i < numTracks; ++i) {
296 sp<MetaData> meta =
297 extractor->getTrackMetaData(i, MediaExtractor::kIncludeExtensiveMetaData);
298 if (meta == nullptr) {
299 continue;
300 }
301 const char* mime;
302 meta->findCString(kKeyMIMEType, &mime);
303 if (!strcasecmp(mime, expectedMime.c_str())) {
304 *source = extractor->getTrack(i);
305 if (*source == nullptr) {
306 fprintf(stderr, "It's NULL track for track %zu.\n", i);
307 return false;
308 }
309 return true;
310 }
311 }
312 fprintf(stderr, "No track found.\n");
313 return false;
314 }
315
parseTestVideoData(const char * testVideoData)316 void C2VDAComponentTest::parseTestVideoData(const char* testVideoData) {
317 ALOGV("videoDataStr: %s", testVideoData);
318 mTestVideoFile = std::make_unique<TestVideoFile>();
319
320 auto splitString = [](const std::string& input, const char delim) {
321 std::vector<std::string> splits;
322 auto beg = input.begin();
323 while (beg != input.end()) {
324 auto pos = std::find(beg, input.end(), delim);
325 splits.emplace_back(beg, pos);
326 beg = pos != input.end() ? pos + 1 : pos;
327 }
328 return splits;
329 };
330 auto tokens = splitString(testVideoData, ':');
331 ASSERT_EQ(tokens.size(), 6u);
332 mTestVideoFile->mFilename = tokens[0];
333 ASSERT_GT(mTestVideoFile->mFilename.length(), 0u);
334
335 mTestVideoFile->mComponentName = tokens[1];
336 if (mTestVideoFile->mComponentName == kH264DecoderName) {
337 mTestVideoFile->mCodec = TestVideoFile::CodecType::H264;
338 } else if (mTestVideoFile->mComponentName == kVP8DecoderName) {
339 mTestVideoFile->mCodec = TestVideoFile::CodecType::VP8;
340 } else if (mTestVideoFile->mComponentName == kVP9DecoderName) {
341 mTestVideoFile->mCodec = TestVideoFile::CodecType::VP9;
342 }
343 ASSERT_NE(mTestVideoFile->mCodec, TestVideoFile::CodecType::UNKNOWN);
344
345 mTestVideoFile->mWidth = std::stoi(tokens[2]);
346 mTestVideoFile->mHeight = std::stoi(tokens[3]);
347 mTestVideoFile->mNumFrames = std::stoi(tokens[4]);
348 mTestVideoFile->mNumFragments = std::stoi(tokens[5]);
349
350 ALOGV("mTestVideoFile: %s, %s, %d, %d, %d, %d", mTestVideoFile->mFilename.c_str(),
351 mTestVideoFile->mComponentName.c_str(), mTestVideoFile->mWidth, mTestVideoFile->mHeight,
352 mTestVideoFile->mNumFrames, mTestVideoFile->mNumFragments);
353 }
354
// Splits the mapped YUV frame into StringPieces — one per contiguous chunk of
// pixel data — for MD5 computation and YUV recording.
// NOTE(review): piece sizes are computed from the visible width/height, i.e.
// this assumes plane stride equals visible width — confirm for coded sizes
// larger than the visible size.
static void getFrameStringPieces(const C2GraphicView& constGraphicView,
                                 std::vector<::base::StringPiece>* framePieces) {
    const uint8_t* const* constData = constGraphicView.data();
    ASSERT_NE(constData, nullptr);
    const C2PlanarLayout& layout = constGraphicView.layout();
    ASSERT_EQ(layout.type, C2PlanarLayout::TYPE_YUV) << "Only support YUV plane format";

    framePieces->clear();
    // Full-size luma plane.
    framePieces->push_back(
            ::base::StringPiece(reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_Y]),
                                constGraphicView.width() * constGraphicView.height()));
    if (layout.planes[C2PlanarLayout::PLANE_U].colInc == 2) {  // semi-planar mode
        // U and V are interleaved in one half-size chroma plane; the lower of
        // the two plane addresses is its start (covers both UV and VU order).
        framePieces->push_back(::base::StringPiece(
                reinterpret_cast<const char*>(std::min(constData[C2PlanarLayout::PLANE_U],
                                                       constData[C2PlanarLayout::PLANE_V])),
                constGraphicView.width() * constGraphicView.height() / 2));
    } else {
        // Planar mode: separate quarter-size U and V planes.
        framePieces->push_back(::base::StringPiece(
                reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_U]),
                constGraphicView.width() * constGraphicView.height() / 4));
        framePieces->push_back(::base::StringPiece(
                reinterpret_cast<const char*>(constData[C2PlanarLayout::PLANE_V]),
                constGraphicView.width() * constGraphicView.height() / 4));
    }
}
380
// Test parameters:
// - Flush after work index. If this value is not negative, test will signal flush to component
//   after queueing the work frame index equals to this value in the first iteration. Negative
//   values may be magic constants, please refer to FlushPoint enum.
// - Number of play through. This value specifies the iteration time for playing entire video. If
//   |mFlushAfterWorkIndex| is not negative, the first iteration will perform flush, then repeat
//   times as this value for playing entire video.
// - Sanity check. If this is true, decoded content sanity check is enabled. Test will compute the
//   MD5Sum for output frame data for a play-though iteration (not flushed), and compare to golden
//   MD5Sums which should be stored in the file |video_filename|.md5
// - Use dummy EOS work. If this is true, test will queue a dummy work with end-of-stream flag in
//   the end of all input works. On the contrary, test will call drain_nb() to component.
class C2VDAComponentParamTest
      : public C2VDAComponentTest,
        public ::testing::WithParamInterface<std::tuple<int, uint32_t, bool, bool>> {
protected:
    // Cached copies of GetParam() fields, resolved at the start of each test.
    int mFlushAfterWorkIndex;
    uint32_t mNumberOfPlaythrough;
    bool mSanityCheck;
    bool mUseDummyEOSWork;
};
402
// Decode the whole input video |mNumberOfPlaythrough| times with a single
// C2VDAComponent instance. A listener thread drains finished works, validates
// output geometry, optionally records YUV and accumulates MD5, then recycles
// works back to |mWorkQueue|; the main loop extracts bitstream fragments and
// queues them as input works, optionally flushing mid-stream on the first
// iteration or ending each play-through with drain or a dummy EOS work.
TEST_P(C2VDAComponentParamTest, SimpleDecodeTest) {
    // Resolve the flush-point parameter; FlushPoint magic constants are mapped
    // to concrete work indices of this particular video.
    mFlushAfterWorkIndex = std::get<0>(GetParam());
    if (mFlushAfterWorkIndex == FlushPoint::MID_STREAM_FLUSH) {
        mFlushAfterWorkIndex = mTestVideoFile->mNumFragments / 2;
    } else if (mFlushAfterWorkIndex == FlushPoint::END_OF_STREAM_FLUSH) {
        mFlushAfterWorkIndex = mTestVideoFile->mNumFragments - 1;
    }
    ASSERT_LT(mFlushAfterWorkIndex, mTestVideoFile->mNumFragments);
    mNumberOfPlaythrough = std::get<1>(GetParam());

    if (mFlushAfterWorkIndex >= 0) {
        mNumberOfPlaythrough++;  // add one iteration to perform mid-stream flushing.
    }

    mSanityCheck = std::get<2>(GetParam());
    mUseDummyEOSWork = std::get<3>(GetParam());

    // Reset counters and determine the expected answers for all iterations.
    mOutputFrameCounts.resize(mNumberOfPlaythrough, 0);
    mFinishedWorkCounts.resize(mNumberOfPlaythrough, 0);
    mMD5Strings.resize(mNumberOfPlaythrough);
    std::vector<int> expectedOutputFrameCounts(mNumberOfPlaythrough, mTestVideoFile->mNumFrames);
    auto expectedWorkCount = mTestVideoFile->mNumFragments;
    if (mUseDummyEOSWork) {
        expectedWorkCount += 1;  // plus one dummy EOS work
    }
    std::vector<int> expectedFinishedWorkCounts(mNumberOfPlaythrough, expectedWorkCount);
    if (mFlushAfterWorkIndex >= 0) {
        // First iteration performs the mid-stream flushing.
        expectedOutputFrameCounts[0] = mFlushAfterWorkIndex + 1;
        expectedFinishedWorkCounts[0] = mFlushAfterWorkIndex + 1;
    }

    std::shared_ptr<C2Component> component(std::make_shared<C2VDAComponent>(
            mTestVideoFile->mComponentName, 0, std::make_shared<C2ReflectorHelper>()));

    ASSERT_EQ(component->setListener_vb(mListener, C2_DONT_BLOCK), C2_OK);
    ASSERT_EQ(component->start(), C2_OK);

    std::atomic_bool running(true);
    std::thread listenerThread([this, &running]() {
        uint32_t iteration = 0;
        ::base::MD5Context md5Ctx;
        ::base::MD5Init(&md5Ctx);
        ::base::File recordFile;
        if (gRecordOutputYUV) {
            auto recordFilePath = getRecordOutputPath(
                    mTestVideoFile->mFilename, mTestVideoFile->mWidth, mTestVideoFile->mHeight);
            fprintf(stdout, "record output file: %s\n", recordFilePath.value().c_str());
            recordFile = ::base::File(recordFilePath,
                                      ::base::File::FLAG_OPEN_ALWAYS | ::base::File::FLAG_WRITE);
            ASSERT_TRUE(recordFile.IsValid());
        }
        while (running) {
            std::unique_ptr<C2Work> work;
            {
                // Wait with timeout so |running| is re-checked periodically.
                ULock l(mProcessedLock);
                if (mProcessedWork.empty()) {
                    mProcessedCondition.wait_for(l, 100ms);
                    if (mProcessedWork.empty()) {
                        continue;
                    }
                }
                work = std::move(mProcessedWork.front());
                mProcessedWork.pop_front();
            }
            mFinishedWorkCounts[iteration]++;
            ALOGV("Output: frame index: %llu result: %d flags: 0x%x buffers: %zu",
                  work->input.ordinal.frameIndex.peekull(), work->result,
                  work->worklets.front()->output.flags,
                  work->worklets.front()->output.buffers.size());

            ASSERT_EQ(work->worklets.size(), 1u);
            if (work->worklets.front()->output.buffers.size() == 1u) {
                std::shared_ptr<C2Buffer> output = work->worklets.front()->output.buffers[0];
                C2ConstGraphicBlock graphicBlock = output->data().graphicBlocks().front();

                // check graphic buffer size (coded size) is not less than given video size.
                ASSERT_LE(mTestVideoFile->mWidth, static_cast<int>(graphicBlock.width()));
                ASSERT_LE(mTestVideoFile->mHeight, static_cast<int>(graphicBlock.height()));

                // check visible rect equals to given video size.
                ASSERT_EQ(mTestVideoFile->mWidth, static_cast<int>(graphicBlock.crop().width));
                ASSERT_EQ(mTestVideoFile->mHeight, static_cast<int>(graphicBlock.crop().height));
                ASSERT_EQ(0u, graphicBlock.crop().left);
                ASSERT_EQ(0u, graphicBlock.crop().top);

                // Intended behavior for Intel libva driver (crbug.com/148546):
                // The 5ms latency is laid here to make sure surface content is
                // finished being processed by libva.
                std::this_thread::sleep_for(std::chrono::milliseconds(5));

                const C2GraphicView& constGraphicView = graphicBlock.map().get();
                ASSERT_EQ(C2_OK, constGraphicView.error());
                std::vector<::base::StringPiece> framePieces;
                getFrameStringPieces(constGraphicView, &framePieces);
                ASSERT_FALSE(framePieces.empty());
                if (mSanityCheck) {
                    for (const auto& piece : framePieces) {
                        ::base::MD5Update(&md5Ctx, piece);
                    }
                }
                if (gRecordOutputYUV) {
                    for (const auto& piece : framePieces) {
                        ASSERT_EQ(static_cast<int>(piece.length()),
                                  recordFile.WriteAtCurrentPos(piece.data(), piece.length()))
                                << "Failed to write file for yuv recording...";
                    }
                }

                work->worklets.front()->output.buffers.clear();
                mOutputFrameCounts[iteration]++;
            }

            bool iteration_end =
                    work->worklets.front()->output.flags & C2FrameData::FLAG_END_OF_STREAM;

            // input buffer should be reset in component side.
            ASSERT_EQ(work->input.buffers.size(), 1u);
            ASSERT_TRUE(work->input.buffers.front() == nullptr);
            work->worklets.clear();
            work->workletsProcessed = 0;

            // Receiving the flushed work ends the first iteration when
            // mid-stream flush is enabled.
            if (iteration == 0 && work->input.ordinal.frameIndex.peeku() ==
                                          static_cast<uint64_t>(mFlushAfterWorkIndex)) {
                ULock l(mFlushDoneLock);
                mFlushDone = true;
                mFlushDoneCondition.notify_all();
                iteration_end = true;
            }

            // Recycle the emptied work back to the input queue.
            ULock l(mQueueLock);
            mWorkQueue.emplace_back(std::move(work));
            mQueueCondition.notify_all();

            if (iteration_end) {
                // record md5sum
                ::base::MD5Digest digest;
                ::base::MD5Final(&digest, &md5Ctx);
                mMD5Strings[iteration] = ::base::MD5DigestToBase16(digest);
                ::base::MD5Init(&md5Ctx);

                iteration++;
                if (iteration == mNumberOfPlaythrough) {
                    running.store(false);  // stop the thread
                }
            }
        }
    });

    for (uint32_t iteration = 0; iteration < mNumberOfPlaythrough; ++iteration) {
        ASSERT_TRUE(getMediaSourceFromFile(mTestVideoFile->mFilename, mTestVideoFile->mCodec,
                                           &mTestVideoFile->mData));

        std::deque<sp<ABuffer>> csds;
        if (mTestVideoFile->mCodec == TestVideoFile::CodecType::H264) {
            // Get csd buffers for h264.
            sp<AMessage> format;
            (void)convertMetaDataToMessage(mTestVideoFile->mData->getFormat(), &format);
            csds.resize(2);
            format->findBuffer("csd-0", &csds[0]);
            format->findBuffer("csd-1", &csds[1]);
            ASSERT_TRUE(csds[0] != nullptr && csds[1] != nullptr);
        }

        ASSERT_EQ(mTestVideoFile->mData->start(), OK);

        int numWorks = 0;
        while (true) {
            size_t size = 0u;
            void* data = nullptr;
            int64_t timestamp = 0u;
            MediaBufferBase* buffer = nullptr;
            sp<ABuffer> csd;
            bool queueDummyEOSWork = false;
            if (!csds.empty()) {
                // Queue codec-specific data (e.g. SPS/PPS) before the stream.
                csd = std::move(csds.front());
                csds.pop_front();
                size = csd->size();
                data = csd->data();
            } else {
                if (mTestVideoFile->mData->read(&buffer) != OK) {
                    ASSERT_TRUE(buffer == nullptr);
                    if (mUseDummyEOSWork) {
                        ALOGV("Meet end of stream. Put a dummy EOS work.");
                        queueDummyEOSWork = true;
                    } else {
                        ALOGV("Meet end of stream. Now drain the component.");
                        ASSERT_EQ(component->drain_nb(C2Component::DRAIN_COMPONENT_WITH_EOS),
                                  C2_OK);
                        break;
                    }
                    // TODO(johnylin): add test with drain with DRAIN_COMPONENT_NO_EOS when we know
                    // the actual use case of it.
                } else {
                    MetaDataBase& meta = buffer->meta_data();
                    ASSERT_TRUE(meta.findInt64(kKeyTime, &timestamp));
                    size = buffer->size();
                    data = buffer->data();
                }
            }

            // Wait for a free work recycled by the listener thread.
            std::unique_ptr<C2Work> work;
            while (!work) {
                ULock l(mQueueLock);
                if (!mWorkQueue.empty()) {
                    work = std::move(mWorkQueue.front());
                    mWorkQueue.pop_front();
                } else {
                    mQueueCondition.wait_for(l, 100ms);
                }
            }

            work->input.ordinal.frameIndex = static_cast<uint64_t>(numWorks);
            work->input.buffers.clear();

            std::shared_ptr<C2LinearBlock> block;
            if (queueDummyEOSWork) {
                work->input.flags = C2FrameData::FLAG_END_OF_STREAM;
                work->input.ordinal.timestamp = 0;  // timestamp is invalid for dummy EOS work

                // Create a dummy input buffer by allocating minimal size of buffer from block pool.
                mLinearBlockPool->fetchLinearBlock(
                        1, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
                work->input.buffers.emplace_back(new C2VDADummyLinearBuffer(std::move(block)));
                ALOGV("Input: (Dummy EOS) id: %llu", work->input.ordinal.frameIndex.peekull());
            } else {
                work->input.flags = static_cast<C2FrameData::flags_t>(0);
                work->input.ordinal.timestamp = static_cast<uint64_t>(timestamp);

                // Allocate an input buffer with data size.
                mLinearBlockPool->fetchLinearBlock(
                        size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
                C2WriteView view = block->map().get();
                ASSERT_EQ(view.error(), C2_OK);
                memcpy(view.base(), data, size);
                work->input.buffers.emplace_back(new C2VDALinearBuffer(std::move(block)));
                ALOGV("Input: bitstream id: %llu timestamp: %llu size: %zu",
                      work->input.ordinal.frameIndex.peekull(),
                      work->input.ordinal.timestamp.peekull(), size);
            }

            work->worklets.clear();
            work->worklets.emplace_back(new C2Worklet);

            std::list<std::unique_ptr<C2Work>> items;
            items.push_back(std::move(work));

            // Queue the work.
            ASSERT_EQ(component->queue_nb(&items), C2_OK);
            numWorks++;

            if (buffer) {
                buffer->release();
            }

            if (iteration == 0 && numWorks == mFlushAfterWorkIndex + 1) {
                // Perform flush.
                // Note: C2VDAComponent does not return work via |flushedWork|.
                ASSERT_EQ(component->flush_sm(C2Component::FLUSH_COMPONENT,
                                              nullptr /* flushedWork */),
                          C2_OK);
                break;
            }

            if (queueDummyEOSWork) {
                break;
            }
        }

        if (iteration == 0 && mFlushAfterWorkIndex >= 0) {
            // Wait here until client get all flushed works.
            while (true) {
                ULock l(mFlushDoneLock);
                if (mFlushDone) {
                    break;
                }
                mFlushDoneCondition.wait_for(l, 100ms);
            }
            ALOGV("Got flush done signal");
            EXPECT_EQ(numWorks, mFlushAfterWorkIndex + 1);
        } else {
            EXPECT_EQ(numWorks, expectedWorkCount);
        }
        ASSERT_EQ(mTestVideoFile->mData->stop(), OK);
    }

    listenerThread.join();
    ASSERT_EQ(running, false);
    ASSERT_EQ(component->stop(), C2_OK);

    // Finally check the decoding went as expected.
    for (uint32_t i = 0; i < mNumberOfPlaythrough; ++i) {
        if (mFlushAfterWorkIndex >= 0 && i == 0) {
            // The flushed iteration may output fewer frames; only bound it.
            EXPECT_LE(mOutputFrameCounts[i], expectedOutputFrameCounts[i]) << "At iteration: " << i;
        } else {
            EXPECT_EQ(mOutputFrameCounts[i], expectedOutputFrameCounts[i]) << "At iteration: " << i;
        }
        EXPECT_EQ(mFinishedWorkCounts[i], expectedFinishedWorkCounts[i]) << "At iteration: " << i;
    }

    if (mSanityCheck) {
        // Compare each full play-through's MD5 against the golden list.
        std::vector<std::string> goldenMD5s;
        readGoldenMD5s(mTestVideoFile->mFilename, &goldenMD5s);
        for (uint32_t i = 0; i < mNumberOfPlaythrough; ++i) {
            if (mFlushAfterWorkIndex >= 0 && i == 0) {
                continue;  // do not compare the iteration with flushing
            }
            bool matched = std::find(goldenMD5s.begin(), goldenMD5s.end(), mMD5Strings[i]) !=
                           goldenMD5s.end();
            EXPECT_TRUE(matched) << "Unknown MD5: " << mMD5Strings[i] << " at iter: " << i;
        }
    }
}
717
// Test instantiations. The tuple is
// (flushAfterWorkIndex, numberOfPlaythrough, sanityCheck, useDummyEOSWork).

// Play input video once, end by draining.
INSTANTIATE_TEST_CASE_P(SinglePlaythroughTest, C2VDAComponentParamTest,
                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
                                                          1u, false, false)));
// Play input video once, end by dummy EOS work.
INSTANTIATE_TEST_CASE_P(DummyEOSWorkTest, C2VDAComponentParamTest,
                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
                                                          1u, false, true)));

// Play 5 times of input video, and check sanity by MD5Sum.
INSTANTIATE_TEST_CASE_P(MultiplePlaythroughSanityTest, C2VDAComponentParamTest,
                        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::NO_FLUSH),
                                                          5u, true, false)));

// Test mid-stream flush then play once entirely.
INSTANTIATE_TEST_CASE_P(FlushPlaythroughTest, C2VDAComponentParamTest,
                        ::testing::Values(std::make_tuple(40, 1u, true, false)));

// Test mid-stream flush then stop.
INSTANTIATE_TEST_CASE_P(FlushStopTest, C2VDAComponentParamTest,
                        ::testing::Values(std::make_tuple(
                                static_cast<int>(FlushPoint::MID_STREAM_FLUSH), 0u, false, false)));

// Test early flush (after a few works) then stop.
INSTANTIATE_TEST_CASE_P(EarlyFlushStopTest, C2VDAComponentParamTest,
                        ::testing::Values(std::make_tuple(0, 0u, false, false),
                                          std::make_tuple(1, 0u, false, false),
                                          std::make_tuple(2, 0u, false, false),
                                          std::make_tuple(3, 0u, false, false)));

// Test end-of-stream flush then stop.
INSTANTIATE_TEST_CASE_P(
        EndOfStreamFlushStopTest, C2VDAComponentParamTest,
        ::testing::Values(std::make_tuple(static_cast<int>(FlushPoint::END_OF_STREAM_FLUSH), 0u,
                                          false, false)));
753
754 } // namespace android
755
// Prints command-line usage of this test binary to stderr.
static void usage(const char* me) {
    fprintf(stderr, "usage: %s [-i test_video_data] [-r(ecord YUV)] [gtest options]\n", me);
}
759
main(int argc,char ** argv)760 int main(int argc, char** argv) {
761 ::testing::InitGoogleTest(&argc, argv);
762
763 int res;
764 while ((res = getopt(argc, argv, "i:r")) >= 0) {
765 switch (res) {
766 case 'i': {
767 android::gTestVideoData = optarg;
768 break;
769 }
770 case 'r': {
771 android::gRecordOutputYUV = true;
772 break;
773 }
774 default: {
775 usage(argv[0]);
776 exit(1);
777 break;
778 }
779 }
780 }
781
782 return RUN_ALL_TESTS();
783 }
784