1 // Copyright 2020 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //#define LOG_NDEBUG 0
6 #define LOG_TAG "V4L2Decoder"
7
8 #include <v4l2_codec2/components/V4L2Decoder.h>
9
10 #include <stdint.h>
11
12 #include <vector>
13
14 #include <base/bind.h>
15 #include <base/files/scoped_file.h>
16 #include <base/memory/ptr_util.h>
17 #include <log/log.h>
18
19 #include <v4l2_codec2/common/Common.h>
20 #include <v4l2_codec2/common/Fourcc.h>
21
22 namespace android {
23 namespace {
24
// Number of buffers requested on the V4L2 OUTPUT (bitstream input) queue.
constexpr size_t kNumInputBuffers = 16;
// Extra buffers for transmitting in the whole video pipeline.
constexpr size_t kNumExtraOutputBuffers = 4;

// Currently we only support flexible pixel 420 format YCBCR_420_888 in Android.
// Here is the list of flexible 420 format.
constexpr std::initializer_list<uint32_t> kSupportedOutputFourccs = {
        Fourcc::YU12, Fourcc::YV12, Fourcc::YM12, Fourcc::YM21,
        Fourcc::NV12, Fourcc::NV21, Fourcc::NM12, Fourcc::NM21,
};
35
VideoCodecToV4L2PixFmt(VideoCodec codec)36 uint32_t VideoCodecToV4L2PixFmt(VideoCodec codec) {
37 switch (codec) {
38 case VideoCodec::H264:
39 return V4L2_PIX_FMT_H264;
40 case VideoCodec::VP8:
41 return V4L2_PIX_FMT_VP8;
42 case VideoCodec::VP9:
43 return V4L2_PIX_FMT_VP9;
44 }
45 }
46
47 } // namespace
48
49 // static
Create(const VideoCodec & codec,const size_t inputBufferSize,GetPoolCB getPoolCb,OutputCB outputCb,ErrorCB errorCb,scoped_refptr<::base::SequencedTaskRunner> taskRunner)50 std::unique_ptr<VideoDecoder> V4L2Decoder::Create(
51 const VideoCodec& codec, const size_t inputBufferSize, GetPoolCB getPoolCb,
52 OutputCB outputCb, ErrorCB errorCb, scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
53 std::unique_ptr<V4L2Decoder> decoder =
54 ::base::WrapUnique<V4L2Decoder>(new V4L2Decoder(taskRunner));
55 if (!decoder->start(codec, inputBufferSize, std::move(getPoolCb), std::move(outputCb),
56 std::move(errorCb))) {
57 return nullptr;
58 }
59 return decoder;
60 }
61
V4L2Decoder::V4L2Decoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner)
      : mTaskRunner(std::move(taskRunner)) {
    ALOGV("%s()", __func__);

    // All weak pointers handed out to posted tasks/callbacks originate from
    // this factory; they are invalidated in the destructor so in-flight tasks
    // are cancelled.
    mWeakThis = mWeakThisFactory.GetWeakPtr();
}
68
V4L2Decoder::~V4L2Decoder() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    // Invalidate weak pointers first so no pending task can run against a
    // partially-destroyed object.
    mWeakThisFactory.InvalidateWeakPtrs();

    // Streamoff input and output queue.
    if (mOutputQueue) {
        mOutputQueue->streamoff();
        mOutputQueue->deallocateBuffers();
        mOutputQueue = nullptr;
    }
    if (mInputQueue) {
        mInputQueue->streamoff();
        mInputQueue->deallocateBuffers();
        mInputQueue = nullptr;
    }
    // Stop the device poll thread last, after the queues are torn down.
    if (mDevice) {
        mDevice->stopPolling();
        mDevice = nullptr;
    }
}
91
// Brings up the V4L2 decoder: opens the device, verifies required
// capabilities, subscribes to resolution-change events, configures the input
// queue, and starts the device poll loop. Returns false on any failure, which
// Create() turns into a nullptr decoder.
bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize, GetPoolCB getPoolCb,
                        OutputCB outputCb, ErrorCB errorCb) {
    ALOGV("%s(codec=%s, inputBufferSize=%zu)", __func__, VideoCodecToString(codec),
          inputBufferSize);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    mGetPoolCb = std::move(getPoolCb);
    mOutputCb = std::move(outputCb);
    mErrorCb = std::move(errorCb);

    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        return false;
    }

    mDevice = V4L2Device::create();

    const uint32_t inputPixelFormat = VideoCodecToV4L2PixFmt(codec);
    if (!mDevice->open(V4L2Device::Type::kDecoder, inputPixelFormat)) {
        ALOGE("Failed to open device for %s", VideoCodecToString(codec));
        return false;
    }

    if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
        ALOGE("Device does not have VIDEO_M2M_MPLANE and STREAMING capabilities.");
        return false;
    }

    // Probe (without executing) V4L2_DEC_CMD_STOP; drain() depends on the
    // driver supporting this flush command.
    struct v4l2_decoder_cmd cmd;
    memset(&cmd, 0, sizeof(cmd));
    cmd.cmd = V4L2_DEC_CMD_STOP;
    if (mDevice->ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
        ALOGE("Device does not support flushing (V4L2_DEC_CMD_STOP)");
        return false;
    }

    // Subscribe to the resolution change event.
    struct v4l2_event_subscription sub;
    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_SOURCE_CHANGE;
    if (mDevice->ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
        ALOGE("ioctl() failed: VIDIOC_SUBSCRIBE_EVENT: V4L2_EVENT_SOURCE_CHANGE");
        return false;
    }

    // Create Input/Output V4L2Queue, and setup input queue.
    // Note: in V4L2 M2M terminology the OUTPUT queue carries the compressed
    // bitstream (our input) and the CAPTURE queue carries decoded frames.
    mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
    mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
    if (!mInputQueue || !mOutputQueue) {
        ALOGE("Failed to create V4L2 queue.");
        return false;
    }
    if (!setupInputFormat(inputPixelFormat, inputBufferSize)) {
        ALOGE("Failed to setup input format.");
        return false;
    }

    // The output queue is configured later, on the first resolution-change
    // event (see changeResolution()).
    if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
        ALOGE("Failed to start polling V4L2 device.");
        return false;
    }

    setState(State::Idle);
    return true;
}
158
setupInputFormat(const uint32_t inputPixelFormat,const size_t inputBufferSize)159 bool V4L2Decoder::setupInputFormat(const uint32_t inputPixelFormat, const size_t inputBufferSize) {
160 ALOGV("%s(inputPixelFormat=%u, inputBufferSize=%zu)", __func__, inputPixelFormat,
161 inputBufferSize);
162 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
163
164 // Check if the format is supported.
165 std::vector<uint32_t> formats =
166 mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
167 if (std::find(formats.begin(), formats.end(), inputPixelFormat) == formats.end()) {
168 ALOGE("Input codec s not supported by device.");
169 return false;
170 }
171
172 // Setup the input format.
173 auto format = mInputQueue->setFormat(inputPixelFormat, ui::Size(), inputBufferSize, 0);
174 if (!format) {
175 ALOGE("Failed to call IOCTL to set input format.");
176 return false;
177 }
178 ALOG_ASSERT(format->fmt.pix_mp.pixelformat == inputPixelFormat);
179
180 if (mInputQueue->allocateBuffers(kNumInputBuffers, V4L2_MEMORY_DMABUF) == 0) {
181 ALOGE("Failed to allocate input buffer.");
182 return false;
183 }
184 if (!mInputQueue->streamon()) {
185 ALOGE("Failed to streamon input queue.");
186 return false;
187 }
188 return true;
189 }
190
decode(std::unique_ptr<BitstreamBuffer> buffer,DecodeCB decodeCb)191 void V4L2Decoder::decode(std::unique_ptr<BitstreamBuffer> buffer, DecodeCB decodeCb) {
192 ALOGV("%s(id=%d)", __func__, buffer->id);
193 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
194
195 if (mState == State::Error) {
196 ALOGE("Ignore due to error state.");
197 mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(decodeCb),
198 VideoDecoder::DecodeStatus::kError));
199 return;
200 }
201
202 if (mState == State::Idle) {
203 setState(State::Decoding);
204 }
205
206 mDecodeRequests.push(DecodeRequest(std::move(buffer), std::move(decodeCb)));
207 pumpDecodeRequest();
208 }
209
drain(DecodeCB drainCb)210 void V4L2Decoder::drain(DecodeCB drainCb) {
211 ALOGV("%s()", __func__);
212 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
213
214 switch (mState) {
215 case State::Idle:
216 ALOGV("Nothing need to drain, ignore.");
217 mTaskRunner->PostTask(
218 FROM_HERE, ::base::BindOnce(std::move(drainCb), VideoDecoder::DecodeStatus::kOk));
219 return;
220
221 case State::Decoding:
222 mDecodeRequests.push(DecodeRequest(nullptr, std::move(drainCb)));
223 pumpDecodeRequest();
224 return;
225
226 case State::Draining:
227 case State::Error:
228 ALOGE("Ignore due to wrong state: %s", StateToString(mState));
229 mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(drainCb),
230 VideoDecoder::DecodeStatus::kError));
231 return;
232 }
233 }
234
// Feeds pending decode requests into the V4L2 input queue until we run out of
// requests or free input buffers. A request without a buffer marks a drain;
// it initiates the flush sequence and switches the state to Draining.
void V4L2Decoder::pumpDecodeRequest() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState != State::Decoding) return;

    while (!mDecodeRequests.empty()) {
        // Drain the decoder.
        if (mDecodeRequests.front().buffer == nullptr) {
            ALOGV("Get drain request.");
            // Send the flush command after all input buffers are dequeued. This makes
            // sure all previous resolution changes have been handled because the
            // driver must hold the input buffer that triggers resolution change. The
            // driver cannot decode data in it without new output buffers. If we send
            // the flush now and a queued input buffer triggers resolution change
            // later, the driver will send an output buffer that has
            // V4L2_BUF_FLAG_LAST. But some queued input buffer have not been decoded
            // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
            // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
            // to the decoder.
            if (mInputQueue->queuedBuffersCount() > 0) {
                ALOGV("Wait for all input buffers dequeued.");
                return;
            }

            auto request = std::move(mDecodeRequests.front());
            mDecodeRequests.pop();

            // V4L2_DEC_CMD_STOP; completion is signalled by a CAPTURE buffer
            // carrying V4L2_BUF_FLAG_LAST (handled in serviceDeviceTask()).
            if (!sendV4L2DecoderCmd(false)) {
                std::move(request.decodeCb).Run(VideoDecoder::DecodeStatus::kError);
                onError();
                return;
            }
            mDrainCb = std::move(request.decodeCb);
            setState(State::Draining);
            return;
        }

        // Pause if no free input buffer. We resume decoding after dequeueing input buffers.
        auto inputBuffer = mInputQueue->getFreeBuffer();
        if (!inputBuffer) {
            ALOGV("There is no free input buffer.");
            return;
        }

        auto request = std::move(mDecodeRequests.front());
        mDecodeRequests.pop();

        // The bitstream ID rides in the V4L2 timestamp so it can be recovered
        // when the buffer is dequeued in serviceDeviceTask().
        const int32_t bitstreamId = request.buffer->id;
        ALOGV("QBUF to input queue, bitstreadId=%d", bitstreamId);
        inputBuffer->setTimeStamp({.tv_sec = bitstreamId});
        size_t planeSize = inputBuffer->getPlaneSize(0);
        if (request.buffer->size > planeSize) {
            ALOGE("The input size (%zu) is not enough, we need %zu", planeSize,
                  request.buffer->size);
            onError();
            return;
        }

        ALOGV("Set bytes_used=%zu, offset=%zu", request.buffer->offset + request.buffer->size,
              request.buffer->offset);
        inputBuffer->setPlaneDataOffset(0, request.buffer->offset);
        inputBuffer->setPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
        std::vector<int> fds;
        fds.push_back(std::move(request.buffer->dmabuf_fd));
        if (!std::move(*inputBuffer).queueDMABuf(fds)) {
            ALOGE("%s(): Failed to QBUF to input queue, bitstreamId=%d", __func__, bitstreamId);
            onError();
            return;
        }

        // The decode callback fires when this buffer is dequeued again.
        mPendingDecodeCbs.insert(std::make_pair(bitstreamId, std::move(request.decodeCb)));
    }
}
309
// Aborts all in-flight work: pending decode callbacks and any drain callback
// are run with kAborted, both queues are streamoff'd (dropping their buffers)
// and immediately streamon'd again, and polling is restarted.
void V4L2Decoder::flush() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Idle) {
        ALOGV("Nothing need to flush, ignore.");
        return;
    }
    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        return;
    }

    // Call all pending callbacks.
    for (auto& item : mPendingDecodeCbs) {
        std::move(item.second).Run(VideoDecoder::DecodeStatus::kAborted);
    }
    mPendingDecodeCbs.clear();
    if (mDrainCb) {
        std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kAborted);
    }

    // Streamoff both V4L2 queues to drop input and output buffers.
    // Polling must stop first so the poll thread doesn't race the streamoff.
    mDevice->stopPolling();
    mOutputQueue->streamoff();
    mFrameAtDevice.clear();
    mInputQueue->streamoff();

    // Streamon both V4L2 queues.
    mInputQueue->streamon();
    mOutputQueue->streamon();

    // If there is no free buffer at mOutputQueue, tryFetchVideoFrame() should be triggerred after
    // a buffer is DQBUF from output queue. Now all the buffers are dropped at mOutputQueue, we
    // have to trigger tryFetchVideoFrame() here.
    if (mVideoFramePool) {
        tryFetchVideoFrame();
    }

    if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
        ALOGE("Failed to start polling V4L2 device.");
        onError();
        return;
    }

    setState(State::Idle);
}
358
// Poll-thread-driven service routine (posted to mTaskRunner): dequeues
// completed input buffers (running their decode callbacks), dequeues decoded
// output buffers (sending frames to the client), handles drain completion and
// resolution-change events, and reschedules pumping/fetching as buffers free
// up. |event| is true when the device signalled a pending V4L2 event.
void V4L2Decoder::serviceDeviceTask(bool event) {
    ALOGV("%s(event=%d) state=%s InputQueue(%s):%zu+%zu/%zu, OutputQueue(%s):%zu+%zu/%zu", __func__,
          event, StateToString(mState), (mInputQueue->isStreaming() ? "streamon" : "streamoff"),
          mInputQueue->freeBuffersCount(), mInputQueue->queuedBuffersCount(),
          mInputQueue->allocatedBuffersCount(),
          (mOutputQueue->isStreaming() ? "streamon" : "streamoff"),
          mOutputQueue->freeBuffersCount(), mOutputQueue->queuedBuffersCount(),
          mOutputQueue->allocatedBuffersCount());
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Error) return;

    // Dequeue output and input queue.
    bool inputDequeued = false;
    while (mInputQueue->queuedBuffersCount() > 0) {
        bool success;
        V4L2ReadableBufferRef dequeuedBuffer;
        std::tie(success, dequeuedBuffer) = mInputQueue->dequeueBuffer();
        if (!success) {
            ALOGE("Failed to dequeue buffer from input queue.");
            onError();
            return;
        }
        // A null buffer with success means nothing more to dequeue right now.
        if (!dequeuedBuffer) break;

        inputDequeued = true;

        // Run the corresponding decode callback.
        // The bitstream ID was stashed in the timestamp by pumpDecodeRequest().
        int32_t id = dequeuedBuffer->getTimeStamp().tv_sec;
        ALOGV("DQBUF from input queue, bitstreamId=%d", id);
        auto it = mPendingDecodeCbs.find(id);
        if (it == mPendingDecodeCbs.end()) {
            ALOGW("Callback is already abandoned.");
            continue;
        }
        std::move(it->second).Run(VideoDecoder::DecodeStatus::kOk);
        mPendingDecodeCbs.erase(it);
    }

    bool outputDequeued = false;
    while (mOutputQueue->queuedBuffersCount() > 0) {
        bool success;
        V4L2ReadableBufferRef dequeuedBuffer;
        std::tie(success, dequeuedBuffer) = mOutputQueue->dequeueBuffer();
        if (!success) {
            ALOGE("Failed to dequeue buffer from output queue.");
            onError();
            return;
        }
        if (!dequeuedBuffer) break;

        outputDequeued = true;

        const size_t bufferId = dequeuedBuffer->bufferId();
        const int32_t bitstreamId = static_cast<int32_t>(dequeuedBuffer->getTimeStamp().tv_sec);
        const size_t bytesUsed = dequeuedBuffer->getPlaneBytesUsed(0);
        const bool isLast = dequeuedBuffer->isLast();
        ALOGV("DQBUF from output queue, bufferId=%zu, bitstreamId=%d, bytesused=%zu, isLast=%d",
              bufferId, bitstreamId, bytesUsed, isLast);

        // Get the corresponding VideoFrame of the dequeued buffer.
        auto it = mFrameAtDevice.find(bufferId);
        ALOG_ASSERT(it != mFrameAtDevice.end(), "buffer %zu is not found at mFrameAtDevice",
                    bufferId);
        auto frame = std::move(it->second);
        mFrameAtDevice.erase(it);

        if (bytesUsed > 0) {
            ALOGV("Send output frame(bitstreamId=%d) to client", bitstreamId);
            frame->setBitstreamId(bitstreamId);
            frame->setVisibleRect(mVisibleRect);
            mOutputCb.Run(std::move(frame));
        } else {
            // Workaround(b/168750131): If the buffer is not enqueued before the next drain is done,
            // then the driver will fail to notify EOS. So we recycle the buffer immediately.
            ALOGV("Recycle empty buffer %zu back to V4L2 output queue.", bufferId);
            // Release the readable ref first so the slot becomes free again.
            dequeuedBuffer.reset();
            auto outputBuffer = mOutputQueue->getFreeBuffer(bufferId);
            ALOG_ASSERT(outputBuffer, "V4L2 output queue slot %zu is not freed.", bufferId);

            if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
                ALOGE("%s(): Failed to recycle empty buffer to output queue.", __func__);
                onError();
                return;
            }
            mFrameAtDevice.insert(std::make_pair(bufferId, std::move(frame)));
        }

        // V4L2_BUF_FLAG_LAST marks the end of the flush started by the drain.
        if (mDrainCb && isLast) {
            ALOGV("All buffers are drained.");
            sendV4L2DecoderCmd(true);
            std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kOk);
            setState(State::Idle);
        }
    }

    // Handle resolution change event.
    if (event && dequeueResolutionChangeEvent()) {
        if (!changeResolution()) {
            onError();
            return;
        }
    }

    // We freed some input buffers, continue handling decode requests.
    if (inputDequeued) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::pumpDecodeRequest, mWeakThis));
    }
    // We free some output buffers, try to get VideoFrame.
    if (outputDequeued) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::tryFetchVideoFrame, mWeakThis));
    }
}
474
dequeueResolutionChangeEvent()475 bool V4L2Decoder::dequeueResolutionChangeEvent() {
476 ALOGV("%s()", __func__);
477 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
478
479 struct v4l2_event ev;
480 memset(&ev, 0, sizeof(ev));
481 while (mDevice->ioctl(VIDIOC_DQEVENT, &ev) == 0) {
482 if (ev.type == V4L2_EVENT_SOURCE_CHANGE &&
483 ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
484 return true;
485 }
486 }
487 return false;
488 }
489
// Reconfigures the output (CAPTURE) side after a resolution-change event:
// queries the new format and buffer count, negotiates an output pixel format,
// reallocates output buffers and obtains a fresh VideoFramePool. Returns false
// on any failure (caller transitions to the Error state).
bool V4L2Decoder::changeResolution() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    const std::optional<struct v4l2_format> format = getFormatInfo();
    std::optional<size_t> numOutputBuffers = getNumOutputBuffers();
    if (!format || !numOutputBuffers) {
        return false;
    }

    const ui::Size codedSize(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
    if (!setupOutputFormat(codedSize)) {
        return false;
    }

    // Re-query: the driver may adjust the size during setupOutputFormat().
    const std::optional<struct v4l2_format> adjustedFormat = getFormatInfo();
    if (!adjustedFormat) {
        return false;
    }
    mCodedSize.set(adjustedFormat->fmt.pix_mp.width, adjustedFormat->fmt.pix_mp.height);
    mVisibleRect = getVisibleRect(mCodedSize);

    ALOGI("Need %zu output buffers. coded size: %s, visible rect: %s", *numOutputBuffers,
          toString(mCodedSize).c_str(), toString(mVisibleRect).c_str());
    if (isEmpty(mCodedSize)) {
        ALOGE("Failed to get resolution from V4L2 driver.");
        return false;
    }

    // Drop the old output buffers and the frame/id bookkeeping tied to them.
    mOutputQueue->streamoff();
    mOutputQueue->deallocateBuffers();
    mFrameAtDevice.clear();
    mBlockIdToV4L2Id.clear();

    if (mOutputQueue->allocateBuffers(*numOutputBuffers, V4L2_MEMORY_DMABUF) == 0) {
        ALOGE("Failed to allocate output buffer.");
        return false;
    }
    if (!mOutputQueue->streamon()) {
        ALOGE("Failed to streamon output queue.");
        return false;
    }

    // Release the previous VideoFramePool before getting a new one to guarantee only one pool
    // exists at the same time.
    mVideoFramePool.reset();
    // Always use flexible pixel 420 format YCBCR_420_888 in Android.
    mVideoFramePool = mGetPoolCb.Run(mCodedSize, HalPixelFormat::YCBCR_420_888, *numOutputBuffers);
    if (!mVideoFramePool) {
        ALOGE("Failed to get block pool with size: %s", toString(mCodedSize).c_str());
        return false;
    }

    tryFetchVideoFrame();
    return true;
}
546
setupOutputFormat(const ui::Size & size)547 bool V4L2Decoder::setupOutputFormat(const ui::Size& size) {
548 for (const uint32_t& pixfmt :
549 mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
550 if (std::find(kSupportedOutputFourccs.begin(), kSupportedOutputFourccs.end(), pixfmt) ==
551 kSupportedOutputFourccs.end()) {
552 ALOGD("Pixel format %s is not supported, skipping...", fourccToString(pixfmt).c_str());
553 continue;
554 }
555
556 if (mOutputQueue->setFormat(pixfmt, size, 0) != std::nullopt) {
557 return true;
558 }
559 }
560
561 ALOGE("Failed to find supported pixel format");
562 return false;
563 }
564
tryFetchVideoFrame()565 void V4L2Decoder::tryFetchVideoFrame() {
566 ALOGV("%s()", __func__);
567 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
568
569 if (!mVideoFramePool) {
570 ALOGE("mVideoFramePool is null, failed to get the instance after resolution change?");
571 onError();
572 return;
573 }
574
575 if (mOutputQueue->freeBuffersCount() == 0) {
576 ALOGV("No free V4L2 output buffers, ignore.");
577 return;
578 }
579
580 if (!mVideoFramePool->getVideoFrame(
581 ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis))) {
582 ALOGV("%s(): Previous callback is running, ignore.", __func__);
583 }
584 }
585
// Called by the VideoFramePool with a graphic-block-backed frame. Maps the
// pool's block ID to a stable V4L2 buffer slot (so the same block always uses
// the same slot), queues the frame's dmabufs to the output queue and records
// the frame in mFrameAtDevice until the driver returns it.
void V4L2Decoder::onVideoFrameReady(
        std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!frameWithBlockId) {
        ALOGE("Got nullptr VideoFrame.");
        onError();
        return;
    }

    // Unwrap our arguments.
    std::unique_ptr<VideoFrame> frame;
    uint32_t blockId;
    std::tie(frame, blockId) = std::move(*frameWithBlockId);

    std::optional<V4L2WritableBufferRef> outputBuffer;
    // Find the V4L2 buffer that is associated with this block.
    auto iter = mBlockIdToV4L2Id.find(blockId);
    if (iter != mBlockIdToV4L2Id.end()) {
        // If we have met this block in the past, reuse the same V4L2 buffer.
        outputBuffer = mOutputQueue->getFreeBuffer(iter->second);
    } else if (mBlockIdToV4L2Id.size() < mOutputQueue->allocatedBuffersCount()) {
        // If this is the first time we see this block, give it the next
        // available V4L2 buffer.
        const size_t v4l2BufferId = mBlockIdToV4L2Id.size();
        mBlockIdToV4L2Id.emplace(blockId, v4l2BufferId);
        outputBuffer = mOutputQueue->getFreeBuffer(v4l2BufferId);
    } else {
        // If this happens, this is a bug in VideoFramePool. It should never
        // provide more blocks than we have V4L2 buffers.
        ALOGE("Got more different blocks than we have V4L2 buffers for.");
    }

    if (!outputBuffer) {
        ALOGE("V4L2 buffer not available. blockId=%u", blockId);
        onError();
        return;
    }

    uint32_t v4l2Id = outputBuffer->bufferId();
    ALOGV("QBUF to output queue, blockId=%u, V4L2Id=%u", blockId, v4l2Id);

    if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
        ALOGE("%s(): Failed to QBUF to output queue, blockId=%u, V4L2Id=%u", __func__, blockId,
              v4l2Id);
        onError();
        return;
    }
    // Double-queuing the same slot would corrupt the bookkeeping below.
    if (mFrameAtDevice.find(v4l2Id) != mFrameAtDevice.end()) {
        ALOGE("%s(): V4L2 buffer %d already enqueued.", __func__, v4l2Id);
        onError();
        return;
    }
    mFrameAtDevice.insert(std::make_pair(v4l2Id, std::move(frame)));

    // Keep fetching until we run out of free output buffers or pool frames.
    tryFetchVideoFrame();
}
644
getNumOutputBuffers()645 std::optional<size_t> V4L2Decoder::getNumOutputBuffers() {
646 ALOGV("%s()", __func__);
647 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
648
649 struct v4l2_control ctrl;
650 memset(&ctrl, 0, sizeof(ctrl));
651 ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
652 if (mDevice->ioctl(VIDIOC_G_CTRL, &ctrl) != 0) {
653 ALOGE("ioctl() failed: VIDIOC_G_CTRL");
654 return std::nullopt;
655 }
656 ALOGV("%s() V4L2_CID_MIN_BUFFERS_FOR_CAPTURE returns %u", __func__, ctrl.value);
657
658 return ctrl.value + kNumExtraOutputBuffers;
659 }
660
getFormatInfo()661 std::optional<struct v4l2_format> V4L2Decoder::getFormatInfo() {
662 ALOGV("%s()", __func__);
663 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
664
665 struct v4l2_format format;
666 memset(&format, 0, sizeof(format));
667 format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
668 if (mDevice->ioctl(VIDIOC_G_FMT, &format) != 0) {
669 ALOGE("ioctl() failed: VIDIOC_G_FMT");
670 return std::nullopt;
671 }
672
673 return format;
674 }
675
// Queries the visible (composed/cropped) rectangle from the driver, preferring
// VIDIOC_G_SELECTION and falling back to the legacy VIDIOC_G_CROP. Falls back
// to the full coded area when the query fails or the result is invalid.
Rect V4L2Decoder::getVisibleRect(const ui::Size& codedSize) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    struct v4l2_rect* visible_rect = nullptr;
    struct v4l2_selection selection_arg;
    memset(&selection_arg, 0, sizeof(selection_arg));
    selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    selection_arg.target = V4L2_SEL_TGT_COMPOSE;

    if (mDevice->ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
        ALOGV("VIDIOC_G_SELECTION is supported");
        visible_rect = &selection_arg.r;
    } else {
        ALOGV("Fallback to VIDIOC_G_CROP");
        struct v4l2_crop crop_arg;
        memset(&crop_arg, 0, sizeof(crop_arg));
        // NOTE(review): G_SELECTION above uses V4L2_BUF_TYPE_VIDEO_CAPTURE but
        // G_CROP uses the MPLANE type — confirm this asymmetry is intentional
        // for the drivers this targets.
        crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

        if (mDevice->ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
            ALOGW("ioctl() VIDIOC_G_CROP failed");
            return Rect(codedSize.width, codedSize.height);
        }
        visible_rect = &crop_arg.c;
    }

    // v4l2_rect is (left, top, width, height); Rect wants two corner points.
    Rect rect(visible_rect->left, visible_rect->top, visible_rect->left + visible_rect->width,
              visible_rect->top + visible_rect->height);
    ALOGV("visible rectangle is %s", toString(rect).c_str());
    // Sanity checks: the visible area must be non-empty and inside the coded
    // area; otherwise use the full coded area.
    if (!contains(Rect(codedSize.width, codedSize.height), rect)) {
        ALOGW("visible rectangle %s is not inside coded size %s", toString(rect).c_str(),
              toString(codedSize).c_str());
        return Rect(codedSize.width, codedSize.height);
    }
    if (rect.isEmpty()) {
        ALOGW("visible size is empty");
        return Rect(codedSize.width, codedSize.height);
    }

    return rect;
}
717
sendV4L2DecoderCmd(bool start)718 bool V4L2Decoder::sendV4L2DecoderCmd(bool start) {
719 ALOGV("%s(start=%d)", __func__, start);
720 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
721
722 struct v4l2_decoder_cmd cmd;
723 memset(&cmd, 0, sizeof(cmd));
724 cmd.cmd = start ? V4L2_DEC_CMD_START : V4L2_DEC_CMD_STOP;
725 if (mDevice->ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
726 ALOGE("ioctl() VIDIOC_DECODER_CMD failed: start=%d", start);
727 return false;
728 }
729
730 return true;
731 }
732
// Puts the decoder into the terminal Error state and notifies the owner via
// the error callback. All subsequent requests are rejected (see decode()).
void V4L2Decoder::onError() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    setState(State::Error);
    mErrorCb.Run();
}
740
setState(State newState)741 void V4L2Decoder::setState(State newState) {
742 ALOGV("%s(%s)", __func__, StateToString(newState));
743 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
744
745 if (mState == newState) return;
746 if (mState == State::Error) {
747 ALOGV("Already in Error state.");
748 return;
749 }
750
751 switch (newState) {
752 case State::Idle:
753 break;
754 case State::Decoding:
755 break;
756 case State::Draining:
757 if (mState != State::Decoding) newState = State::Error;
758 break;
759 case State::Error:
760 break;
761 }
762
763 ALOGI("Set state %s => %s", StateToString(mState), StateToString(newState));
764 mState = newState;
765 }
766
767 // static
StateToString(State state)768 const char* V4L2Decoder::StateToString(State state) {
769 switch (state) {
770 case State::Idle:
771 return "Idle";
772 case State::Decoding:
773 return "Decoding";
774 case State::Draining:
775 return "Draining";
776 case State::Error:
777 return "Error";
778 }
779 }
780
781 } // namespace android
782