/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "NuPlayer2Renderer"
#include <utils/Log.h>

#include "JWakeLock.h"
#include "NuPlayer2Renderer.h"
#include <algorithm>
#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/VideoFrameScheduler2.h>
#include <media/MediaCodecBuffer.h>

#include <inttypes.h>

namespace android {

/*
 * Example of common configuration settings in shell script form

   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
   adb shell setprop audio.offload.disable 1

   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
   adb shell setprop audio.offload.video 1

   #Use audio callbacks for PCM data
   adb shell setprop media.stagefright.audio.cbk 1

   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
   adb shell setprop media.stagefright.audio.deep 1

   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
   adb shell setprop media.stagefright.audio.sink 1000

 * These configurations take effect for the next track played (not the current track).
 */

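// Helpers that read the media.stagefright.audio.* system properties described
// above. They are consulted when a track is set up, so (as noted above) changes
// only apply to the next track, not the one currently playing.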
static inline bool getUseAudioCallbackSetting() {
    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
}

static inline int32_t getAudioSinkPcmMsSetting() {
    return property_get_int32(
            "media.stagefright.audio.sink", 500 /* default_value */);
}

// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000LL;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;

static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;

// Default video frame display duration when only video exists.
// Used to set max media time in MediaClock.
static const int64_t kDefaultVideoFrameIntervalUs = 100000LL;

// static
const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;

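// Maps the PCM encoding constant carried in the decoder's output format
// (kAudioEncodingPcm*) to the native audio_format_t used when opening the
// AudioSink; unknown encodings map to AUDIO_FORMAT_INVALID.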
static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
    switch (pcmEncoding) {
    case kAudioEncodingPcmFloat:
        return AUDIO_FORMAT_PCM_FLOAT;
    case kAudioEncodingPcm16bit:
        return AUDIO_FORMAT_PCM_16_BIT;
    case kAudioEncodingPcm8bit:
        return AUDIO_FORMAT_PCM_8_BIT;  // TODO: do we want to support this?
    default:
        ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
        return AUDIO_FORMAT_INVALID;
    }
}

NuPlayer2::Renderer::Renderer(
        const sp<MediaPlayer2Interface::AudioSink> &sink,
        const sp<MediaClock> &mediaClock,
        const sp<AMessage> &notify,
        const sp<JObjectHolder> &context,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mMediaClock(mediaClock),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0LL),
      mNextVideoTimeMediaUs(-1),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(true),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new JWakeLock(context)) {
    CHECK(mediaClock != NULL);
    mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
}

NuPlayer2::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }

    // Try to avoid a race condition in case the callback is still running.
    Mutex::Autolock autoLock(mLock);
    if (mUseAudioCallback) {
        flushQueue(&mAudioQueue);
        flushQueue(&mVideoQueue);
    }
    mWakeLock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}

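// Hands a decoded buffer to the renderer by posting it to the renderer's looper.
// The message is stamped with the current queue generation so that buffers queued
// before a flush can later be recognized and dropped as stale.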
void NuPlayer2::Renderer::queueBuffer(
        bool audio,
        const sp<MediaCodecBuffer> &buffer,
        const sp<AMessage> &notifyConsumed) {
    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setObject("buffer", buffer);
    msg->setMessage("notifyConsumed", notifyConsumed);
    msg->post();
}

void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
    CHECK_NE(finalResult, (status_t)OK);

    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setInt32("finalResult", finalResult);
    msg->post();
}

status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
    writeToAMessage(msg, rate);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed <= 0.f) {
        ALOGW("playback rate cannot be %f", rate.mSpeed);
        return BAD_VALUE;
    }

    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            ALOGW("failed to set playback rate on audio sink, err(%d)", err);
            return err;
        }
    }
    mPlaybackSettings = rate;
    mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
    return OK;
}

status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, rate);
        }
    }
    return err;
}

status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate, %f vs %f",
                      rate->mSpeed, mPlaybackSettings.mSpeed);
            }
            // Adopt the playback settings actually in use by the audio sink; they may
            // differ slightly because the sink does not apply very small rate changes.
            mPlaybackSettings = *rate;
        }
        return err;
    }
    *rate = mPlaybackSettings;
    return OK;
}

status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
    writeToAMessage(msg, sync, videoFpsHint);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
        return BAD_VALUE;
    }
    // TODO: support sync sources
    return INVALID_OPERATION;
}

status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, sync, videoFps);
        }
    }
    return err;
}

status_t NuPlayer2::Renderer::onGetSyncSettings(
        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
    *sync = mSyncSettings;
    *videoFps = -1.f;
    return OK;
}

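// Flushes the pending audio or video queue. Bumping the queue and drain generation
// counters under mLock invalidates any in-flight queue/drain messages; the actual
// queue entries are released on the looper when kWhatFlush is handled.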
void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
            mNextVideoTimeMediaUs = -1;
        }

        mMediaClock->clearAnchor();
        mVideoLateByUs = 0;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}

void NuPlayer2::Renderer::signalTimeDiscontinuity() {
}

void NuPlayer2::Renderer::signalDisableOffloadAudio() {
    (new AMessage(kWhatDisableOffloadAudio, this))->post();
}

void NuPlayer2::Renderer::signalEnableOffloadAudio() {
    (new AMessage(kWhatEnableOffloadAudio, this))->post();
}

void NuPlayer2::Renderer::pause() {
    (new AMessage(kWhatPause, this))->post();
}

void NuPlayer2::Renderer::resume() {
    (new AMessage(kWhatResume, this))->post();
}

void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
    msg->setFloat("frame-rate", fps);
    msg->post();
}

// Called on any threads without mLock acquired.
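// Returns the current media position from the MediaClock. If the clock has not been
// anchored yet but the AudioSink already reports a timestamp, an initial anchor is
// derived from the sink's played-out duration before retrying.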
status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}

void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}

void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    if (mAudioFirstAnchorTimeMediaUs == -1) {
        mAudioFirstAnchorTimeMediaUs = mediaUs;
        mMediaClock->setStartingTimeMedia(mediaUs);
    }
}

// Called on renderer looper.
void NuPlayer2::Renderer::clearAnchorTime() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}

void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}

int64_t NuPlayer2::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}

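// Synchronously (re)opens the audio sink on the renderer's looper for the given
// format. On success, *isOffloaded (if provided) reports whether the sink was
// actually opened in offload mode.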
status_t NuPlayer2::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded,
        bool isStreaming) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);
    msg->setInt32("isStreaming", isStreaming);

    sp<AMessage> response;
    status_t postStatus = msg->postAndAwaitResponse(&response);

    int32_t err;
    if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}

void NuPlayer2::Renderer::closeAudioSink() {
    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);
}

void NuPlayer2::Renderer::changeAudioFormat(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool isStreaming,
        const sp<AMessage> &notify) {
    sp<AMessage> meta = new AMessage;
    meta->setMessage("format", format);
    meta->setInt32("offload-only", offloadOnly);
    meta->setInt32("has-video", hasVideo);
    meta->setInt32("flags", flags);
    meta->setInt32("isStreaming", isStreaming);

    sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
    msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
    msg->setMessage("notify", notify);
    msg->setMessage("meta", meta);
    msg->post();
}

void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            uint32_t isStreaming;
            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatChangeAudioFormat:
        {
            int32_t queueGeneration;
            CHECK(msg->findInt32("queueGeneration", &queueGeneration));

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));

            if (offloadingAudio()) {
                ALOGW("changeAudioFormat should NOT be called in offload mode");
                notify->setInt32("err", INVALID_OPERATION);
                notify->post();
                break;
            }

            sp<AMessage> meta;
            CHECK(msg->findMessage("meta", &meta));

            if (queueGeneration != getQueueGeneration(true /* audio */)
                    || mAudioQueue.empty()) {
                onChangeAudioFormat(meta, notify);
                break;
            }

            QueueEntry entry;
            entry.mNotifyConsumed = notify;
            entry.mMeta = meta;

            Mutex::Autolock autoLock(mLock);
            mAudioQueue.push_back(entry);
            postDrainAudioQueue_l();

            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                // Handle AudioTrack race when start is immediately called after flush.
                uint32_t numFramesPendingPlayout =
                    (mNumFramesWritten > numFramesPlayed ?
                        mNumFramesWritten - numFramesPlayed : 0);

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackSettings.mSpeed > 1.0f) {
                    delayUs /= mPlaybackSettings.mSpeed;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}

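// Schedules a kWhatDrainAudioQueue message unless a drain is already pending, the
// audio/video queues are still being synchronized, or audio is delivered through the
// sink callback instead. While paused, draining is deferred until
// mPauseDrainAudioAllowedUs so the sink has time to stop first.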
void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}

void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}

void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}

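// Callback registered with the AudioSink when it operates in callback (pull) mode:
// it either asks the renderer to fill a buffer with PCM data or reports stream-end
// and tear-down events.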
// static
size_t NuPlayer2::Renderer::AudioSinkCallback(
        MediaPlayer2Interface::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayer2Interface::AudioSink::cb_event_t event) {
    NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;

    switch (event) {
        case MediaPlayer2Interface::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOSCallback();
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}

void NuPlayer2::Renderer::notifyEOSCallback() {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return;
    }

    notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
}

size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return 0;
    }

    bool hasEOS = false;

    size_t sizeCopied = 0;
    bool firstEntry = true;
    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
    while (sizeCopied < size && !mAudioQueue.empty()) {
        entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) { // EOS
            hasEOS = true;
            mAudioQueue.erase(mAudioQueue.begin());
            break;
        }

        if (firstEntry && entry->mOffset == 0) {
            firstEntry = false;
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;
        size_t sizeRemaining = size - sizeCopied;
        if (copy > sizeRemaining) {
            copy = sizeRemaining;
        }

        memcpy((char *)buffer + sizeCopied,
               entry->mBuffer->data() + entry->mOffset,
               copy);

        entry->mOffset += copy;
        if (entry->mOffset == entry->mBuffer->size()) {
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
        }
        sizeCopied += copy;

        notifyIfMediaRenderingStarted_l();
    }

    if (mAudioFirstAnchorTimeMediaUs >= 0) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs =
            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
        // we don't know how much data we are queueing for offloaded tracks.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    }

    // for non-offloaded audio, we need to compute the frames written because
    // there is no EVENT_STREAM_END notification. The frames written gives
    // an estimate on the pending played out duration.
    if (!offloadingAudio()) {
        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    }

    if (hasEOS) {
        (new AMessage(kWhatStopAudioSink, this))->post();
        // As there is currently no EVENT_STREAM_END callback notification for
        // non-offloaded audio tracks, we need to post the EOS ourselves.
        if (!offloadingAudio()) {
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            ALOGV("fillAudioBuffer: notifyEOS_l "
                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
            notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
        }
    }
    return sizeCopied;
}

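// Used when the audio sink cannot make progress (e.g. getPosition fails): walk the
// audio queue up to the last EOS/format-change marker, post the pending consumed and
// EOS notifications, and drop the samples so the decoder is not left waiting.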
void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == nullptr) {
                if (it->mNotifyConsumed == nullptr) {
                    // delay doesn't matter as we don't even have an AudioTrack
                    notifyEOS(true /* audio */, it->mFinalResult);
                } else {
                    // TAG for re-opening audio sink.
                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
                }
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}

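// Writes as much queued audio to the sink as it will accept. Returns true when the
// caller should schedule another drain (the delay is then derived from the number of
// frames still pending in the sink).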
bool NuPlayer2::Renderer::onDrainAudioQueue() {
    // do not drain audio during teardown as queued buffers may be invalid.
    if (mAudioTornDown) {
        return false;
    }
    // TODO: This call to getPosition checks if AudioTrack has been created
    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
    // CHECKs on getPosition will fail.
    // We still need to figure out why the AudioTrack has not been created when
    // this function is called. One possible reason is leftover audio. Another
    // avenue is to check whether the decoder has received INFO_FORMAT_CHANGED as
    // its first buffer, since the AudioSink is opened there, as well as possible
    // interactions with a flush immediately after start. Investigate the error
    // message "vorbis_dsp_synthesis returned -135", along with RTSP.
    uint32_t numFramesPlayed;
    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
        // When getPosition fails, renderer will not reschedule the draining
        // unless new samples are queued.
        // If we have pending EOS (or "eos" marker for discontinuities), we need
        // to post these now as NuPlayer2Decoder might be waiting for it.
        drainAudioQueueUntilLastEOS();

        ALOGW("onDrainAudioQueue(): audio sink is not ready");
        return false;
    }

#if 0
    ssize_t numFramesAvailableToWrite =
        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);

    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
        ALOGI("audio sink underrun");
    } else {
        ALOGV("audio queue has %d frames left to play",
             mAudioSink->frameCount() - numFramesAvailableToWrite);
    }
#endif

    uint32_t prevFramesWritten = mNumFramesWritten;
    while (!mAudioQueue.empty()) {
        QueueEntry *entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) {
            if (entry->mNotifyConsumed != nullptr) {
                // TAG for re-open audio sink.
                onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
                mAudioQueue.erase(mAudioQueue.begin());
                continue;
            }

            // EOS
            if (mPaused) {
                // Do not notify EOS when paused.
                // This is needed to avoid switching to the next clip while paused.
                ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
                return false;
            }

            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);

            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
            if (mAudioSink->needsTrailingPadding()) {
                // If we're not in gapless playback (i.e. through setNextPlayer), we
                // need to stop the track here, because that will play out the last
                // little bit at the end of the file. Otherwise short files won't play.
                mAudioSink->stop();
                mNumFramesWritten = 0;
            }
            return false;
        }

        mLastAudioBufferDrained = entry->mBufferOrdinal;

        // ignore 0-sized buffer which could be EOS marker with no data
        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
                    mediaTimeUs / 1E6);
            onNewAudioMediaTime(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;

        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
                                            copy, false /* blocking */);
        if (written < 0) {
            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
            if (written == WOULD_BLOCK) {
                ALOGV("AudioSink write would block when writing %zu bytes", copy);
            } else {
                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
                // This can only happen when AudioSink was opened with doNotReconnect flag set to
                // true, in which case the NuPlayer2 will handle the reconnect.
                notifyAudioTearDown(kDueToError);
            }
            break;
        }

        entry->mOffset += written;
        size_t remainder = entry->mBuffer->size() - entry->mOffset;
        if ((ssize_t)remainder < mAudioSink->frameSize()) {
            if (remainder > 0) {
                ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
                        remainder);
                entry->mOffset += remainder;
                copy -= remainder;
            }

            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());

            entry = NULL;
        }

        size_t copiedFrames = written / mAudioSink->frameSize();
        mNumFramesWritten += copiedFrames;

        {
            Mutex::Autolock autoLock(mLock);
            int64_t maxTimeMedia;
            maxTimeMedia =
                mAnchorTimeMediaUs +
                        (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
                                * 1000LL * mAudioSink->msecsPerFrame());
            mMediaClock->updateMaxTimeMedia(maxTimeMedia);

            notifyIfMediaRenderingStarted_l();
        }

        if (written != (ssize_t)copy) {
            // A short count was received from AudioSink::write()
            //
            // AudioSink write is called in non-blocking mode.
            // It may return with a short count when:
            //
            // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
            //    discarded.
            // 2) The data to be copied exceeds the available buffer in AudioSink.
            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.

            // (Case 1)
            // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
            // needs to fail, as we should not carry over fractional frames between calls.
            CHECK_EQ(copy % mAudioSink->frameSize(), 0u);

            // (Case 2, 3, 4)
            // Return early to the caller.
            // Beware of calling immediately again as this may busy-loop if you are not careful.
            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
            break;
        }
    }

    // calculate whether we need to reschedule another write.
    bool reschedule = !mAudioQueue.empty()
            && (!mPaused
                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
    return reschedule;
}

int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
    int32_t sampleRate = offloadingAudio() ?
            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
    if (sampleRate == 0) {
        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
        return 0;
    }
    return (int64_t)(numFrames * 1000000LL / sampleRate);
}

// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
    if (mUseVirtualAudioSink) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t mediaUs;
        if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
            return 0LL;
        } else {
            return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
        }
    }

    const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
    int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
    if (pendingUs < 0) {
        // This shouldn't happen unless the timestamp is stale.
        ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
                "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
                __func__, (long long)pendingUs,
                (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
        pendingUs = 0;
    }
    return pendingUs;
}

int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
    int64_t realUs;
    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
        // If we failed to get the current position, e.g. because the audio clock is
        // not ready yet, just play out the video immediately without delay.
        return nowUs;
    }
    return realUs;
}

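// Updates the MediaClock anchor from the latest audio media time. If the sink never
// reports progress even though more than kMaxAllowedAudioSinkDelayUs of audio has been
// written, playback falls back to a system-clock-driven "virtual" audio sink.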
void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}

// Called without mLock acquired.
void NuPlayer2::Renderer::postDrainVideoQueue() {
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    if (mFlags & FLAG_REAL_TIME) {
        int64_t realTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));

        realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        int64_t delayUs = realTimeUs - nowUs;

        ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
        // post 2 display refreshes before rendering is due
        msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

        mDrainVideoQueuePending = true;
        return;
    }

    int64_t mediaTimeUs;
    CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

    {
        Mutex::Autolock autoLock(mLock);
        if (mAnchorTimeMediaUs < 0) {
            mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
            mAnchorTimeMediaUs = mediaTimeUs;
        }
    }
    mNextVideoTimeMediaUs = mediaTimeUs;
    if (!mHasAudio) {
        // smooth out videos >= 10fps
        mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
    }

    if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
        msg->post();
    } else {
        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        // post 2 display refreshes before rendering is due
        mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
    }

    mDrainVideoQueuePending = true;
}

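// Renders (or drops) the frame at the head of the video queue. While playing, frames
// more than 40 ms late are dropped, except that the first frame after start or flush
// is always rendered so the display gets updated.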
onDrainVideoQueue()1310 void NuPlayer2::Renderer::onDrainVideoQueue() {
1311     if (mVideoQueue.empty()) {
1312         return;
1313     }
1314 
1315     QueueEntry *entry = &*mVideoQueue.begin();
1316 
1317     if (entry->mBuffer == NULL) {
1318         // EOS
1319 
1320         notifyEOS(false /* audio */, entry->mFinalResult);
1321 
1322         mVideoQueue.erase(mVideoQueue.begin());
1323         entry = NULL;
1324 
1325         setVideoLateByUs(0);
1326         return;
1327     }
1328 
1329     int64_t nowUs = ALooper::GetNowUs();
1330     int64_t realTimeUs;
1331     int64_t mediaTimeUs = -1;
1332     if (mFlags & FLAG_REAL_TIME) {
1333         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1334     } else {
1335         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1336 
1337         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1338     }
1339     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1340 
1341     bool tooLate = false;
1342 
1343     if (!mPaused) {
1344         setVideoLateByUs(nowUs - realTimeUs);
1345         tooLate = (mVideoLateByUs > 40000);
1346 
1347         if (tooLate) {
1348             ALOGV("video late by %lld us (%.2f secs)",
1349                  (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1350         } else {
1351             int64_t mediaUs = 0;
1352             mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1353             ALOGV("rendering video at media time %.2f secs",
1354                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
1355                     mediaUs) / 1E6);
1356 
1357             if (!(mFlags & FLAG_REAL_TIME)
1358                     && mLastAudioMediaTimeUs != -1
1359                     && mediaTimeUs > mLastAudioMediaTimeUs) {
1360                 // If audio ends before video, video continues to drive media clock.
1361                 // Also smooth out videos >= 10fps.
1362                 mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
1363             }
1364         }
1365     } else {
1366         setVideoLateByUs(0);
1367         if (!mVideoSampleReceived && !mHasAudio) {
1368             // This will ensure that the first frame after a flush won't be used as anchor
1369             // when renderer is in paused state, because resume can happen any time after seek.
1370             clearAnchorTime();
1371         }
1372     }
1373 
1374     // Always render the first video frame while keeping stats on A/V sync.
1375     if (!mVideoSampleReceived) {
1376         realTimeUs = nowUs;
1377         tooLate = false;
1378     }
1379 
1380     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
1381     entry->mNotifyConsumed->setInt32("render", !tooLate);
1382     entry->mNotifyConsumed->post();
1383     mVideoQueue.erase(mVideoQueue.begin());
1384     entry = NULL;
1385 
1386     mVideoSampleReceived = true;
1387 
1388     if (!mPaused) {
1389         if (!mVideoRenderingStarted) {
1390             mVideoRenderingStarted = true;
1391             notifyVideoRenderingStart();
1392         }
1393         Mutex::Autolock autoLock(mLock);
1394         notifyIfMediaRenderingStarted_l();
1395     }
1396 }
1397 
notifyVideoRenderingStart()1398 void NuPlayer2::Renderer::notifyVideoRenderingStart() {
1399     sp<AMessage> notify = mNotify->dup();
1400     notify->setInt32("what", kWhatVideoRenderingStart);
1401     notify->post();
1402 }
1403 
notifyEOS(bool audio,status_t finalResult,int64_t delayUs)1404 void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1405     Mutex::Autolock autoLock(mLock);
1406     notifyEOS_l(audio, finalResult, delayUs);
1407 }
1408 
notifyEOS_l(bool audio,status_t finalResult,int64_t delayUs)1409 void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
1410     if (audio && delayUs > 0) {
1411         sp<AMessage> msg = new AMessage(kWhatEOS, this);
1412         msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1413         msg->setInt32("finalResult", finalResult);
1414         msg->post(delayUs);
1415         return;
1416     }
1417     sp<AMessage> notify = mNotify->dup();
1418     notify->setInt32("what", kWhatEOS);
1419     notify->setInt32("audio", static_cast<int32_t>(audio));
1420     notify->setInt32("finalResult", finalResult);
1421     notify->post(delayUs);
1422 
1423     if (audio) {
1424         // Video might outlive audio. Clear anchor to enable video only case.
1425         mAnchorTimeMediaUs = -1;
1426         mHasAudio = false;
1427         if (mNextVideoTimeMediaUs >= 0) {
1428             int64_t mediaUs = 0;
1429             int64_t nowUs = ALooper::GetNowUs();
1430             status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
1431             if (result == OK) {
1432                 if (mNextVideoTimeMediaUs > mediaUs) {
1433                     mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
1434                 }
1435             } else {
1436                 mMediaClock->updateAnchor(
1437                         mNextVideoTimeMediaUs, nowUs,
1438                         mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
1439             }
1440         }
1441     }
1442 }
1443 
1444 void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1445     sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1446     msg->setInt32("reason", reason);
1447     msg->post();
1448 }
1449 
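// Handles a decoded buffer arriving from the decoder: the buffer is appended to the
// audio or video queue and the corresponding drain is scheduled (a VideoFrameScheduler2
// is created lazily once video is present). While mSyncQueues is set, the heads of both
// queues are compared and leading audio that starts more than 0.1 secs before the first
// video frame is dropped, so that audio and video start roughly aligned.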
1450 void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1451     int32_t audio;
1452     CHECK(msg->findInt32("audio", &audio));
1453 
1454     if (dropBufferIfStale(audio, msg)) {
1455         return;
1456     }
1457 
1458     if (audio) {
1459         mHasAudio = true;
1460     } else {
1461         mHasVideo = true;
1462     }
1463 
1464     if (mHasVideo) {
1465         if (mVideoScheduler == NULL) {
1466             mVideoScheduler = new VideoFrameScheduler2();
1467             mVideoScheduler->init();
1468         }
1469     }
1470 
1471     sp<RefBase> obj;
1472     CHECK(msg->findObject("buffer", &obj));
1473     sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
1474 
1475     sp<AMessage> notifyConsumed;
1476     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1477 
1478     QueueEntry entry;
1479     entry.mBuffer = buffer;
1480     entry.mNotifyConsumed = notifyConsumed;
1481     entry.mOffset = 0;
1482     entry.mFinalResult = OK;
1483     entry.mBufferOrdinal = ++mTotalBuffersQueued;
1484 
1485     if (audio) {
1486         Mutex::Autolock autoLock(mLock);
1487         mAudioQueue.push_back(entry);
1488         postDrainAudioQueue_l();
1489     } else {
1490         mVideoQueue.push_back(entry);
1491         postDrainVideoQueue();
1492     }
1493 
1494     Mutex::Autolock autoLock(mLock);
1495     if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1496         return;
1497     }
1498 
1499     sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1500     sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1501 
1502     if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1503         // EOS signalled on either queue.
1504         syncQueuesDone_l();
1505         return;
1506     }
1507 
1508     int64_t firstAudioTimeUs;
1509     int64_t firstVideoTimeUs;
1510     CHECK(firstAudioBuffer->meta()
1511             ->findInt64("timeUs", &firstAudioTimeUs));
1512     CHECK(firstVideoBuffer->meta()
1513             ->findInt64("timeUs", &firstVideoTimeUs));
1514 
1515     int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1516 
1517     ALOGV("queueDiff = %.2f secs", diff / 1E6);
1518 
1519     if (diff > 100000LL) {
1520         // Audio data starts more than 0.1 secs before video.
1521         // Drop some audio.
1522 
1523         (*mAudioQueue.begin()).mNotifyConsumed->post();
1524         mAudioQueue.erase(mAudioQueue.begin());
1525         return;
1526     }
1527 
1528     syncQueuesDone_l();
1529 }
1530 
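// Ends the initial queue-sync phase (if active) and kicks both drain paths.
// postDrainVideoQueue() is not an "_l" variant, so mLock is dropped around that call.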
1531 void NuPlayer2::Renderer::syncQueuesDone_l() {
1532     if (!mSyncQueues) {
1533         return;
1534     }
1535 
1536     mSyncQueues = false;
1537 
1538     if (!mAudioQueue.empty()) {
1539         postDrainAudioQueue_l();
1540     }
1541 
1542     if (!mVideoQueue.empty()) {
1543         mLock.unlock();
1544         postDrainVideoQueue();
1545         mLock.lock();
1546     }
1547 }
1548 
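// Queues an EOS marker (an entry without a buffer) on the audio or video queue and
// schedules a drain; any pending queue-sync is finished first so the EOS is not held
// back waiting for data on the other stream.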
1549 void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1550     int32_t audio;
1551     CHECK(msg->findInt32("audio", &audio));
1552 
1553     if (dropBufferIfStale(audio, msg)) {
1554         return;
1555     }
1556 
1557     int32_t finalResult;
1558     CHECK(msg->findInt32("finalResult", &finalResult));
1559 
1560     QueueEntry entry;
1561     entry.mOffset = 0;
1562     entry.mFinalResult = finalResult;
1563 
1564     if (audio) {
1565         Mutex::Autolock autoLock(mLock);
1566         if (mAudioQueue.empty() && mSyncQueues) {
1567             syncQueuesDone_l();
1568         }
1569         mAudioQueue.push_back(entry);
1570         postDrainAudioQueue_l();
1571     } else {
1572         if (mVideoQueue.empty() && getSyncQueues()) {
1573             Mutex::Autolock autoLock(mLock);
1574             syncQueuesDone_l();
1575         }
1576         mVideoQueue.push_back(entry);
1577         postDrainVideoQueue();
1578     }
1579 }
1580 
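// Flushes one stream: stops queue syncing, clears the anchor, empties the queue, and
// bumps the drain (and, for audio, EOS) generation so in-flight drain/EOS messages are
// discarded. The AudioSink is paused and flushed; in PCM mode it is also stopped so its
// internal buffer refills before playback resumes.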
1581 void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) {
1582     int32_t audio, notifyComplete;
1583     CHECK(msg->findInt32("audio", &audio));
1584 
1585     {
1586         Mutex::Autolock autoLock(mLock);
1587         if (audio) {
1588             notifyComplete = mNotifyCompleteAudio;
1589             mNotifyCompleteAudio = false;
1590             mLastAudioMediaTimeUs = -1;
1591 
1592             mHasAudio = false;
1593             if (mNextVideoTimeMediaUs >= 0) {
1594                 int64_t nowUs = ALooper::GetNowUs();
1595                 mMediaClock->updateAnchor(
1596                         mNextVideoTimeMediaUs, nowUs,
1597                         mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
1598             }
1599         } else {
1600             notifyComplete = mNotifyCompleteVideo;
1601             mNotifyCompleteVideo = false;
1602             mVideoRenderingStarted = false;
1603         }
1604 
1605         // If we're currently syncing the queues, i.e. dropping audio while
1606         // aligning the first audio/video buffer times and only one of the
1607         // two queues has data, we may starve that queue by not requesting
1608         // more buffers from the decoder. If the other source then encounters
1609         // a discontinuity that leads to flushing, we'll never find the
1610         // corresponding discontinuity on the other queue.
1611         // Therefore we'll stop syncing the queues if at least one of them
1612         // is flushed.
1613         syncQueuesDone_l();
1614     }
1615     clearAnchorTime();
1616 
1617     ALOGV("flushing %s", audio ? "audio" : "video");
1618     if (audio) {
1619         {
1620             Mutex::Autolock autoLock(mLock);
1621             flushQueue(&mAudioQueue);
1622 
1623             ++mAudioDrainGeneration;
1624             ++mAudioEOSGeneration;
1625             prepareForMediaRenderingStart_l();
1626 
1627             // the frame count will be reset after flush.
1628             clearAudioFirstAnchorTime_l();
1629         }
1630 
1631         mDrainAudioQueuePending = false;
1632 
1633         if (offloadingAudio()) {
1634             mAudioSink->pause();
1635             mAudioSink->flush();
1636             if (!mPaused) {
1637                 mAudioSink->start();
1638             }
1639         } else {
1640             mAudioSink->pause();
1641             mAudioSink->flush();
1642             // Call stop() to signal to the AudioSink to completely fill the
1643             // internal buffer before resuming playback.
1644             // FIXME: this is ignored after flush().
1645             mAudioSink->stop();
1646             if (mPaused) {
1647                 // Race condition: if renderer is paused and audio sink is stopped,
1648                 // we need to make sure that the audio track buffer fully drains
1649                 // before delivering data.
1650                 // FIXME: remove this if we can detect if stop() is complete.
1651                 const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
1652                 mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
1653             } else {
1654                 mAudioSink->start();
1655             }
1656             mNumFramesWritten = 0;
1657         }
1658         mNextAudioClockUpdateTimeUs = -1;
1659     } else {
1660         flushQueue(&mVideoQueue);
1661 
1662         mDrainVideoQueuePending = false;
1663 
1664         if (mVideoScheduler != NULL) {
1665             mVideoScheduler->restart();
1666         }
1667 
1668         Mutex::Autolock autoLock(mLock);
1669         ++mVideoDrainGeneration;
1670         prepareForMediaRenderingStart_l();
1671     }
1672 
1673     mVideoSampleReceived = false;
1674 
1675     if (notifyComplete) {
1676         notifyFlushComplete(audio);
1677     }
1678 }
1679 
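// Returns every queued buffer to the decoder by posting its notifyConsumed message.
// Buffer-less entries that still carry a notify message represent pending audio format
// changes and are handled via onChangeAudioFormat().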
1680 void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) {
1681     while (!queue->empty()) {
1682         QueueEntry *entry = &*queue->begin();
1683 
1684         if (entry->mBuffer != NULL) {
1685             entry->mNotifyConsumed->post();
1686         } else if (entry->mNotifyConsumed != nullptr) {
1687             // Buffer-less entries with a notify message carry a pending audio format change; is it needed to open the audio sink now?
1688             onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1689         }
1690 
1691         queue->erase(queue->begin());
1692         entry = NULL;
1693     }
1694 }
1695 
1696 void NuPlayer2::Renderer::notifyFlushComplete(bool audio) {
1697     sp<AMessage> notify = mNotify->dup();
1698     notify->setInt32("what", kWhatFlushComplete);
1699     notify->setInt32("audio", static_cast<int32_t>(audio));
1700     notify->post();
1701 }
1702 
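// A buffer whose queueGeneration no longer matches the current one was queued before
// the last flush; it is returned to the decoder (if it has a notifyConsumed message)
// and dropped.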
1703 bool NuPlayer2::Renderer::dropBufferIfStale(
1704         bool audio, const sp<AMessage> &msg) {
1705     int32_t queueGeneration;
1706     CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1707 
1708     if (queueGeneration == getQueueGeneration(audio)) {
1709         return false;
1710     }
1711 
1712     sp<AMessage> notifyConsumed;
1713     if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1714         notifyConsumed->post();
1715     }
1716 
1717     return true;
1718 }
1719 
1720 void NuPlayer2::Renderer::onAudioSinkChanged() {
1721     if (offloadingAudio()) {
1722         return;
1723     }
1724     CHECK(!mDrainAudioQueuePending);
1725     mNumFramesWritten = 0;
1726     mAnchorNumFramesWritten = -1;
1727     uint32_t written;
1728     if (mAudioSink->getFramesWritten(&written) == OK) {
1729         mNumFramesWritten = written;
1730     }
1731 }
1732 
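// Switching audio offload off (below) or on (onEnableOffloadAudio) invalidates any
// pending audio drain: the drain generation is bumped and, if rendering-start tracking
// is active, it is re-armed.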
1733 void NuPlayer2::Renderer::onDisableOffloadAudio() {
1734     Mutex::Autolock autoLock(mLock);
1735     mFlags &= ~FLAG_OFFLOAD_AUDIO;
1736     ++mAudioDrainGeneration;
1737     if (mAudioRenderingStartGeneration != -1) {
1738         prepareForMediaRenderingStart_l();
1739     }
1740 }
1741 
1742 void NuPlayer2::Renderer::onEnableOffloadAudio() {
1743     Mutex::Autolock autoLock(mLock);
1744     mFlags |= FLAG_OFFLOAD_AUDIO;
1745     ++mAudioDrainGeneration;
1746     if (mAudioRenderingStartGeneration != -1) {
1747         prepareForMediaRenderingStart_l();
1748     }
1749 }
1750 
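// Pauses rendering: freezes the media clock (rate 0), bumps the video drain generation
// (the audio generation is left alone so the sink buffer keeps filling), pauses the
// AudioSink and arms the offload pause timeout.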
1751 void NuPlayer2::Renderer::onPause() {
1752     if (mPaused) {
1753         return;
1754     }
1755 
1756     {
1757         Mutex::Autolock autoLock(mLock);
1758         // We do not increment the audio drain generation so that the audio buffer keeps filling during pause.
1759         ++mVideoDrainGeneration;
1760         prepareForMediaRenderingStart_l();
1761         mPaused = true;
1762         mMediaClock->setPlaybackRate(0.0);
1763     }
1764 
1765     mDrainAudioQueuePending = false;
1766     mDrainVideoQueuePending = false;
1767 
1768     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1769     mAudioSink->pause();
1770     startAudioOffloadPauseTimeout();
1771 
1772     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1773           mAudioQueue.size(), mVideoQueue.size());
1774 }
1775 
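// Resumes rendering: cancels the offload pause timeout, restarts the AudioSink if it
// is ready, restores the playback rate on both the sink and the media clock, and
// re-posts drains for any buffers queued while paused.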
1776 void NuPlayer2::Renderer::onResume() {
1777     if (!mPaused) {
1778         return;
1779     }
1780 
1781     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1782     cancelAudioOffloadPauseTimeout();
1783     if (mAudioSink->ready()) {
1784         status_t err = mAudioSink->start();
1785         if (err != OK) {
1786             ALOGE("cannot start AudioSink err %d", err);
1787             notifyAudioTearDown(kDueToError);
1788         }
1789     }
1790 
1791     {
1792         Mutex::Autolock autoLock(mLock);
1793         mPaused = false;
1794         // rendering started message may have been delayed if we were paused.
1795         if (mRenderingDataDelivered) {
1796             notifyIfMediaRenderingStarted_l();
1797         }
1798         // configure audiosink as we did not do it when pausing
1799         if (mAudioSink != NULL && mAudioSink->ready()) {
1800             mAudioSink->setPlaybackRate(mPlaybackSettings);
1801         }
1802 
1803         mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
1804 
1805         if (!mAudioQueue.empty()) {
1806             postDrainAudioQueue_l();
1807         }
1808     }
1809 
1810     if (!mVideoQueue.empty()) {
1811         postDrainVideoQueue();
1812     }
1813 }
1814 
1815 void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
1816     if (mVideoScheduler == NULL) {
1817         mVideoScheduler = new VideoFrameScheduler2();
1818     }
1819     mVideoScheduler->init(fps);
1820 }
1821 
1822 int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) {
1823     Mutex::Autolock autoLock(mLock);
1824     return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1825 }
1826 
1827 int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) {
1828     Mutex::Autolock autoLock(mLock);
1829     return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1830 }
1831 
1832 bool NuPlayer2::Renderer::getSyncQueues() {
1833     Mutex::Autolock autoLock(mLock);
1834     return mSyncQueues;
1835 }
1836 
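// Tears down audio output once per sink lifetime (guarded by mAudioTornDown, which is
// reset when the sink is reopened): stops and flushes the AudioSink and notifies the
// upper layer with the reason and, when available, the current position.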
1837 void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1838     if (mAudioTornDown) {
1839         return;
1840     }
1841     mAudioTornDown = true;
1842 
1843     int64_t currentPositionUs;
1844     sp<AMessage> notify = mNotify->dup();
1845     if (getCurrentPosition(&currentPositionUs) == OK) {
1846         notify->setInt64("positionUs", currentPositionUs);
1847     }
1848 
1849     mAudioSink->stop();
1850     mAudioSink->flush();
1851 
1852     notify->setInt32("what", kWhatAudioTearDown);
1853     notify->setInt32("reason", reason);
1854     notify->post();
1855 }
1856 
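// While offloading, pausing acquires the wakelock and schedules a
// kWhatAudioOffloadPauseTimeout message kOffloadPauseMaxUs in the future, tagged with
// the current timeout generation so that a cancel or a later pause invalidates it.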
1857 void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() {
1858     if (offloadingAudio()) {
1859         mWakeLock->acquire();
1860         sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1861         msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1862         msg->post(kOffloadPauseMaxUs);
1863     }
1864 }
1865 
1866 void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() {
1867     // We may have called startAudioOffloadPauseTimeout() without
1868     // the AudioSink open and with offloadingAudio enabled.
1869     //
1870     // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
1871     // we always release the wakelock and increment the pause timeout generation.
1872     //
1873     // Note: The acquired wakelock prevents the device from suspending
1874     // immediately after offload pause (in case a resume happens shortly thereafter).
1875     mWakeLock->release(true);
1876     ++mAudioOffloadPauseTimeoutGeneration;
1877 }
1878 
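// (Re)opens the AudioSink for the given format. In offload mode an audio_offload_info_t
// is built from the format and the sink is opened with AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
// on failure the renderer falls back to PCM (or, if offloadOnly, requests an audio
// teardown). In PCM mode the sink buffer is sized from the media.stagefright.audio.sink
// property and data is delivered either via callback or by the drain loop. Reopening is
// skipped when neither the offload nor the PCM configuration has changed.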
1879 status_t NuPlayer2::Renderer::onOpenAudioSink(
1880         const sp<AMessage> &format,
1881         bool offloadOnly,
1882         bool hasVideo,
1883         uint32_t flags,
1884         bool isStreaming) {
1885     ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1886             offloadOnly, offloadingAudio());
1887 
1888     bool audioSinkChanged = false;
1889 
1890     int32_t numChannels;
1891     CHECK(format->findInt32("channel-count", &numChannels));
1892 
1893     int32_t channelMask;
1894     if (!format->findInt32("channel-mask", &channelMask)) {
1895         // signal to the AudioSink to derive the mask from count.
1896         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1897     }
1898 
1899     int32_t sampleRate;
1900     CHECK(format->findInt32("sample-rate", &sampleRate));
1901 
1902     // read pcm encoding from MediaCodec output format, if available
1903     int32_t pcmEncoding;
1904     audio_format_t audioFormat =
1905             format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ?
1906                     audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT;
1907 
1908     if (offloadingAudio()) {
1909         AString mime;
1910         CHECK(format->findString("mime", &mime));
1911         status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1912 
1913         if (err != OK) {
1914             ALOGE("Couldn't map mime \"%s\" to a valid "
1915                     "audio_format", mime.c_str());
1916             onDisableOffloadAudio();
1917         } else {
1918             ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1919                     mime.c_str(), audioFormat);
1920 
1921             int avgBitRate = -1;
1922             format->findInt32("bitrate", &avgBitRate);
1923 
1924             int32_t aacProfile = -1;
1925             if (audioFormat == AUDIO_FORMAT_AAC
1926                     && format->findInt32("aac-profile", &aacProfile)) {
1927                 // Redefine the AAC format as per the AAC profile.
1928                 mapAACProfileToAudioFormat(
1929                         audioFormat,
1930                         aacProfile);
1931             }
1932 
1933             audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1934             offloadInfo.duration_us = -1;
1935             format->findInt64(
1936                     "durationUs", &offloadInfo.duration_us);
1937             offloadInfo.sample_rate = sampleRate;
1938             offloadInfo.channel_mask = channelMask;
1939             offloadInfo.format = audioFormat;
1940             offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1941             offloadInfo.bit_rate = avgBitRate;
1942             offloadInfo.has_video = hasVideo;
1943             offloadInfo.is_streaming = isStreaming;
1944 
1945             if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1946                 ALOGV("openAudioSink: no change in offload mode");
1947                 // no change from previous configuration, everything ok.
1948                 return OK;
1949             }
1950             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1951 
1952             ALOGV("openAudioSink: try to open AudioSink in offload mode");
1953             uint32_t offloadFlags = flags;
1954             offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1955             offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1956             audioSinkChanged = true;
1957             mAudioSink->close();
1958 
1959             err = mAudioSink->open(
1960                     sampleRate,
1961                     numChannels,
1962                     (audio_channel_mask_t)channelMask,
1963                     audioFormat,
1964                     &NuPlayer2::Renderer::AudioSinkCallback,
1965                     this,
1966                     (audio_output_flags_t)offloadFlags,
1967                     &offloadInfo);
1968 
1969             if (err == OK) {
1970                 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1971             }
1972 
1973             if (err == OK) {
1974                 // If the playback is offloaded to h/w, we pass
1975                 // the HAL some metadata information.
1976                 // We don't want to do this for PCM because it
1977                 // will be going through the AudioFlinger mixer
1978                 // before reaching the hardware.
1979                 // TODO
1980                 mCurrentOffloadInfo = offloadInfo;
1981                 if (!mPaused) { // for preview mode, don't start if paused
1982                     err = mAudioSink->start();
1983                 }
1984                 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1985             }
1986             if (err != OK) {
1987                 // Clean up, fall back to non offload mode.
1988                 mAudioSink->close();
1989                 onDisableOffloadAudio();
1990                 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1991                 ALOGV("openAudioSink: offload failed");
1992                 if (offloadOnly) {
1993                     notifyAudioTearDown(kForceNonOffload);
1994                 }
1995             } else {
1996                 mUseAudioCallback = true;  // offload mode transfers data through callback
1997                 ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1998             }
1999         }
2000     }
2001     if (!offloadOnly && !offloadingAudio()) {
2002         ALOGV("openAudioSink: open AudioSink in NON-offload mode");
2003         uint32_t pcmFlags = flags;
2004         pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
2005 
2006         const PcmInfo info = {
2007                 (audio_channel_mask_t)channelMask,
2008                 (audio_output_flags_t)pcmFlags,
2009                 audioFormat,
2010                 numChannels,
2011                 sampleRate
2012         };
2013         if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
2014             ALOGV("openAudioSink: no change in pcm mode");
2015             // no change from previous configuration, everything ok.
2016             return OK;
2017         }
2018 
2019         audioSinkChanged = true;
2020         mAudioSink->close();
2021         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2022         // Note: It is possible to set up the callback, but not use it to send audio data.
2023         // This requires a fix in AudioSink to explicitly specify the transfer mode.
2024         mUseAudioCallback = getUseAudioCallbackSetting();
2025         if (mUseAudioCallback) {
2026             ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
2027         }
2028 
2029         // Compute the desired buffer size.
2030         // For callback mode, the amount of time before wakeup is about half the buffer size.
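        // E.g. at 44100 Hz with a 500 msec sink setting this yields 22050 frames.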
2031         const uint32_t frameCount =
2032                 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
2033 
2034         // We should always be able to set our playback settings if the sink is closed.
2035         LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
2036                 "onOpenAudioSink: can't set playback rate on closed sink");
2037         status_t err = mAudioSink->open(
2038                     sampleRate,
2039                     numChannels,
2040                     (audio_channel_mask_t)channelMask,
2041                     audioFormat,
2042                     mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL,
2043                     mUseAudioCallback ? this : NULL,
2044                     (audio_output_flags_t)pcmFlags,
2045                     NULL,
2046                     frameCount);
2047         if (err != OK) {
2048             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2049             mAudioSink->close();
2050             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2051             return err;
2052         }
2053         mCurrentPcmInfo = info;
2054         if (!mPaused) { // for preview mode, don't start if paused
2055             mAudioSink->start();
2056         }
2057     }
2058     if (audioSinkChanged) {
2059         onAudioSinkChanged();
2060     }
2061     mAudioTornDown = false;
2062     return OK;
2063 }
2064 
2065 void NuPlayer2::Renderer::onCloseAudioSink() {
2066     mAudioSink->close();
2067     mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2068     mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2069 }
2070 
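// Handles a format-change entry from the audio queue: reopens the AudioSink with the
// new format and posts the supplied notify message back, tagged with an "err" value if
// the open failed.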
2071 void NuPlayer2::Renderer::onChangeAudioFormat(
2072         const sp<AMessage> &meta, const sp<AMessage> &notify) {
2073     sp<AMessage> format;
2074     CHECK(meta->findMessage("format", &format));
2075 
2076     int32_t offloadOnly;
2077     CHECK(meta->findInt32("offload-only", &offloadOnly));
2078 
2079     int32_t hasVideo;
2080     CHECK(meta->findInt32("has-video", &hasVideo));
2081 
2082     uint32_t flags;
2083     CHECK(meta->findInt32("flags", (int32_t *)&flags));
2084 
2085     uint32_t isStreaming;
2086     CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2087 
2088     status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2089 
2090     if (err != OK) {
2091         notify->setInt32("err", err);
2092     }
2093     notify->post();
2094 }
2095 
2096 }  // namespace android
2097