1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "NuPlayerRenderer"
19 #include <utils/Log.h>
20 
21 #include "NuPlayerRenderer.h"
22 #include <algorithm>
23 #include <cutils/properties.h>
24 #include <media/stagefright/foundation/ADebug.h>
25 #include <media/stagefright/foundation/AMessage.h>
26 #include <media/stagefright/foundation/AUtils.h>
27 #include <media/stagefright/foundation/AWakeLock.h>
28 #include <media/stagefright/MediaClock.h>
29 #include <media/stagefright/MediaErrors.h>
30 #include <media/stagefright/MetaData.h>
31 #include <media/stagefright/Utils.h>
32 #include <media/stagefright/VideoFrameScheduler.h>
33 #include <media/MediaCodecBuffer.h>
34 
35 #include <inttypes.h>
36 
37 namespace android {
38 
39 /*
40  * Example of common configuration settings in shell script form
41 
42    #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
43    adb shell setprop audio.offload.disable 1
44 
45    #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
46    adb shell setprop audio.offload.video 1
47 
48    #Use audio callbacks for PCM data
49    adb shell setprop media.stagefright.audio.cbk 1
50 
51    #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
52    adb shell setprop media.stagefright.audio.deep 1
53 
54    #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
55    adb shell setprop media.stagefright.audio.sink 1000
56 
57  * These configurations take effect for the next track played (not the current track).
58  */
59 
getUseAudioCallbackSetting()60 static inline bool getUseAudioCallbackSetting() {
61     return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
62 }
63 
getAudioSinkPcmMsSetting()64 static inline int32_t getAudioSinkPcmMsSetting() {
65     return property_get_int32(
66             "media.stagefright.audio.sink", 500 /* default_value */);
67 }
68 
69 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
70 // is closed to allow the audio DSP to power down.
71 static const int64_t kOffloadPauseMaxUs = 10000000ll;
72 
73 // Maximum allowed delay from AudioSink, 1.5 seconds.
74 static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
75 
76 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
77 
78 // static
79 const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
80         AUDIO_CHANNEL_NONE,
81         AUDIO_OUTPUT_FLAG_NONE,
82         AUDIO_FORMAT_INVALID,
83         0, // mNumChannels
84         0 // mSampleRate
85 };
86 
87 // static
88 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
89 
Renderer(const sp<MediaPlayerBase::AudioSink> & sink,const sp<AMessage> & notify,uint32_t flags)90 NuPlayer::Renderer::Renderer(
91         const sp<MediaPlayerBase::AudioSink> &sink,
92         const sp<AMessage> &notify,
93         uint32_t flags)
94     : mAudioSink(sink),
95       mUseVirtualAudioSink(false),
96       mNotify(notify),
97       mFlags(flags),
98       mNumFramesWritten(0),
99       mDrainAudioQueuePending(false),
100       mDrainVideoQueuePending(false),
101       mAudioQueueGeneration(0),
102       mVideoQueueGeneration(0),
103       mAudioDrainGeneration(0),
104       mVideoDrainGeneration(0),
105       mAudioEOSGeneration(0),
106       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
107       mAudioFirstAnchorTimeMediaUs(-1),
108       mAnchorTimeMediaUs(-1),
109       mAnchorNumFramesWritten(-1),
110       mVideoLateByUs(0ll),
111       mHasAudio(false),
112       mHasVideo(false),
113       mNotifyCompleteAudio(false),
114       mNotifyCompleteVideo(false),
115       mSyncQueues(false),
116       mPaused(false),
117       mPauseDrainAudioAllowedUs(0),
118       mVideoSampleReceived(false),
119       mVideoRenderingStarted(false),
120       mVideoRenderingStartGeneration(0),
121       mAudioRenderingStartGeneration(0),
122       mRenderingDataDelivered(false),
123       mNextAudioClockUpdateTimeUs(-1),
124       mLastAudioMediaTimeUs(-1),
125       mAudioOffloadPauseTimeoutGeneration(0),
126       mAudioTornDown(false),
127       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
128       mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
129       mTotalBuffersQueued(0),
130       mLastAudioBufferDrained(0),
131       mUseAudioCallback(false),
132       mWakeLock(new AWakeLock()) {
133     mMediaClock = new MediaClock;
134     mPlaybackRate = mPlaybackSettings.mSpeed;
135     mMediaClock->setPlaybackRate(mPlaybackRate);
136 }
137 
~Renderer()138 NuPlayer::Renderer::~Renderer() {
139     if (offloadingAudio()) {
140         mAudioSink->stop();
141         mAudioSink->flush();
142         mAudioSink->close();
143     }
144 
145     // Try to avoid racing condition in case callback is still on.
146     Mutex::Autolock autoLock(mLock);
147     if (mUseAudioCallback) {
148         flushQueue(&mAudioQueue);
149         flushQueue(&mVideoQueue);
150     }
151     mWakeLock.clear();
152     mMediaClock.clear();
153     mVideoScheduler.clear();
154     mNotify.clear();
155     mAudioSink.clear();
156 }
157 
queueBuffer(bool audio,const sp<MediaCodecBuffer> & buffer,const sp<AMessage> & notifyConsumed)158 void NuPlayer::Renderer::queueBuffer(
159         bool audio,
160         const sp<MediaCodecBuffer> &buffer,
161         const sp<AMessage> &notifyConsumed) {
162     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
163     msg->setInt32("queueGeneration", getQueueGeneration(audio));
164     msg->setInt32("audio", static_cast<int32_t>(audio));
165     msg->setObject("buffer", buffer);
166     msg->setMessage("notifyConsumed", notifyConsumed);
167     msg->post();
168 }
169 
queueEOS(bool audio,status_t finalResult)170 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
171     CHECK_NE(finalResult, (status_t)OK);
172 
173     sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
174     msg->setInt32("queueGeneration", getQueueGeneration(audio));
175     msg->setInt32("audio", static_cast<int32_t>(audio));
176     msg->setInt32("finalResult", finalResult);
177     msg->post();
178 }
179 
setPlaybackSettings(const AudioPlaybackRate & rate)180 status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
181     sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
182     writeToAMessage(msg, rate);
183     sp<AMessage> response;
184     status_t err = msg->postAndAwaitResponse(&response);
185     if (err == OK && response != NULL) {
186         CHECK(response->findInt32("err", &err));
187     }
188     return err;
189 }
190 
onConfigPlayback(const AudioPlaybackRate & rate)191 status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
192     if (rate.mSpeed == 0.f) {
193         onPause();
194         // don't call audiosink's setPlaybackRate if pausing, as pitch does not
195         // have to correspond to the any non-0 speed (e.g old speed). Keep
196         // settings nonetheless, using the old speed, in case audiosink changes.
197         AudioPlaybackRate newRate = rate;
198         newRate.mSpeed = mPlaybackSettings.mSpeed;
199         mPlaybackSettings = newRate;
200         return OK;
201     }
202 
203     if (mAudioSink != NULL && mAudioSink->ready()) {
204         status_t err = mAudioSink->setPlaybackRate(rate);
205         if (err != OK) {
206             return err;
207         }
208     }
209     mPlaybackSettings = rate;
210     mPlaybackRate = rate.mSpeed;
211     mMediaClock->setPlaybackRate(mPlaybackRate);
212     return OK;
213 }
214 
getPlaybackSettings(AudioPlaybackRate * rate)215 status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
216     sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
217     sp<AMessage> response;
218     status_t err = msg->postAndAwaitResponse(&response);
219     if (err == OK && response != NULL) {
220         CHECK(response->findInt32("err", &err));
221         if (err == OK) {
222             readFromAMessage(response, rate);
223         }
224     }
225     return err;
226 }
227 
onGetPlaybackSettings(AudioPlaybackRate * rate)228 status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
229     if (mAudioSink != NULL && mAudioSink->ready()) {
230         status_t err = mAudioSink->getPlaybackRate(rate);
231         if (err == OK) {
232             if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
233                 ALOGW("correcting mismatch in internal/external playback rate");
234             }
235             // get playback settings used by audiosink, as it may be
236             // slightly off due to audiosink not taking small changes.
237             mPlaybackSettings = *rate;
238             if (mPaused) {
239                 rate->mSpeed = 0.f;
240             }
241         }
242         return err;
243     }
244     *rate = mPlaybackSettings;
245     return OK;
246 }
247 
setSyncSettings(const AVSyncSettings & sync,float videoFpsHint)248 status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
249     sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
250     writeToAMessage(msg, sync, videoFpsHint);
251     sp<AMessage> response;
252     status_t err = msg->postAndAwaitResponse(&response);
253     if (err == OK && response != NULL) {
254         CHECK(response->findInt32("err", &err));
255     }
256     return err;
257 }
258 
onConfigSync(const AVSyncSettings & sync,float videoFpsHint __unused)259 status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
260     if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
261         return BAD_VALUE;
262     }
263     // TODO: support sync sources
264     return INVALID_OPERATION;
265 }
266 
getSyncSettings(AVSyncSettings * sync,float * videoFps)267 status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
268     sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
269     sp<AMessage> response;
270     status_t err = msg->postAndAwaitResponse(&response);
271     if (err == OK && response != NULL) {
272         CHECK(response->findInt32("err", &err));
273         if (err == OK) {
274             readFromAMessage(response, sync, videoFps);
275         }
276     }
277     return err;
278 }
279 
onGetSyncSettings(AVSyncSettings * sync,float * videoFps)280 status_t NuPlayer::Renderer::onGetSyncSettings(
281         AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
282     *sync = mSyncSettings;
283     *videoFps = -1.f;
284     return OK;
285 }
286 
flush(bool audio,bool notifyComplete)287 void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
288     {
289         Mutex::Autolock autoLock(mLock);
290         if (audio) {
291             mNotifyCompleteAudio |= notifyComplete;
292             clearAudioFirstAnchorTime_l();
293             ++mAudioQueueGeneration;
294             ++mAudioDrainGeneration;
295         } else {
296             mNotifyCompleteVideo |= notifyComplete;
297             ++mVideoQueueGeneration;
298             ++mVideoDrainGeneration;
299         }
300 
301         mMediaClock->clearAnchor();
302         mVideoLateByUs = 0;
303         mSyncQueues = false;
304     }
305 
306     sp<AMessage> msg = new AMessage(kWhatFlush, this);
307     msg->setInt32("audio", static_cast<int32_t>(audio));
308     msg->post();
309 }
310 
signalTimeDiscontinuity()311 void NuPlayer::Renderer::signalTimeDiscontinuity() {
312 }
313 
signalDisableOffloadAudio()314 void NuPlayer::Renderer::signalDisableOffloadAudio() {
315     (new AMessage(kWhatDisableOffloadAudio, this))->post();
316 }
317 
signalEnableOffloadAudio()318 void NuPlayer::Renderer::signalEnableOffloadAudio() {
319     (new AMessage(kWhatEnableOffloadAudio, this))->post();
320 }
321 
pause()322 void NuPlayer::Renderer::pause() {
323     (new AMessage(kWhatPause, this))->post();
324 }
325 
resume()326 void NuPlayer::Renderer::resume() {
327     (new AMessage(kWhatResume, this))->post();
328 }
329 
setVideoFrameRate(float fps)330 void NuPlayer::Renderer::setVideoFrameRate(float fps) {
331     sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
332     msg->setFloat("frame-rate", fps);
333     msg->post();
334 }
335 
336 // Called on any threads without mLock acquired.
getCurrentPosition(int64_t * mediaUs)337 status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
338     status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
339     if (result == OK) {
340         return result;
341     }
342 
343     // MediaClock has not started yet. Try to start it if possible.
344     {
345         Mutex::Autolock autoLock(mLock);
346         if (mAudioFirstAnchorTimeMediaUs == -1) {
347             return result;
348         }
349 
350         AudioTimestamp ts;
351         status_t res = mAudioSink->getTimestamp(ts);
352         if (res != OK) {
353             return result;
354         }
355 
356         // AudioSink has rendered some frames.
357         int64_t nowUs = ALooper::GetNowUs();
358         int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
359                 + mAudioFirstAnchorTimeMediaUs;
360         mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
361     }
362 
363     return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
364 }
365 
clearAudioFirstAnchorTime_l()366 void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
367     mAudioFirstAnchorTimeMediaUs = -1;
368     mMediaClock->setStartingTimeMedia(-1);
369 }
370 
setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs)371 void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
372     if (mAudioFirstAnchorTimeMediaUs == -1) {
373         mAudioFirstAnchorTimeMediaUs = mediaUs;
374         mMediaClock->setStartingTimeMedia(mediaUs);
375     }
376 }
377 
378 // Called on renderer looper.
clearAnchorTime()379 void NuPlayer::Renderer::clearAnchorTime() {
380     mMediaClock->clearAnchor();
381     mAnchorTimeMediaUs = -1;
382     mAnchorNumFramesWritten = -1;
383 }
384 
setVideoLateByUs(int64_t lateUs)385 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
386     Mutex::Autolock autoLock(mLock);
387     mVideoLateByUs = lateUs;
388 }
389 
getVideoLateByUs()390 int64_t NuPlayer::Renderer::getVideoLateByUs() {
391     Mutex::Autolock autoLock(mLock);
392     return mVideoLateByUs;
393 }
394 
openAudioSink(const sp<AMessage> & format,bool offloadOnly,bool hasVideo,uint32_t flags,bool * isOffloaded,bool isStreaming)395 status_t NuPlayer::Renderer::openAudioSink(
396         const sp<AMessage> &format,
397         bool offloadOnly,
398         bool hasVideo,
399         uint32_t flags,
400         bool *isOffloaded,
401         bool isStreaming) {
402     sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
403     msg->setMessage("format", format);
404     msg->setInt32("offload-only", offloadOnly);
405     msg->setInt32("has-video", hasVideo);
406     msg->setInt32("flags", flags);
407     msg->setInt32("isStreaming", isStreaming);
408 
409     sp<AMessage> response;
410     status_t postStatus = msg->postAndAwaitResponse(&response);
411 
412     int32_t err;
413     if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
414         err = INVALID_OPERATION;
415     } else if (err == OK && isOffloaded != NULL) {
416         int32_t offload;
417         CHECK(response->findInt32("offload", &offload));
418         *isOffloaded = (offload != 0);
419     }
420     return err;
421 }
422 
closeAudioSink()423 void NuPlayer::Renderer::closeAudioSink() {
424     sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
425 
426     sp<AMessage> response;
427     msg->postAndAwaitResponse(&response);
428 }
429 
changeAudioFormat(const sp<AMessage> & format,bool offloadOnly,bool hasVideo,uint32_t flags,bool isStreaming,const sp<AMessage> & notify)430 void NuPlayer::Renderer::changeAudioFormat(
431         const sp<AMessage> &format,
432         bool offloadOnly,
433         bool hasVideo,
434         uint32_t flags,
435         bool isStreaming,
436         const sp<AMessage> &notify) {
437     sp<AMessage> meta = new AMessage;
438     meta->setMessage("format", format);
439     meta->setInt32("offload-only", offloadOnly);
440     meta->setInt32("has-video", hasVideo);
441     meta->setInt32("flags", flags);
442     meta->setInt32("isStreaming", isStreaming);
443 
444     sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
445     msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
446     msg->setMessage("notify", notify);
447     msg->setMessage("meta", meta);
448     msg->post();
449 }
450 
onMessageReceived(const sp<AMessage> & msg)451 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
452     switch (msg->what()) {
453         case kWhatOpenAudioSink:
454         {
455             sp<AMessage> format;
456             CHECK(msg->findMessage("format", &format));
457 
458             int32_t offloadOnly;
459             CHECK(msg->findInt32("offload-only", &offloadOnly));
460 
461             int32_t hasVideo;
462             CHECK(msg->findInt32("has-video", &hasVideo));
463 
464             uint32_t flags;
465             CHECK(msg->findInt32("flags", (int32_t *)&flags));
466 
467             uint32_t isStreaming;
468             CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
469 
470             status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
471 
472             sp<AMessage> response = new AMessage;
473             response->setInt32("err", err);
474             response->setInt32("offload", offloadingAudio());
475 
476             sp<AReplyToken> replyID;
477             CHECK(msg->senderAwaitsResponse(&replyID));
478             response->postReply(replyID);
479 
480             break;
481         }
482 
483         case kWhatCloseAudioSink:
484         {
485             sp<AReplyToken> replyID;
486             CHECK(msg->senderAwaitsResponse(&replyID));
487 
488             onCloseAudioSink();
489 
490             sp<AMessage> response = new AMessage;
491             response->postReply(replyID);
492             break;
493         }
494 
495         case kWhatStopAudioSink:
496         {
497             mAudioSink->stop();
498             break;
499         }
500 
501         case kWhatChangeAudioFormat:
502         {
503             int32_t queueGeneration;
504             CHECK(msg->findInt32("queueGeneration", &queueGeneration));
505 
506             sp<AMessage> notify;
507             CHECK(msg->findMessage("notify", &notify));
508 
509             if (offloadingAudio()) {
510                 ALOGW("changeAudioFormat should NOT be called in offload mode");
511                 notify->setInt32("err", INVALID_OPERATION);
512                 notify->post();
513                 break;
514             }
515 
516             sp<AMessage> meta;
517             CHECK(msg->findMessage("meta", &meta));
518 
519             if (queueGeneration != getQueueGeneration(true /* audio */)
520                     || mAudioQueue.empty()) {
521                 onChangeAudioFormat(meta, notify);
522                 break;
523             }
524 
525             QueueEntry entry;
526             entry.mNotifyConsumed = notify;
527             entry.mMeta = meta;
528 
529             Mutex::Autolock autoLock(mLock);
530             mAudioQueue.push_back(entry);
531             postDrainAudioQueue_l();
532 
533             break;
534         }
535 
536         case kWhatDrainAudioQueue:
537         {
538             mDrainAudioQueuePending = false;
539 
540             int32_t generation;
541             CHECK(msg->findInt32("drainGeneration", &generation));
542             if (generation != getDrainGeneration(true /* audio */)) {
543                 break;
544             }
545 
546             if (onDrainAudioQueue()) {
547                 uint32_t numFramesPlayed;
548                 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
549                          (status_t)OK);
550 
551                 uint32_t numFramesPendingPlayout =
552                     mNumFramesWritten - numFramesPlayed;
553 
554                 // This is how long the audio sink will have data to
555                 // play back.
556                 int64_t delayUs =
557                     mAudioSink->msecsPerFrame()
558                         * numFramesPendingPlayout * 1000ll;
559                 if (mPlaybackRate > 1.0f) {
560                     delayUs /= mPlaybackRate;
561                 }
562 
563                 // Let's give it more data after about half that time
564                 // has elapsed.
565                 delayUs /= 2;
566                 // check the buffer size to estimate maximum delay permitted.
567                 const int64_t maxDrainDelayUs = std::max(
568                         mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
569                 ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
570                         (long long)delayUs, (long long)maxDrainDelayUs);
571                 Mutex::Autolock autoLock(mLock);
572                 postDrainAudioQueue_l(delayUs);
573             }
574             break;
575         }
576 
577         case kWhatDrainVideoQueue:
578         {
579             int32_t generation;
580             CHECK(msg->findInt32("drainGeneration", &generation));
581             if (generation != getDrainGeneration(false /* audio */)) {
582                 break;
583             }
584 
585             mDrainVideoQueuePending = false;
586 
587             onDrainVideoQueue();
588 
589             postDrainVideoQueue();
590             break;
591         }
592 
593         case kWhatPostDrainVideoQueue:
594         {
595             int32_t generation;
596             CHECK(msg->findInt32("drainGeneration", &generation));
597             if (generation != getDrainGeneration(false /* audio */)) {
598                 break;
599             }
600 
601             mDrainVideoQueuePending = false;
602             postDrainVideoQueue();
603             break;
604         }
605 
606         case kWhatQueueBuffer:
607         {
608             onQueueBuffer(msg);
609             break;
610         }
611 
612         case kWhatQueueEOS:
613         {
614             onQueueEOS(msg);
615             break;
616         }
617 
618         case kWhatEOS:
619         {
620             int32_t generation;
621             CHECK(msg->findInt32("audioEOSGeneration", &generation));
622             if (generation != mAudioEOSGeneration) {
623                 break;
624             }
625             status_t finalResult;
626             CHECK(msg->findInt32("finalResult", &finalResult));
627             notifyEOS(true /* audio */, finalResult);
628             break;
629         }
630 
631         case kWhatConfigPlayback:
632         {
633             sp<AReplyToken> replyID;
634             CHECK(msg->senderAwaitsResponse(&replyID));
635             AudioPlaybackRate rate;
636             readFromAMessage(msg, &rate);
637             status_t err = onConfigPlayback(rate);
638             sp<AMessage> response = new AMessage;
639             response->setInt32("err", err);
640             response->postReply(replyID);
641             break;
642         }
643 
644         case kWhatGetPlaybackSettings:
645         {
646             sp<AReplyToken> replyID;
647             CHECK(msg->senderAwaitsResponse(&replyID));
648             AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
649             status_t err = onGetPlaybackSettings(&rate);
650             sp<AMessage> response = new AMessage;
651             if (err == OK) {
652                 writeToAMessage(response, rate);
653             }
654             response->setInt32("err", err);
655             response->postReply(replyID);
656             break;
657         }
658 
659         case kWhatConfigSync:
660         {
661             sp<AReplyToken> replyID;
662             CHECK(msg->senderAwaitsResponse(&replyID));
663             AVSyncSettings sync;
664             float videoFpsHint;
665             readFromAMessage(msg, &sync, &videoFpsHint);
666             status_t err = onConfigSync(sync, videoFpsHint);
667             sp<AMessage> response = new AMessage;
668             response->setInt32("err", err);
669             response->postReply(replyID);
670             break;
671         }
672 
673         case kWhatGetSyncSettings:
674         {
675             sp<AReplyToken> replyID;
676             CHECK(msg->senderAwaitsResponse(&replyID));
677 
678             ALOGV("kWhatGetSyncSettings");
679             AVSyncSettings sync;
680             float videoFps = -1.f;
681             status_t err = onGetSyncSettings(&sync, &videoFps);
682             sp<AMessage> response = new AMessage;
683             if (err == OK) {
684                 writeToAMessage(response, sync, videoFps);
685             }
686             response->setInt32("err", err);
687             response->postReply(replyID);
688             break;
689         }
690 
691         case kWhatFlush:
692         {
693             onFlush(msg);
694             break;
695         }
696 
697         case kWhatDisableOffloadAudio:
698         {
699             onDisableOffloadAudio();
700             break;
701         }
702 
703         case kWhatEnableOffloadAudio:
704         {
705             onEnableOffloadAudio();
706             break;
707         }
708 
709         case kWhatPause:
710         {
711             onPause();
712             break;
713         }
714 
715         case kWhatResume:
716         {
717             onResume();
718             break;
719         }
720 
721         case kWhatSetVideoFrameRate:
722         {
723             float fps;
724             CHECK(msg->findFloat("frame-rate", &fps));
725             onSetVideoFrameRate(fps);
726             break;
727         }
728 
729         case kWhatAudioTearDown:
730         {
731             int32_t reason;
732             CHECK(msg->findInt32("reason", &reason));
733 
734             onAudioTearDown((AudioTearDownReason)reason);
735             break;
736         }
737 
738         case kWhatAudioOffloadPauseTimeout:
739         {
740             int32_t generation;
741             CHECK(msg->findInt32("drainGeneration", &generation));
742             if (generation != mAudioOffloadPauseTimeoutGeneration) {
743                 break;
744             }
745             ALOGV("Audio Offload tear down due to pause timeout.");
746             onAudioTearDown(kDueToTimeout);
747             mWakeLock->release();
748             break;
749         }
750 
751         default:
752             TRESPASS();
753             break;
754     }
755 }
756 
postDrainAudioQueue_l(int64_t delayUs)757 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
758     if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
759         return;
760     }
761 
762     if (mAudioQueue.empty()) {
763         return;
764     }
765 
766     // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
767     if (mPaused) {
768         const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
769         if (diffUs > delayUs) {
770             delayUs = diffUs;
771         }
772     }
773 
774     mDrainAudioQueuePending = true;
775     sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
776     msg->setInt32("drainGeneration", mAudioDrainGeneration);
777     msg->post(delayUs);
778 }
779 
prepareForMediaRenderingStart_l()780 void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
781     mAudioRenderingStartGeneration = mAudioDrainGeneration;
782     mVideoRenderingStartGeneration = mVideoDrainGeneration;
783     mRenderingDataDelivered = false;
784 }
785 
notifyIfMediaRenderingStarted_l()786 void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
787     if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
788         mAudioRenderingStartGeneration == mAudioDrainGeneration) {
789         mRenderingDataDelivered = true;
790         if (mPaused) {
791             return;
792         }
793         mVideoRenderingStartGeneration = -1;
794         mAudioRenderingStartGeneration = -1;
795 
796         sp<AMessage> notify = mNotify->dup();
797         notify->setInt32("what", kWhatMediaRenderingStart);
798         notify->post();
799     }
800 }
801 
802 // static
AudioSinkCallback(MediaPlayerBase::AudioSink *,void * buffer,size_t size,void * cookie,MediaPlayerBase::AudioSink::cb_event_t event)803 size_t NuPlayer::Renderer::AudioSinkCallback(
804         MediaPlayerBase::AudioSink * /* audioSink */,
805         void *buffer,
806         size_t size,
807         void *cookie,
808         MediaPlayerBase::AudioSink::cb_event_t event) {
809     NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
810 
811     switch (event) {
812         case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
813         {
814             return me->fillAudioBuffer(buffer, size);
815             break;
816         }
817 
818         case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
819         {
820             ALOGV("AudioSink::CB_EVENT_STREAM_END");
821             me->notifyEOSCallback();
822             break;
823         }
824 
825         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
826         {
827             ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
828             me->notifyAudioTearDown(kDueToError);
829             break;
830         }
831     }
832 
833     return 0;
834 }
835 
notifyEOSCallback()836 void NuPlayer::Renderer::notifyEOSCallback() {
837     Mutex::Autolock autoLock(mLock);
838 
839     if (!mUseAudioCallback) {
840         return;
841     }
842 
843     notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
844 }
845 
fillAudioBuffer(void * buffer,size_t size)846 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
847     Mutex::Autolock autoLock(mLock);
848 
849     if (!mUseAudioCallback) {
850         return 0;
851     }
852 
853     bool hasEOS = false;
854 
855     size_t sizeCopied = 0;
856     bool firstEntry = true;
857     QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
858     while (sizeCopied < size && !mAudioQueue.empty()) {
859         entry = &*mAudioQueue.begin();
860 
861         if (entry->mBuffer == NULL) { // EOS
862             hasEOS = true;
863             mAudioQueue.erase(mAudioQueue.begin());
864             break;
865         }
866 
867         if (firstEntry && entry->mOffset == 0) {
868             firstEntry = false;
869             int64_t mediaTimeUs;
870             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
871             ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
872             setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
873         }
874 
875         size_t copy = entry->mBuffer->size() - entry->mOffset;
876         size_t sizeRemaining = size - sizeCopied;
877         if (copy > sizeRemaining) {
878             copy = sizeRemaining;
879         }
880 
881         memcpy((char *)buffer + sizeCopied,
882                entry->mBuffer->data() + entry->mOffset,
883                copy);
884 
885         entry->mOffset += copy;
886         if (entry->mOffset == entry->mBuffer->size()) {
887             entry->mNotifyConsumed->post();
888             mAudioQueue.erase(mAudioQueue.begin());
889             entry = NULL;
890         }
891         sizeCopied += copy;
892 
893         notifyIfMediaRenderingStarted_l();
894     }
895 
896     if (mAudioFirstAnchorTimeMediaUs >= 0) {
897         int64_t nowUs = ALooper::GetNowUs();
898         int64_t nowMediaUs =
899             mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
900         // we don't know how much data we are queueing for offloaded tracks.
901         mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
902     }
903 
904     // for non-offloaded audio, we need to compute the frames written because
905     // there is no EVENT_STREAM_END notification. The frames written gives
906     // an estimate on the pending played out duration.
907     if (!offloadingAudio()) {
908         mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
909     }
910 
911     if (hasEOS) {
912         (new AMessage(kWhatStopAudioSink, this))->post();
913         // As there is currently no EVENT_STREAM_END callback notification for
914         // non-offloaded audio tracks, we need to post the EOS ourselves.
915         if (!offloadingAudio()) {
916             int64_t postEOSDelayUs = 0;
917             if (mAudioSink->needsTrailingPadding()) {
918                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
919             }
920             ALOGV("fillAudioBuffer: notifyEOS "
921                     "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
922                     mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
923             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
924         }
925     }
926     return sizeCopied;
927 }
928 
drainAudioQueueUntilLastEOS()929 void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
930     List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
931     bool foundEOS = false;
932     while (it != mAudioQueue.end()) {
933         int32_t eos;
934         QueueEntry *entry = &*it++;
935         if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
936                 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
937             itEOS = it;
938             foundEOS = true;
939         }
940     }
941 
942     if (foundEOS) {
943         // post all replies before EOS and drop the samples
944         for (it = mAudioQueue.begin(); it != itEOS; it++) {
945             if (it->mBuffer == nullptr) {
946                 if (it->mNotifyConsumed == nullptr) {
947                     // delay doesn't matter as we don't even have an AudioTrack
948                     notifyEOS(true /* audio */, it->mFinalResult);
949                 } else {
950                     // TAG for re-opening audio sink.
951                     onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
952                 }
953             } else {
954                 it->mNotifyConsumed->post();
955             }
956         }
957         mAudioQueue.erase(mAudioQueue.begin(), itEOS);
958     }
959 }
960 
onDrainAudioQueue()961 bool NuPlayer::Renderer::onDrainAudioQueue() {
962     // do not drain audio during teardown as queued buffers may be invalid.
963     if (mAudioTornDown) {
964         return false;
965     }
966     // TODO: This call to getPosition checks if AudioTrack has been created
967     // in AudioSink before draining audio. If AudioTrack doesn't exist, then
968     // CHECKs on getPosition will fail.
969     // We still need to figure out why AudioTrack is not created when
970     // this function is called. One possible reason could be leftover
971     // audio. Another possible place is to check whether decoder
972     // has received INFO_FORMAT_CHANGED as the first buffer since
973     // AudioSink is opened there, and possible interactions with flush
974     // immediately after start. Investigate error message
975     // "vorbis_dsp_synthesis returned -135", along with RTSP.
976     uint32_t numFramesPlayed;
977     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
978         // When getPosition fails, renderer will not reschedule the draining
979         // unless new samples are queued.
980         // If we have pending EOS (or "eos" marker for discontinuities), we need
981         // to post these now as NuPlayerDecoder might be waiting for it.
982         drainAudioQueueUntilLastEOS();
983 
984         ALOGW("onDrainAudioQueue(): audio sink is not ready");
985         return false;
986     }
987 
988 #if 0
989     ssize_t numFramesAvailableToWrite =
990         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
991 
992     if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
993         ALOGI("audio sink underrun");
994     } else {
995         ALOGV("audio queue has %d frames left to play",
996              mAudioSink->frameCount() - numFramesAvailableToWrite);
997     }
998 #endif
999 
1000     uint32_t prevFramesWritten = mNumFramesWritten;
1001     while (!mAudioQueue.empty()) {
1002         QueueEntry *entry = &*mAudioQueue.begin();
1003 
1004         if (entry->mBuffer == NULL) {
1005             if (entry->mNotifyConsumed != nullptr) {
1006                 // TAG for re-open audio sink.
1007                 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1008                 mAudioQueue.erase(mAudioQueue.begin());
1009                 continue;
1010             }
1011 
1012             // EOS
1013             if (mPaused) {
1014                 // Do not notify EOS when paused.
1015                 // This is needed to avoid switch to next clip while in pause.
1016                 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1017                 return false;
1018             }
1019 
1020             int64_t postEOSDelayUs = 0;
1021             if (mAudioSink->needsTrailingPadding()) {
1022                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1023             }
1024             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1025             mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1026 
1027             mAudioQueue.erase(mAudioQueue.begin());
1028             entry = NULL;
1029             if (mAudioSink->needsTrailingPadding()) {
1030                 // If we're not in gapless playback (i.e. through setNextPlayer), we
1031                 // need to stop the track here, because that will play out the last
1032                 // little bit at the end of the file. Otherwise short files won't play.
1033                 mAudioSink->stop();
1034                 mNumFramesWritten = 0;
1035             }
1036             return false;
1037         }
1038 
1039         mLastAudioBufferDrained = entry->mBufferOrdinal;
1040 
1041         // ignore 0-sized buffer which could be EOS marker with no data
1042         if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1043             int64_t mediaTimeUs;
1044             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1045             ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1046                     mediaTimeUs / 1E6);
1047             onNewAudioMediaTime(mediaTimeUs);
1048         }
1049 
1050         size_t copy = entry->mBuffer->size() - entry->mOffset;
1051 
1052         ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1053                                             copy, false /* blocking */);
1054         if (written < 0) {
1055             // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1056             if (written == WOULD_BLOCK) {
1057                 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1058             } else {
1059                 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1060                 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1061                 // true, in which case the NuPlayer will handle the reconnect.
1062                 notifyAudioTearDown(kDueToError);
1063             }
1064             break;
1065         }
1066 
1067         entry->mOffset += written;
1068         size_t remainder = entry->mBuffer->size() - entry->mOffset;
1069         if ((ssize_t)remainder < mAudioSink->frameSize()) {
1070             if (remainder > 0) {
1071                 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1072                         remainder);
1073                 entry->mOffset += remainder;
1074                 copy -= remainder;
1075             }
1076 
1077             entry->mNotifyConsumed->post();
1078             mAudioQueue.erase(mAudioQueue.begin());
1079 
1080             entry = NULL;
1081         }
1082 
1083         size_t copiedFrames = written / mAudioSink->frameSize();
1084         mNumFramesWritten += copiedFrames;
1085 
1086         {
1087             Mutex::Autolock autoLock(mLock);
1088             int64_t maxTimeMedia;
1089             maxTimeMedia =
1090                 mAnchorTimeMediaUs +
1091                         (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1092                                 * 1000LL * mAudioSink->msecsPerFrame());
1093             mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1094 
1095             notifyIfMediaRenderingStarted_l();
1096         }
1097 
1098         if (written != (ssize_t)copy) {
1099             // A short count was received from AudioSink::write()
1100             //
1101             // AudioSink write is called in non-blocking mode.
1102             // It may return with a short count when:
1103             //
1104             // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1105             //    discarded.
1106             // 2) The data to be copied exceeds the available buffer in AudioSink.
1107             // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1108             // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1109 
1110             // (Case 1)
1111             // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
1112             // needs to fail, as we should not carry over fractional frames between calls.
1113             CHECK_EQ(copy % mAudioSink->frameSize(), 0);
1114 
1115             // (Case 2, 3, 4)
1116             // Return early to the caller.
1117             // Beware of calling immediately again as this may busy-loop if you are not careful.
1118             ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1119             break;
1120         }
1121     }
1122 
1123     // calculate whether we need to reschedule another write.
1124     bool reschedule = !mAudioQueue.empty()
1125             && (!mPaused
1126                 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1127     //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
1128     //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1129     return reschedule;
1130 }
1131 
getDurationUsIfPlayedAtSampleRate(uint32_t numFrames)1132 int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1133     int32_t sampleRate = offloadingAudio() ?
1134             mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1135     if (sampleRate == 0) {
1136         ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1137         return 0;
1138     }
1139     // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1140     return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1141 }
1142 
1143 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
getPendingAudioPlayoutDurationUs(int64_t nowUs)1144 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1145     int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1146     if (mUseVirtualAudioSink) {
1147         int64_t nowUs = ALooper::GetNowUs();
1148         int64_t mediaUs;
1149         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
1150             return 0ll;
1151         } else {
1152             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1153         }
1154     }
1155     return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs);
1156 }
1157 
getRealTimeUs(int64_t mediaTimeUs,int64_t nowUs)1158 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1159     int64_t realUs;
1160     if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1161         // If failed to get current position, e.g. due to audio clock is
1162         // not ready, then just play out video immediately without delay.
1163         return nowUs;
1164     }
1165     return realUs;
1166 }
1167 
onNewAudioMediaTime(int64_t mediaTimeUs)1168 void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
1169     Mutex::Autolock autoLock(mLock);
1170     // TRICKY: vorbis decoder generates multiple frames with the same
1171     // timestamp, so only update on the first frame with a given timestamp
1172     if (mediaTimeUs == mAnchorTimeMediaUs) {
1173         return;
1174     }
1175     setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
1176 
1177     // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
1178     if (mNextAudioClockUpdateTimeUs == -1) {
1179         AudioTimestamp ts;
1180         if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
1181             mNextAudioClockUpdateTimeUs = 0; // start our clock updates
1182         }
1183     }
1184     int64_t nowUs = ALooper::GetNowUs();
1185     if (mNextAudioClockUpdateTimeUs >= 0) {
1186         if (nowUs >= mNextAudioClockUpdateTimeUs) {
1187             int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
1188             mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
1189             mUseVirtualAudioSink = false;
1190             mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
1191         }
1192     } else {
1193         int64_t unused;
1194         if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
1195                 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
1196                         > kMaxAllowedAudioSinkDelayUs)) {
1197             // Enough data has been sent to AudioSink, but AudioSink has not rendered
1198             // any data yet. Something is wrong with AudioSink, e.g., the device is not
1199             // connected to audio out.
1200             // Switch to system clock. This essentially creates a virtual AudioSink with
1201             // initial latenty of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
1202             // This virtual AudioSink renders audio data starting from the very first sample
1203             // and it's paced by system clock.
1204             ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
1205             mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
1206             mUseVirtualAudioSink = true;
1207         }
1208     }
1209     mAnchorNumFramesWritten = mNumFramesWritten;
1210     mAnchorTimeMediaUs = mediaTimeUs;
1211 }
1212 
1213 // Called without mLock acquired.
postDrainVideoQueue()1214 void NuPlayer::Renderer::postDrainVideoQueue() {
1215     if (mDrainVideoQueuePending
1216             || getSyncQueues()
1217             || (mPaused && mVideoSampleReceived)) {
1218         return;
1219     }
1220 
1221     if (mVideoQueue.empty()) {
1222         return;
1223     }
1224 
1225     QueueEntry &entry = *mVideoQueue.begin();
1226 
1227     sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
1228     msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
1229 
1230     if (entry.mBuffer == NULL) {
1231         // EOS doesn't carry a timestamp.
1232         msg->post();
1233         mDrainVideoQueuePending = true;
1234         return;
1235     }
1236 
1237     bool needRepostDrainVideoQueue = false;
1238     int64_t delayUs;
1239     int64_t nowUs = ALooper::GetNowUs();
1240     int64_t realTimeUs;
1241     if (mFlags & FLAG_REAL_TIME) {
1242         int64_t mediaTimeUs;
1243         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1244         realTimeUs = mediaTimeUs;
1245     } else {
1246         int64_t mediaTimeUs;
1247         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1248 
1249         {
1250             Mutex::Autolock autoLock(mLock);
1251             if (mAnchorTimeMediaUs < 0) {
1252                 mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1253                 mAnchorTimeMediaUs = mediaTimeUs;
1254                 realTimeUs = nowUs;
1255             } else if (!mVideoSampleReceived) {
1256                 // Always render the first video frame.
1257                 realTimeUs = nowUs;
1258             } else if (mAudioFirstAnchorTimeMediaUs < 0
1259                 || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) {
1260                 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1261             } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) {
1262                 needRepostDrainVideoQueue = true;
1263                 realTimeUs = nowUs;
1264             } else {
1265                 realTimeUs = nowUs;
1266             }
1267         }
1268         if (!mHasAudio) {
1269             // smooth out videos >= 10fps
1270             mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1271         }
1272 
1273         // Heuristics to handle situation when media time changed without a
1274         // discontinuity. If we have not drained an audio buffer that was
1275         // received after this buffer, repost in 10 msec. Otherwise repost
1276         // in 500 msec.
1277         delayUs = realTimeUs - nowUs;
1278         int64_t postDelayUs = -1;
1279         if (delayUs > 500000) {
1280             postDelayUs = 500000;
1281             if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
1282                 postDelayUs = 10000;
1283             }
1284         } else if (needRepostDrainVideoQueue) {
1285             // CHECK(mPlaybackRate > 0);
1286             // CHECK(mAudioFirstAnchorTimeMediaUs >= 0);
1287             // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0);
1288             postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs;
1289             postDelayUs /= mPlaybackRate;
1290         }
1291 
1292         if (postDelayUs >= 0) {
1293             msg->setWhat(kWhatPostDrainVideoQueue);
1294             msg->post(postDelayUs);
1295             mVideoScheduler->restart();
1296             ALOGI("possible video time jump of %dms (%lld : %lld) or uninitialized media clock,"
1297                     " retrying in %dms",
1298                     (int)(delayUs / 1000), (long long)mediaTimeUs,
1299                     (long long)mAudioFirstAnchorTimeMediaUs, (int)(postDelayUs / 1000));
1300             mDrainVideoQueuePending = true;
1301             return;
1302         }
1303     }
1304 
1305     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1306     int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1307 
1308     delayUs = realTimeUs - nowUs;
1309 
1310     ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
1311     // post 2 display refreshes before rendering is due
1312     msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1313 
1314     mDrainVideoQueuePending = true;
1315 }
1316 
onDrainVideoQueue()1317 void NuPlayer::Renderer::onDrainVideoQueue() {
1318     if (mVideoQueue.empty()) {
1319         return;
1320     }
1321 
1322     QueueEntry *entry = &*mVideoQueue.begin();
1323 
1324     if (entry->mBuffer == NULL) {
1325         // EOS
1326 
1327         notifyEOS(false /* audio */, entry->mFinalResult);
1328 
1329         mVideoQueue.erase(mVideoQueue.begin());
1330         entry = NULL;
1331 
1332         setVideoLateByUs(0);
1333         return;
1334     }
1335 
1336     int64_t nowUs = ALooper::GetNowUs();
1337     int64_t realTimeUs;
1338     int64_t mediaTimeUs = -1;
1339     if (mFlags & FLAG_REAL_TIME) {
1340         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1341     } else {
1342         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1343 
1344         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1345     }
1346 
1347     bool tooLate = false;
1348 
1349     if (!mPaused) {
1350         setVideoLateByUs(nowUs - realTimeUs);
1351         tooLate = (mVideoLateByUs > 40000);
1352 
1353         if (tooLate) {
1354             ALOGV("video late by %lld us (%.2f secs)",
1355                  (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1356         } else {
1357             int64_t mediaUs = 0;
1358             mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1359             ALOGV("rendering video at media time %.2f secs",
1360                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
1361                     mediaUs) / 1E6);
1362 
1363             if (!(mFlags & FLAG_REAL_TIME)
1364                     && mLastAudioMediaTimeUs != -1
1365                     && mediaTimeUs > mLastAudioMediaTimeUs) {
1366                 // If audio ends before video, video continues to drive media clock.
1367                 // Also smooth out videos >= 10fps.
1368                 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1369             }
1370         }
1371     } else {
1372         setVideoLateByUs(0);
1373         if (!mVideoSampleReceived && !mHasAudio) {
1374             // This will ensure that the first frame after a flush won't be used as anchor
1375             // when renderer is in paused state, because resume can happen any time after seek.
1376             clearAnchorTime();
1377         }
1378     }
1379 
1380     // Always render the first video frame while keeping stats on A/V sync.
1381     if (!mVideoSampleReceived) {
1382         realTimeUs = nowUs;
1383         tooLate = false;
1384     }
1385 
1386     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1387     entry->mNotifyConsumed->setInt32("render", !tooLate);
1388     entry->mNotifyConsumed->post();
1389     mVideoQueue.erase(mVideoQueue.begin());
1390     entry = NULL;
1391 
1392     mVideoSampleReceived = true;
1393 
1394     if (!mPaused) {
1395         if (!mVideoRenderingStarted) {
1396             mVideoRenderingStarted = true;
1397             notifyVideoRenderingStart();
1398         }
1399         Mutex::Autolock autoLock(mLock);
1400         notifyIfMediaRenderingStarted_l();
1401     }
1402 }
1403 
notifyVideoRenderingStart()1404 void NuPlayer::Renderer::notifyVideoRenderingStart() {
1405     sp<AMessage> notify = mNotify->dup();
1406     notify->setInt32("what", kWhatVideoRenderingStart);
1407     notify->post();
1408 }
1409 
notifyEOS(bool audio,status_t finalResult,int64_t delayUs)1410 void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1411     if (audio && delayUs > 0) {
1412         sp<AMessage> msg = new AMessage(kWhatEOS, this);
1413         msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1414         msg->setInt32("finalResult", finalResult);
1415         msg->post(delayUs);
1416         return;
1417     }
1418     sp<AMessage> notify = mNotify->dup();
1419     notify->setInt32("what", kWhatEOS);
1420     notify->setInt32("audio", static_cast<int32_t>(audio));
1421     notify->setInt32("finalResult", finalResult);
1422     notify->post(delayUs);
1423 }
1424 
notifyAudioTearDown(AudioTearDownReason reason)1425 void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1426     sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1427     msg->setInt32("reason", reason);
1428     msg->post();
1429 }
1430 
onQueueBuffer(const sp<AMessage> & msg)1431 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1432     int32_t audio;
1433     CHECK(msg->findInt32("audio", &audio));
1434 
1435     if (dropBufferIfStale(audio, msg)) {
1436         return;
1437     }
1438 
1439     if (audio) {
1440         mHasAudio = true;
1441     } else {
1442         mHasVideo = true;
1443     }
1444 
1445     if (mHasVideo) {
1446         if (mVideoScheduler == NULL) {
1447             mVideoScheduler = new VideoFrameScheduler();
1448             mVideoScheduler->init();
1449         }
1450     }
1451 
1452     sp<RefBase> obj;
1453     CHECK(msg->findObject("buffer", &obj));
1454     sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
1455 
1456     sp<AMessage> notifyConsumed;
1457     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1458 
1459     QueueEntry entry;
1460     entry.mBuffer = buffer;
1461     entry.mNotifyConsumed = notifyConsumed;
1462     entry.mOffset = 0;
1463     entry.mFinalResult = OK;
1464     entry.mBufferOrdinal = ++mTotalBuffersQueued;
1465 
1466     if (audio) {
1467         Mutex::Autolock autoLock(mLock);
1468         mAudioQueue.push_back(entry);
1469         postDrainAudioQueue_l();
1470     } else {
1471         mVideoQueue.push_back(entry);
1472         postDrainVideoQueue();
1473     }
1474 
1475     Mutex::Autolock autoLock(mLock);
1476     if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1477         return;
1478     }
1479 
1480     sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1481     sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1482 
1483     if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1484         // EOS signalled on either queue.
1485         syncQueuesDone_l();
1486         return;
1487     }
1488 
1489     int64_t firstAudioTimeUs;
1490     int64_t firstVideoTimeUs;
1491     CHECK(firstAudioBuffer->meta()
1492             ->findInt64("timeUs", &firstAudioTimeUs));
1493     CHECK(firstVideoBuffer->meta()
1494             ->findInt64("timeUs", &firstVideoTimeUs));
1495 
1496     int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1497 
1498     ALOGV("queueDiff = %.2f secs", diff / 1E6);
1499 
1500     if (diff > 100000ll) {
1501         // Audio data starts More than 0.1 secs before video.
1502         // Drop some audio.
1503 
1504         (*mAudioQueue.begin()).mNotifyConsumed->post();
1505         mAudioQueue.erase(mAudioQueue.begin());
1506         return;
1507     }
1508 
1509     syncQueuesDone_l();
1510 }
1511 
syncQueuesDone_l()1512 void NuPlayer::Renderer::syncQueuesDone_l() {
1513     if (!mSyncQueues) {
1514         return;
1515     }
1516 
1517     mSyncQueues = false;
1518 
1519     if (!mAudioQueue.empty()) {
1520         postDrainAudioQueue_l();
1521     }
1522 
1523     if (!mVideoQueue.empty()) {
1524         mLock.unlock();
1525         postDrainVideoQueue();
1526         mLock.lock();
1527     }
1528 }
1529 
onQueueEOS(const sp<AMessage> & msg)1530 void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1531     int32_t audio;
1532     CHECK(msg->findInt32("audio", &audio));
1533 
1534     if (dropBufferIfStale(audio, msg)) {
1535         return;
1536     }
1537 
1538     int32_t finalResult;
1539     CHECK(msg->findInt32("finalResult", &finalResult));
1540 
1541     QueueEntry entry;
1542     entry.mOffset = 0;
1543     entry.mFinalResult = finalResult;
1544 
1545     if (audio) {
1546         Mutex::Autolock autoLock(mLock);
1547         if (mAudioQueue.empty() && mSyncQueues) {
1548             syncQueuesDone_l();
1549         }
1550         mAudioQueue.push_back(entry);
1551         postDrainAudioQueue_l();
1552     } else {
1553         if (mVideoQueue.empty() && getSyncQueues()) {
1554             Mutex::Autolock autoLock(mLock);
1555             syncQueuesDone_l();
1556         }
1557         mVideoQueue.push_back(entry);
1558         postDrainVideoQueue();
1559     }
1560 }
1561 
onFlush(const sp<AMessage> & msg)1562 void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1563     int32_t audio, notifyComplete;
1564     CHECK(msg->findInt32("audio", &audio));
1565 
1566     {
1567         Mutex::Autolock autoLock(mLock);
1568         if (audio) {
1569             notifyComplete = mNotifyCompleteAudio;
1570             mNotifyCompleteAudio = false;
1571             mLastAudioMediaTimeUs = -1;
1572         } else {
1573             notifyComplete = mNotifyCompleteVideo;
1574             mNotifyCompleteVideo = false;
1575         }
1576 
1577         // If we're currently syncing the queues, i.e. dropping audio while
1578         // aligning the first audio/video buffer times and only one of the
1579         // two queues has data, we may starve that queue by not requesting
1580         // more buffers from the decoder. If the other source then encounters
1581         // a discontinuity that leads to flushing, we'll never find the
1582         // corresponding discontinuity on the other queue.
1583         // Therefore we'll stop syncing the queues if at least one of them
1584         // is flushed.
1585         syncQueuesDone_l();
1586     }
1587     clearAnchorTime();
1588 
1589     ALOGV("flushing %s", audio ? "audio" : "video");
1590     if (audio) {
1591         {
1592             Mutex::Autolock autoLock(mLock);
1593             flushQueue(&mAudioQueue);
1594 
1595             ++mAudioDrainGeneration;
1596             ++mAudioEOSGeneration;
1597             prepareForMediaRenderingStart_l();
1598 
1599             // the frame count will be reset after flush.
1600             clearAudioFirstAnchorTime_l();
1601         }
1602 
1603         mDrainAudioQueuePending = false;
1604 
1605         if (offloadingAudio()) {
1606             mAudioSink->pause();
1607             mAudioSink->flush();
1608             if (!mPaused) {
1609                 mAudioSink->start();
1610             }
1611         } else {
1612             mAudioSink->pause();
1613             mAudioSink->flush();
1614             // Call stop() to signal to the AudioSink to completely fill the
1615             // internal buffer before resuming playback.
1616             // FIXME: this is ignored after flush().
1617             mAudioSink->stop();
1618             if (mPaused) {
1619                 // Race condition: if renderer is paused and audio sink is stopped,
1620                 // we need to make sure that the audio track buffer fully drains
1621                 // before delivering data.
1622                 // FIXME: remove this if we can detect if stop() is complete.
1623                 const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
1624                 mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
1625             } else {
1626                 mAudioSink->start();
1627             }
1628             mNumFramesWritten = 0;
1629         }
1630         mNextAudioClockUpdateTimeUs = -1;
1631     } else {
1632         flushQueue(&mVideoQueue);
1633 
1634         mDrainVideoQueuePending = false;
1635 
1636         if (mVideoScheduler != NULL) {
1637             mVideoScheduler->restart();
1638         }
1639 
1640         Mutex::Autolock autoLock(mLock);
1641         ++mVideoDrainGeneration;
1642         prepareForMediaRenderingStart_l();
1643     }
1644 
1645     mVideoSampleReceived = false;
1646 
1647     if (notifyComplete) {
1648         notifyFlushComplete(audio);
1649     }
1650 }
1651 
flushQueue(List<QueueEntry> * queue)1652 void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1653     while (!queue->empty()) {
1654         QueueEntry *entry = &*queue->begin();
1655 
1656         if (entry->mBuffer != NULL) {
1657             entry->mNotifyConsumed->post();
1658         } else if (entry->mNotifyConsumed != nullptr) {
1659             // Is it needed to open audio sink now?
1660             onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1661         }
1662 
1663         queue->erase(queue->begin());
1664         entry = NULL;
1665     }
1666 }
1667 
notifyFlushComplete(bool audio)1668 void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1669     sp<AMessage> notify = mNotify->dup();
1670     notify->setInt32("what", kWhatFlushComplete);
1671     notify->setInt32("audio", static_cast<int32_t>(audio));
1672     notify->post();
1673 }
1674 
dropBufferIfStale(bool audio,const sp<AMessage> & msg)1675 bool NuPlayer::Renderer::dropBufferIfStale(
1676         bool audio, const sp<AMessage> &msg) {
1677     int32_t queueGeneration;
1678     CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1679 
1680     if (queueGeneration == getQueueGeneration(audio)) {
1681         return false;
1682     }
1683 
1684     sp<AMessage> notifyConsumed;
1685     if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1686         notifyConsumed->post();
1687     }
1688 
1689     return true;
1690 }
1691 
onAudioSinkChanged()1692 void NuPlayer::Renderer::onAudioSinkChanged() {
1693     if (offloadingAudio()) {
1694         return;
1695     }
1696     CHECK(!mDrainAudioQueuePending);
1697     mNumFramesWritten = 0;
1698     mAnchorNumFramesWritten = -1;
1699     uint32_t written;
1700     if (mAudioSink->getFramesWritten(&written) == OK) {
1701         mNumFramesWritten = written;
1702     }
1703 }
1704 
onDisableOffloadAudio()1705 void NuPlayer::Renderer::onDisableOffloadAudio() {
1706     Mutex::Autolock autoLock(mLock);
1707     mFlags &= ~FLAG_OFFLOAD_AUDIO;
1708     ++mAudioDrainGeneration;
1709     if (mAudioRenderingStartGeneration != -1) {
1710         prepareForMediaRenderingStart_l();
1711     }
1712 }
1713 
onEnableOffloadAudio()1714 void NuPlayer::Renderer::onEnableOffloadAudio() {
1715     Mutex::Autolock autoLock(mLock);
1716     mFlags |= FLAG_OFFLOAD_AUDIO;
1717     ++mAudioDrainGeneration;
1718     if (mAudioRenderingStartGeneration != -1) {
1719         prepareForMediaRenderingStart_l();
1720     }
1721 }
1722 
onPause()1723 void NuPlayer::Renderer::onPause() {
1724     if (mPaused) {
1725         return;
1726     }
1727 
1728     {
1729         Mutex::Autolock autoLock(mLock);
1730         // we do not increment audio drain generation so that we fill audio buffer during pause.
1731         ++mVideoDrainGeneration;
1732         prepareForMediaRenderingStart_l();
1733         mPaused = true;
1734         mMediaClock->setPlaybackRate(0.0);
1735     }
1736 
1737     mDrainAudioQueuePending = false;
1738     mDrainVideoQueuePending = false;
1739 
1740     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1741     mAudioSink->pause();
1742     startAudioOffloadPauseTimeout();
1743 
1744     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1745           mAudioQueue.size(), mVideoQueue.size());
1746 }
1747 
onResume()1748 void NuPlayer::Renderer::onResume() {
1749     if (!mPaused) {
1750         return;
1751     }
1752 
1753     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1754     cancelAudioOffloadPauseTimeout();
1755     if (mAudioSink->ready()) {
1756         status_t err = mAudioSink->start();
1757         if (err != OK) {
1758             ALOGE("cannot start AudioSink err %d", err);
1759             notifyAudioTearDown(kDueToError);
1760         }
1761     }
1762 
1763     {
1764         Mutex::Autolock autoLock(mLock);
1765         mPaused = false;
1766         // rendering started message may have been delayed if we were paused.
1767         if (mRenderingDataDelivered) {
1768             notifyIfMediaRenderingStarted_l();
1769         }
1770         // configure audiosink as we did not do it when pausing
1771         if (mAudioSink != NULL && mAudioSink->ready()) {
1772             mAudioSink->setPlaybackRate(mPlaybackSettings);
1773         }
1774 
1775         mMediaClock->setPlaybackRate(mPlaybackRate);
1776 
1777         if (!mAudioQueue.empty()) {
1778             postDrainAudioQueue_l();
1779         }
1780     }
1781 
1782     if (!mVideoQueue.empty()) {
1783         postDrainVideoQueue();
1784     }
1785 }
1786 
onSetVideoFrameRate(float fps)1787 void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1788     if (mVideoScheduler == NULL) {
1789         mVideoScheduler = new VideoFrameScheduler();
1790     }
1791     mVideoScheduler->init(fps);
1792 }
1793 
getQueueGeneration(bool audio)1794 int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1795     Mutex::Autolock autoLock(mLock);
1796     return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1797 }
1798 
getDrainGeneration(bool audio)1799 int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1800     Mutex::Autolock autoLock(mLock);
1801     return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1802 }
1803 
getSyncQueues()1804 bool NuPlayer::Renderer::getSyncQueues() {
1805     Mutex::Autolock autoLock(mLock);
1806     return mSyncQueues;
1807 }
1808 
onAudioTearDown(AudioTearDownReason reason)1809 void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1810     if (mAudioTornDown) {
1811         return;
1812     }
1813     mAudioTornDown = true;
1814 
1815     int64_t currentPositionUs;
1816     sp<AMessage> notify = mNotify->dup();
1817     if (getCurrentPosition(&currentPositionUs) == OK) {
1818         notify->setInt64("positionUs", currentPositionUs);
1819     }
1820 
1821     mAudioSink->stop();
1822     mAudioSink->flush();
1823 
1824     notify->setInt32("what", kWhatAudioTearDown);
1825     notify->setInt32("reason", reason);
1826     notify->post();
1827 }
1828 
startAudioOffloadPauseTimeout()1829 void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1830     if (offloadingAudio()) {
1831         mWakeLock->acquire();
1832         sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1833         msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1834         msg->post(kOffloadPauseMaxUs);
1835     }
1836 }
1837 
cancelAudioOffloadPauseTimeout()1838 void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1839     // We may have called startAudioOffloadPauseTimeout() without
1840     // the AudioSink open and with offloadingAudio enabled.
1841     //
1842     // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
1843     // we always release the wakelock and increment the pause timeout generation.
1844     //
1845     // Note: The acquired wakelock prevents the device from suspending
1846     // immediately after offload pause (in case a resume happens shortly thereafter).
1847     mWakeLock->release(true);
1848     ++mAudioOffloadPauseTimeoutGeneration;
1849 }
1850 
onOpenAudioSink(const sp<AMessage> & format,bool offloadOnly,bool hasVideo,uint32_t flags,bool isStreaming)1851 status_t NuPlayer::Renderer::onOpenAudioSink(
1852         const sp<AMessage> &format,
1853         bool offloadOnly,
1854         bool hasVideo,
1855         uint32_t flags,
1856         bool isStreaming) {
1857     ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1858             offloadOnly, offloadingAudio());
1859     bool audioSinkChanged = false;
1860 
1861     int32_t numChannels;
1862     CHECK(format->findInt32("channel-count", &numChannels));
1863 
1864     int32_t channelMask;
1865     if (!format->findInt32("channel-mask", &channelMask)) {
1866         // signal to the AudioSink to derive the mask from count.
1867         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1868     }
1869 
1870     int32_t sampleRate;
1871     CHECK(format->findInt32("sample-rate", &sampleRate));
1872 
1873     if (offloadingAudio()) {
1874         audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
1875         AString mime;
1876         CHECK(format->findString("mime", &mime));
1877         status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1878 
1879         if (err != OK) {
1880             ALOGE("Couldn't map mime \"%s\" to a valid "
1881                     "audio_format", mime.c_str());
1882             onDisableOffloadAudio();
1883         } else {
1884             ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1885                     mime.c_str(), audioFormat);
1886 
1887             int avgBitRate = -1;
1888             format->findInt32("bitrate", &avgBitRate);
1889 
1890             int32_t aacProfile = -1;
1891             if (audioFormat == AUDIO_FORMAT_AAC
1892                     && format->findInt32("aac-profile", &aacProfile)) {
1893                 // Redefine AAC format as per aac profile
1894                 mapAACProfileToAudioFormat(
1895                         audioFormat,
1896                         aacProfile);
1897             }
1898 
1899             audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1900             offloadInfo.duration_us = -1;
1901             format->findInt64(
1902                     "durationUs", &offloadInfo.duration_us);
1903             offloadInfo.sample_rate = sampleRate;
1904             offloadInfo.channel_mask = channelMask;
1905             offloadInfo.format = audioFormat;
1906             offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1907             offloadInfo.bit_rate = avgBitRate;
1908             offloadInfo.has_video = hasVideo;
1909             offloadInfo.is_streaming = isStreaming;
1910 
1911             if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1912                 ALOGV("openAudioSink: no change in offload mode");
1913                 // no change from previous configuration, everything ok.
1914                 return OK;
1915             }
1916             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1917 
1918             ALOGV("openAudioSink: try to open AudioSink in offload mode");
1919             uint32_t offloadFlags = flags;
1920             offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1921             offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1922             audioSinkChanged = true;
1923             mAudioSink->close();
1924 
1925             err = mAudioSink->open(
1926                     sampleRate,
1927                     numChannels,
1928                     (audio_channel_mask_t)channelMask,
1929                     audioFormat,
1930                     0 /* bufferCount - unused */,
1931                     &NuPlayer::Renderer::AudioSinkCallback,
1932                     this,
1933                     (audio_output_flags_t)offloadFlags,
1934                     &offloadInfo);
1935 
1936             if (err == OK) {
1937                 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1938             }
1939 
1940             if (err == OK) {
1941                 // If the playback is offloaded to h/w, we pass
1942                 // the HAL some metadata information.
1943                 // We don't want to do this for PCM because it
1944                 // will be going through the AudioFlinger mixer
1945                 // before reaching the hardware.
1946                 // TODO
1947                 mCurrentOffloadInfo = offloadInfo;
1948                 if (!mPaused) { // for preview mode, don't start if paused
1949                     err = mAudioSink->start();
1950                 }
1951                 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1952             }
1953             if (err != OK) {
1954                 // Clean up, fall back to non offload mode.
1955                 mAudioSink->close();
1956                 onDisableOffloadAudio();
1957                 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1958                 ALOGV("openAudioSink: offload failed");
1959                 if (offloadOnly) {
1960                     notifyAudioTearDown(kForceNonOffload);
1961                 }
1962             } else {
1963                 mUseAudioCallback = true;  // offload mode transfers data through callback
1964                 ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1965             }
1966         }
1967     }
1968     if (!offloadOnly && !offloadingAudio()) {
1969         ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1970         uint32_t pcmFlags = flags;
1971         pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1972 
1973         const PcmInfo info = {
1974                 (audio_channel_mask_t)channelMask,
1975                 (audio_output_flags_t)pcmFlags,
1976                 AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
1977                 numChannels,
1978                 sampleRate
1979         };
1980         if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
1981             ALOGV("openAudioSink: no change in pcm mode");
1982             // no change from previous configuration, everything ok.
1983             return OK;
1984         }
1985 
1986         audioSinkChanged = true;
1987         mAudioSink->close();
1988         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1989         // Note: It is possible to set up the callback, but not use it to send audio data.
1990         // This requires a fix in AudioSink to explicitly specify the transfer mode.
1991         mUseAudioCallback = getUseAudioCallbackSetting();
1992         if (mUseAudioCallback) {
1993             ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1994         }
1995 
1996         // Compute the desired buffer size.
1997         // For callback mode, the amount of time before wakeup is about half the buffer size.
1998         const uint32_t frameCount =
1999                 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
2000 
2001         // The doNotReconnect means AudioSink will signal back and let NuPlayer to re-construct
2002         // AudioSink. We don't want this when there's video because it will cause a video seek to
2003         // the previous I frame. But we do want this when there's only audio because it will give
2004         // NuPlayer a chance to switch from non-offload mode to offload mode.
2005         // So we only set doNotReconnect when there's no video.
2006         const bool doNotReconnect = !hasVideo;
2007 
2008         // We should always be able to set our playback settings if the sink is closed.
2009         LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
2010                 "onOpenAudioSink: can't set playback rate on closed sink");
2011         status_t err = mAudioSink->open(
2012                     sampleRate,
2013                     numChannels,
2014                     (audio_channel_mask_t)channelMask,
2015                     AUDIO_FORMAT_PCM_16_BIT,
2016                     0 /* bufferCount - unused */,
2017                     mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
2018                     mUseAudioCallback ? this : NULL,
2019                     (audio_output_flags_t)pcmFlags,
2020                     NULL,
2021                     doNotReconnect,
2022                     frameCount);
2023         if (err != OK) {
2024             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2025             mAudioSink->close();
2026             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2027             return err;
2028         }
2029         mCurrentPcmInfo = info;
2030         if (!mPaused) { // for preview mode, don't start if paused
2031             mAudioSink->start();
2032         }
2033     }
2034     if (audioSinkChanged) {
2035         onAudioSinkChanged();
2036     }
2037     mAudioTornDown = false;
2038     return OK;
2039 }
2040 
onCloseAudioSink()2041 void NuPlayer::Renderer::onCloseAudioSink() {
2042     mAudioSink->close();
2043     mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2044     mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2045 }
2046 
onChangeAudioFormat(const sp<AMessage> & meta,const sp<AMessage> & notify)2047 void NuPlayer::Renderer::onChangeAudioFormat(
2048         const sp<AMessage> &meta, const sp<AMessage> &notify) {
2049     sp<AMessage> format;
2050     CHECK(meta->findMessage("format", &format));
2051 
2052     int32_t offloadOnly;
2053     CHECK(meta->findInt32("offload-only", &offloadOnly));
2054 
2055     int32_t hasVideo;
2056     CHECK(meta->findInt32("has-video", &hasVideo));
2057 
2058     uint32_t flags;
2059     CHECK(meta->findInt32("flags", (int32_t *)&flags));
2060 
2061     uint32_t isStreaming;
2062     CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2063 
2064     status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2065 
2066     if (err != OK) {
2067         notify->setInt32("err", err);
2068     }
2069     notify->post();
2070 }
2071 
2072 }  // namespace android
2073 
2074