1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "NuPlayerRenderer"
19 #include <utils/Log.h>
20 
21 #include "NuPlayerRenderer.h"
22 #include <cutils/properties.h>
23 #include <media/stagefright/foundation/ABuffer.h>
24 #include <media/stagefright/foundation/ADebug.h>
25 #include <media/stagefright/foundation/AMessage.h>
26 #include <media/stagefright/foundation/AUtils.h>
27 #include <media/stagefright/foundation/AWakeLock.h>
28 #include <media/stagefright/MediaClock.h>
29 #include <media/stagefright/MediaErrors.h>
30 #include <media/stagefright/MetaData.h>
31 #include <media/stagefright/Utils.h>
32 #include <media/stagefright/VideoFrameScheduler.h>
33 
34 #include <inttypes.h>
35 
36 namespace android {
37 
38 /*
39  * Example of common configuration settings in shell script form
40 
41    #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
42    adb shell setprop audio.offload.disable 1
43 
44    #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
45    adb shell setprop audio.offload.video 1
46 
47    #Use audio callbacks for PCM data
48    adb shell setprop media.stagefright.audio.cbk 1
49 
50    #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
51    adb shell setprop media.stagefright.audio.deep 1
52 
53    #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
54    adb shell setprop media.stagefright.audio.sink 1000
55 
56  * These configurations take effect for the next track played (not the current track).
57  */
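// For reference, the current value of any of the properties above can be read
// back with "adb shell getprop <name>", e.g. "adb shell getprop media.stagefright.audio.sink".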
58 
59 static inline bool getUseAudioCallbackSetting() {
60     return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
61 }
62 
63 static inline int32_t getAudioSinkPcmMsSetting() {
64     return property_get_int32(
65             "media.stagefright.audio.sink", 500 /* default_value */);
66 }
67 
68 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
69 // is closed to allow the audio DSP to power down.
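// The value below corresponds to 10 seconds (10,000,000 microseconds).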
70 static const int64_t kOffloadPauseMaxUs = 10000000ll;
71 
72 // static
73 const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
74         AUDIO_CHANNEL_NONE,
75         AUDIO_OUTPUT_FLAG_NONE,
76         AUDIO_FORMAT_INVALID,
77         0, // mNumChannels
78         0 // mSampleRate
79 };
80 
81 // static
82 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
83 
84 NuPlayer::Renderer::Renderer(
85         const sp<MediaPlayerBase::AudioSink> &sink,
86         const sp<AMessage> &notify,
87         uint32_t flags)
88     : mAudioSink(sink),
89       mNotify(notify),
90       mFlags(flags),
91       mNumFramesWritten(0),
92       mDrainAudioQueuePending(false),
93       mDrainVideoQueuePending(false),
94       mAudioQueueGeneration(0),
95       mVideoQueueGeneration(0),
96       mAudioDrainGeneration(0),
97       mVideoDrainGeneration(0),
98       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
99       mAudioFirstAnchorTimeMediaUs(-1),
100       mAnchorTimeMediaUs(-1),
101       mAnchorNumFramesWritten(-1),
102       mVideoLateByUs(0ll),
103       mHasAudio(false),
104       mHasVideo(false),
105       mNotifyCompleteAudio(false),
106       mNotifyCompleteVideo(false),
107       mSyncQueues(false),
108       mPaused(false),
109       mVideoSampleReceived(false),
110       mVideoRenderingStarted(false),
111       mVideoRenderingStartGeneration(0),
112       mAudioRenderingStartGeneration(0),
113       mAudioOffloadPauseTimeoutGeneration(0),
114       mAudioTornDown(false),
115       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
116       mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
117       mTotalBuffersQueued(0),
118       mLastAudioBufferDrained(0),
119       mUseAudioCallback(false),
120       mWakeLock(new AWakeLock()) {
121     mMediaClock = new MediaClock;
122     mPlaybackRate = mPlaybackSettings.mSpeed;
123     mMediaClock->setPlaybackRate(mPlaybackRate);
124 }
125 
126 NuPlayer::Renderer::~Renderer() {
127     if (offloadingAudio()) {
128         mAudioSink->stop();
129         mAudioSink->flush();
130         mAudioSink->close();
131     }
132 }
133 
134 void NuPlayer::Renderer::queueBuffer(
135         bool audio,
136         const sp<ABuffer> &buffer,
137         const sp<AMessage> &notifyConsumed) {
138     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
139     msg->setInt32("queueGeneration", getQueueGeneration(audio));
140     msg->setInt32("audio", static_cast<int32_t>(audio));
141     msg->setBuffer("buffer", buffer);
142     msg->setMessage("notifyConsumed", notifyConsumed);
143     msg->post();
144 }
145 
146 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
147     CHECK_NE(finalResult, (status_t)OK);
148 
149     sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
150     msg->setInt32("queueGeneration", getQueueGeneration(audio));
151     msg->setInt32("audio", static_cast<int32_t>(audio));
152     msg->setInt32("finalResult", finalResult);
153     msg->post();
154 }
155 
156 status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
157     sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
158     writeToAMessage(msg, rate);
159     sp<AMessage> response;
160     status_t err = msg->postAndAwaitResponse(&response);
161     if (err == OK && response != NULL) {
162         CHECK(response->findInt32("err", &err));
163     }
164     return err;
165 }
166 
167 status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
168     if (rate.mSpeed == 0.f) {
169         onPause();
170         // don't call audiosink's setPlaybackRate if pausing, as pitch does not
171         // have to correspond to any non-zero speed (e.g. the old speed). Keep the
172         // settings nonetheless, using the old speed, in case the audiosink changes.
173         AudioPlaybackRate newRate = rate;
174         newRate.mSpeed = mPlaybackSettings.mSpeed;
175         mPlaybackSettings = newRate;
176         return OK;
177     }
178 
179     if (mAudioSink != NULL && mAudioSink->ready()) {
180         status_t err = mAudioSink->setPlaybackRate(rate);
181         if (err != OK) {
182             return err;
183         }
184     }
185     mPlaybackSettings = rate;
186     mPlaybackRate = rate.mSpeed;
187     mMediaClock->setPlaybackRate(mPlaybackRate);
188     return OK;
189 }
190 
191 status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
192     sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
193     sp<AMessage> response;
194     status_t err = msg->postAndAwaitResponse(&response);
195     if (err == OK && response != NULL) {
196         CHECK(response->findInt32("err", &err));
197         if (err == OK) {
198             readFromAMessage(response, rate);
199         }
200     }
201     return err;
202 }
203 
204 status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
205     if (mAudioSink != NULL && mAudioSink->ready()) {
206         status_t err = mAudioSink->getPlaybackRate(rate);
207         if (err == OK) {
208             if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
209                 ALOGW("correcting mismatch in internal/external playback rate");
210             }
211             // get the playback settings used by the audiosink, as they may be
212             // slightly off because the audiosink does not apply small changes.
213             mPlaybackSettings = *rate;
214             if (mPaused) {
215                 rate->mSpeed = 0.f;
216             }
217         }
218         return err;
219     }
220     *rate = mPlaybackSettings;
221     return OK;
222 }
223 
224 status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
225     sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
226     writeToAMessage(msg, sync, videoFpsHint);
227     sp<AMessage> response;
228     status_t err = msg->postAndAwaitResponse(&response);
229     if (err == OK && response != NULL) {
230         CHECK(response->findInt32("err", &err));
231     }
232     return err;
233 }
234 
235 status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
236     if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
237         return BAD_VALUE;
238     }
239     // TODO: support sync sources
240     return INVALID_OPERATION;
241 }
242 
243 status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
244     sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
245     sp<AMessage> response;
246     status_t err = msg->postAndAwaitResponse(&response);
247     if (err == OK && response != NULL) {
248         CHECK(response->findInt32("err", &err));
249         if (err == OK) {
250             readFromAMessage(response, sync, videoFps);
251         }
252     }
253     return err;
254 }
255 
256 status_t NuPlayer::Renderer::onGetSyncSettings(
257         AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
258     *sync = mSyncSettings;
259     *videoFps = -1.f;
260     return OK;
261 }
262 
263 void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
264     {
265         Mutex::Autolock autoLock(mLock);
266         if (audio) {
267             mNotifyCompleteAudio |= notifyComplete;
268             clearAudioFirstAnchorTime_l();
269             ++mAudioQueueGeneration;
270             ++mAudioDrainGeneration;
271         } else {
272             mNotifyCompleteVideo |= notifyComplete;
273             ++mVideoQueueGeneration;
274             ++mVideoDrainGeneration;
275         }
276 
277         clearAnchorTime_l();
278         mVideoLateByUs = 0;
279         mSyncQueues = false;
280     }
281 
282     sp<AMessage> msg = new AMessage(kWhatFlush, this);
283     msg->setInt32("audio", static_cast<int32_t>(audio));
284     msg->post();
285 }
286 
287 void NuPlayer::Renderer::signalTimeDiscontinuity() {
288 }
289 
290 void NuPlayer::Renderer::signalDisableOffloadAudio() {
291     (new AMessage(kWhatDisableOffloadAudio, this))->post();
292 }
293 
294 void NuPlayer::Renderer::signalEnableOffloadAudio() {
295     (new AMessage(kWhatEnableOffloadAudio, this))->post();
296 }
297 
298 void NuPlayer::Renderer::pause() {
299     (new AMessage(kWhatPause, this))->post();
300 }
301 
302 void NuPlayer::Renderer::resume() {
303     (new AMessage(kWhatResume, this))->post();
304 }
305 
306 void NuPlayer::Renderer::setVideoFrameRate(float fps) {
307     sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
308     msg->setFloat("frame-rate", fps);
309     msg->post();
310 }
311 
312 // Called on any thread.
313 status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
314     return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
315 }
316 
317 void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
318     mAudioFirstAnchorTimeMediaUs = -1;
319     mMediaClock->setStartingTimeMedia(-1);
320 }
321 
322 void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
323     if (mAudioFirstAnchorTimeMediaUs == -1) {
324         mAudioFirstAnchorTimeMediaUs = mediaUs;
325         mMediaClock->setStartingTimeMedia(mediaUs);
326     }
327 }
328 
329 void NuPlayer::Renderer::clearAnchorTime_l() {
330     mMediaClock->clearAnchor();
331     mAnchorTimeMediaUs = -1;
332     mAnchorNumFramesWritten = -1;
333 }
334 
335 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
336     Mutex::Autolock autoLock(mLock);
337     mVideoLateByUs = lateUs;
338 }
339 
340 int64_t NuPlayer::Renderer::getVideoLateByUs() {
341     Mutex::Autolock autoLock(mLock);
342     return mVideoLateByUs;
343 }
344 
345 status_t NuPlayer::Renderer::openAudioSink(
346         const sp<AMessage> &format,
347         bool offloadOnly,
348         bool hasVideo,
349         uint32_t flags,
350         bool *isOffloaded) {
351     sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
352     msg->setMessage("format", format);
353     msg->setInt32("offload-only", offloadOnly);
354     msg->setInt32("has-video", hasVideo);
355     msg->setInt32("flags", flags);
356 
357     sp<AMessage> response;
358     msg->postAndAwaitResponse(&response);
359 
360     int32_t err;
361     if (!response->findInt32("err", &err)) {
362         err = INVALID_OPERATION;
363     } else if (err == OK && isOffloaded != NULL) {
364         int32_t offload;
365         CHECK(response->findInt32("offload", &offload));
366         *isOffloaded = (offload != 0);
367     }
368     return err;
369 }
370 
371 void NuPlayer::Renderer::closeAudioSink() {
372     sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
373 
374     sp<AMessage> response;
375     msg->postAndAwaitResponse(&response);
376 }
377 
378 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
379     switch (msg->what()) {
380         case kWhatOpenAudioSink:
381         {
382             sp<AMessage> format;
383             CHECK(msg->findMessage("format", &format));
384 
385             int32_t offloadOnly;
386             CHECK(msg->findInt32("offload-only", &offloadOnly));
387 
388             int32_t hasVideo;
389             CHECK(msg->findInt32("has-video", &hasVideo));
390 
391             uint32_t flags;
392             CHECK(msg->findInt32("flags", (int32_t *)&flags));
393 
394             status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
395 
396             sp<AMessage> response = new AMessage;
397             response->setInt32("err", err);
398             response->setInt32("offload", offloadingAudio());
399 
400             sp<AReplyToken> replyID;
401             CHECK(msg->senderAwaitsResponse(&replyID));
402             response->postReply(replyID);
403 
404             break;
405         }
406 
407         case kWhatCloseAudioSink:
408         {
409             sp<AReplyToken> replyID;
410             CHECK(msg->senderAwaitsResponse(&replyID));
411 
412             onCloseAudioSink();
413 
414             sp<AMessage> response = new AMessage;
415             response->postReply(replyID);
416             break;
417         }
418 
419         case kWhatStopAudioSink:
420         {
421             mAudioSink->stop();
422             break;
423         }
424 
425         case kWhatDrainAudioQueue:
426         {
427             mDrainAudioQueuePending = false;
428 
429             int32_t generation;
430             CHECK(msg->findInt32("drainGeneration", &generation));
431             if (generation != getDrainGeneration(true /* audio */)) {
432                 break;
433             }
434 
435             if (onDrainAudioQueue()) {
436                 uint32_t numFramesPlayed;
437                 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
438                          (status_t)OK);
439 
440                 uint32_t numFramesPendingPlayout =
441                     mNumFramesWritten - numFramesPlayed;
442 
443                 // This is how long the audio sink will have data to
444                 // play back.
445                 int64_t delayUs =
446                     mAudioSink->msecsPerFrame()
447                         * numFramesPendingPlayout * 1000ll;
448                 if (mPlaybackRate > 1.0f) {
449                     delayUs /= mPlaybackRate;
450                 }
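                // As a rough illustration, assuming msecsPerFrame() is about
                // 1000 / sampleRate: at 44.1 kHz, 4410 pending frames correspond
                // to ~100 ms of queued audio, so the next drain below would be
                // posted ~50 ms from now.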
451 
452                 // Let's give it more data after about half that time
453                 // has elapsed.
454                 Mutex::Autolock autoLock(mLock);
455                 postDrainAudioQueue_l(delayUs / 2);
456             }
457             break;
458         }
459 
460         case kWhatDrainVideoQueue:
461         {
462             int32_t generation;
463             CHECK(msg->findInt32("drainGeneration", &generation));
464             if (generation != getDrainGeneration(false /* audio */)) {
465                 break;
466             }
467 
468             mDrainVideoQueuePending = false;
469 
470             onDrainVideoQueue();
471 
472             postDrainVideoQueue();
473             break;
474         }
475 
476         case kWhatPostDrainVideoQueue:
477         {
478             int32_t generation;
479             CHECK(msg->findInt32("drainGeneration", &generation));
480             if (generation != getDrainGeneration(false /* audio */)) {
481                 break;
482             }
483 
484             mDrainVideoQueuePending = false;
485             postDrainVideoQueue();
486             break;
487         }
488 
489         case kWhatQueueBuffer:
490         {
491             onQueueBuffer(msg);
492             break;
493         }
494 
495         case kWhatQueueEOS:
496         {
497             onQueueEOS(msg);
498             break;
499         }
500 
501         case kWhatConfigPlayback:
502         {
503             sp<AReplyToken> replyID;
504             CHECK(msg->senderAwaitsResponse(&replyID));
505             AudioPlaybackRate rate;
506             readFromAMessage(msg, &rate);
507             status_t err = onConfigPlayback(rate);
508             sp<AMessage> response = new AMessage;
509             response->setInt32("err", err);
510             response->postReply(replyID);
511             break;
512         }
513 
514         case kWhatGetPlaybackSettings:
515         {
516             sp<AReplyToken> replyID;
517             CHECK(msg->senderAwaitsResponse(&replyID));
518             AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
519             status_t err = onGetPlaybackSettings(&rate);
520             sp<AMessage> response = new AMessage;
521             if (err == OK) {
522                 writeToAMessage(response, rate);
523             }
524             response->setInt32("err", err);
525             response->postReply(replyID);
526             break;
527         }
528 
529         case kWhatConfigSync:
530         {
531             sp<AReplyToken> replyID;
532             CHECK(msg->senderAwaitsResponse(&replyID));
533             AVSyncSettings sync;
534             float videoFpsHint;
535             readFromAMessage(msg, &sync, &videoFpsHint);
536             status_t err = onConfigSync(sync, videoFpsHint);
537             sp<AMessage> response = new AMessage;
538             response->setInt32("err", err);
539             response->postReply(replyID);
540             break;
541         }
542 
543         case kWhatGetSyncSettings:
544         {
545             sp<AReplyToken> replyID;
546             CHECK(msg->senderAwaitsResponse(&replyID));
547 
548             ALOGV("kWhatGetSyncSettings");
549             AVSyncSettings sync;
550             float videoFps = -1.f;
551             status_t err = onGetSyncSettings(&sync, &videoFps);
552             sp<AMessage> response = new AMessage;
553             if (err == OK) {
554                 writeToAMessage(response, sync, videoFps);
555             }
556             response->setInt32("err", err);
557             response->postReply(replyID);
558             break;
559         }
560 
561         case kWhatFlush:
562         {
563             onFlush(msg);
564             break;
565         }
566 
567         case kWhatDisableOffloadAudio:
568         {
569             onDisableOffloadAudio();
570             break;
571         }
572 
573         case kWhatEnableOffloadAudio:
574         {
575             onEnableOffloadAudio();
576             break;
577         }
578 
579         case kWhatPause:
580         {
581             onPause();
582             break;
583         }
584 
585         case kWhatResume:
586         {
587             onResume();
588             break;
589         }
590 
591         case kWhatSetVideoFrameRate:
592         {
593             float fps;
594             CHECK(msg->findFloat("frame-rate", &fps));
595             onSetVideoFrameRate(fps);
596             break;
597         }
598 
599         case kWhatAudioTearDown:
600         {
601             onAudioTearDown(kDueToError);
602             break;
603         }
604 
605         case kWhatAudioOffloadPauseTimeout:
606         {
607             int32_t generation;
608             CHECK(msg->findInt32("drainGeneration", &generation));
609             if (generation != mAudioOffloadPauseTimeoutGeneration) {
610                 break;
611             }
612             ALOGV("Audio Offload tear down due to pause timeout.");
613             onAudioTearDown(kDueToTimeout);
614             mWakeLock->release();
615             break;
616         }
617 
618         default:
619             TRESPASS();
620             break;
621     }
622 }
623 
624 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
625     if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
626         return;
627     }
628 
629     if (mAudioQueue.empty()) {
630         return;
631     }
632 
633     mDrainAudioQueuePending = true;
634     sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
635     msg->setInt32("drainGeneration", mAudioDrainGeneration);
636     msg->post(delayUs);
637 }
638 
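// The two functions below gate the kWhatMediaRenderingStart notification on the
// drain generations: prepareForMediaRenderingStart_l() latches the current
// audio/video drain generations, and notifyIfMediaRenderingStarted_l() posts the
// notification only while both latched generations still match the current ones,
// then resets them to -1 so it fires at most once per prepare.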
639 void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
640     mAudioRenderingStartGeneration = mAudioDrainGeneration;
641     mVideoRenderingStartGeneration = mVideoDrainGeneration;
642 }
643 
644 void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
645     if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
646         mAudioRenderingStartGeneration == mAudioDrainGeneration) {
647         mVideoRenderingStartGeneration = -1;
648         mAudioRenderingStartGeneration = -1;
649 
650         sp<AMessage> notify = mNotify->dup();
651         notify->setInt32("what", kWhatMediaRenderingStart);
652         notify->post();
653     }
654 }
655 
656 // static
657 size_t NuPlayer::Renderer::AudioSinkCallback(
658         MediaPlayerBase::AudioSink * /* audioSink */,
659         void *buffer,
660         size_t size,
661         void *cookie,
662         MediaPlayerBase::AudioSink::cb_event_t event) {
663     NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
664 
665     switch (event) {
666         case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
667         {
668             return me->fillAudioBuffer(buffer, size);
669             break;
670         }
671 
672         case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
673         {
674             ALOGV("AudioSink::CB_EVENT_STREAM_END");
675             me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
676             break;
677         }
678 
679         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
680         {
681             ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
682             me->notifyAudioTearDown();
683             break;
684         }
685     }
686 
687     return 0;
688 }
689 
690 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
691     Mutex::Autolock autoLock(mLock);
692 
693     if (!mUseAudioCallback) {
694         return 0;
695     }
696 
697     bool hasEOS = false;
698 
699     size_t sizeCopied = 0;
700     bool firstEntry = true;
701     QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
702     while (sizeCopied < size && !mAudioQueue.empty()) {
703         entry = &*mAudioQueue.begin();
704 
705         if (entry->mBuffer == NULL) { // EOS
706             hasEOS = true;
707             mAudioQueue.erase(mAudioQueue.begin());
708             break;
709         }
710 
711         if (firstEntry && entry->mOffset == 0) {
712             firstEntry = false;
713             int64_t mediaTimeUs;
714             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
715             ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
716             setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
717         }
718 
719         size_t copy = entry->mBuffer->size() - entry->mOffset;
720         size_t sizeRemaining = size - sizeCopied;
721         if (copy > sizeRemaining) {
722             copy = sizeRemaining;
723         }
724 
725         memcpy((char *)buffer + sizeCopied,
726                entry->mBuffer->data() + entry->mOffset,
727                copy);
728 
729         entry->mOffset += copy;
730         if (entry->mOffset == entry->mBuffer->size()) {
731             entry->mNotifyConsumed->post();
732             mAudioQueue.erase(mAudioQueue.begin());
733             entry = NULL;
734         }
735         sizeCopied += copy;
736 
737         notifyIfMediaRenderingStarted_l();
738     }
739 
740     if (mAudioFirstAnchorTimeMediaUs >= 0) {
741         int64_t nowUs = ALooper::GetNowUs();
742         int64_t nowMediaUs =
743             mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
744         // we don't know how much data we are queueing for offloaded tracks.
745         mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
746     }
747 
748     // for non-offloaded audio, we need to compute the frames written because
749     // there is no EVENT_STREAM_END notification. The frames written give
750     // an estimate of the pending played-out duration.
751     if (!offloadingAudio()) {
752         mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
753     }
754 
755     if (hasEOS) {
756         (new AMessage(kWhatStopAudioSink, this))->post();
757         // As there is currently no EVENT_STREAM_END callback notification for
758         // non-offloaded audio tracks, we need to post the EOS ourselves.
759         if (!offloadingAudio()) {
760             int64_t postEOSDelayUs = 0;
761             if (mAudioSink->needsTrailingPadding()) {
762                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
763             }
764             ALOGV("fillAudioBuffer: notifyEOS "
765                     "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
766                     mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
767             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
768         }
769     }
770     return sizeCopied;
771 }
772 
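// Scan the audio queue for the last entry that marks EOS (a NULL buffer, or a
// buffer whose "eos" flag is set). If one is found, post all pending
// consumed/EOS notifications up to that point and drop those samples, so that
// the decoder is not left waiting when the audio sink cannot be drained.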
773 void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
774     List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
775     bool foundEOS = false;
776     while (it != mAudioQueue.end()) {
777         int32_t eos;
778         QueueEntry *entry = &*it++;
779         if (entry->mBuffer == NULL
780                 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
781             itEOS = it;
782             foundEOS = true;
783         }
784     }
785 
786     if (foundEOS) {
787         // post all replies before EOS and drop the samples
788         for (it = mAudioQueue.begin(); it != itEOS; it++) {
789             if (it->mBuffer == NULL) {
790                 // delay doesn't matter as we don't even have an AudioTrack
791                 notifyEOS(true /* audio */, it->mFinalResult);
792             } else {
793                 it->mNotifyConsumed->post();
794             }
795         }
796         mAudioQueue.erase(mAudioQueue.begin(), itEOS);
797     }
798 }
799 
800 bool NuPlayer::Renderer::onDrainAudioQueue() {
801     // TODO: This call to getPosition checks if AudioTrack has been created
802     // in AudioSink before draining audio. If AudioTrack doesn't exist, then
803     // CHECKs on getPosition will fail.
804     // We still need to figure out why the AudioTrack is not created when
805     // this function is called. One possible reason is leftover
806     // audio. Another place to check is whether the decoder has received
807     // INFO_FORMAT_CHANGED as its first buffer, since the AudioSink is opened
808     // there; there may also be interactions with a flush issued
809     // immediately after start. Investigate the error message
810     // "vorbis_dsp_synthesis returned -135", along with RTSP.
811     uint32_t numFramesPlayed;
812     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
813         // When getPosition fails, the renderer will not reschedule the draining
814         // unless new samples are queued.
815         // If we have a pending EOS (or an "eos" marker for discontinuities), we need
816         // to post it now, as NuPlayerDecoder might be waiting for it.
817         drainAudioQueueUntilLastEOS();
818 
819         ALOGW("onDrainAudioQueue(): audio sink is not ready");
820         return false;
821     }
822 
823 #if 0
824     ssize_t numFramesAvailableToWrite =
825         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
826 
827     if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
828         ALOGI("audio sink underrun");
829     } else {
830         ALOGV("audio queue has %d frames left to play",
831              mAudioSink->frameCount() - numFramesAvailableToWrite);
832     }
833 #endif
834 
835     uint32_t prevFramesWritten = mNumFramesWritten;
836     while (!mAudioQueue.empty()) {
837         QueueEntry *entry = &*mAudioQueue.begin();
838 
839         mLastAudioBufferDrained = entry->mBufferOrdinal;
840 
841         if (entry->mBuffer == NULL) {
842             // EOS
843             int64_t postEOSDelayUs = 0;
844             if (mAudioSink->needsTrailingPadding()) {
845                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
846             }
847             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
848 
849             mAudioQueue.erase(mAudioQueue.begin());
850             entry = NULL;
851             if (mAudioSink->needsTrailingPadding()) {
852                 // If we're not in gapless playback (i.e. through setNextPlayer), we
853                 // need to stop the track here, because that will play out the last
854                 // little bit at the end of the file. Otherwise short files won't play.
855                 mAudioSink->stop();
856                 mNumFramesWritten = 0;
857             }
858             return false;
859         }
860 
861         // ignore a 0-sized buffer, which could be an EOS marker with no data
862         if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
863             int64_t mediaTimeUs;
864             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
865             ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
866                     mediaTimeUs / 1E6);
867             onNewAudioMediaTime(mediaTimeUs);
868         }
869 
870         size_t copy = entry->mBuffer->size() - entry->mOffset;
871 
872         ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
873                                             copy, false /* blocking */);
874         if (written < 0) {
875             // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
876             if (written == WOULD_BLOCK) {
877                 ALOGV("AudioSink write would block when writing %zu bytes", copy);
878             } else {
879                 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
880                 notifyAudioTearDown();
881             }
882             break;
883         }
884 
885         entry->mOffset += written;
886         if (entry->mOffset == entry->mBuffer->size()) {
887             entry->mNotifyConsumed->post();
888             mAudioQueue.erase(mAudioQueue.begin());
889 
890             entry = NULL;
891         }
892 
893         size_t copiedFrames = written / mAudioSink->frameSize();
894         mNumFramesWritten += copiedFrames;
895 
896         {
897             Mutex::Autolock autoLock(mLock);
898             notifyIfMediaRenderingStarted_l();
899         }
900 
901         if (written != (ssize_t)copy) {
902             // A short count was received from AudioSink::write()
903             //
904             // AudioSink write is called in non-blocking mode.
905             // It may return with a short count when:
906             //
907             // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
908             // 2) The data to be copied exceeds the available buffer in AudioSink.
909             // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
910             // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
911 
912             // (Case 1)
913             // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
914             // needs to fail, as we should not carry over fractional frames between calls.
915             CHECK_EQ(copy % mAudioSink->frameSize(), 0);
916 
917             // (Case 2, 3, 4)
918             // Return early to the caller.
919             // Beware of calling immediately again as this may busy-loop if you are not careful.
920             ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
921             break;
922         }
923     }
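    // Advance the media clock's upper bound to cover the audio written so far:
    // the anchor media time plus the duration of the frames written since the
    // anchor, converted via the sink's msecs-per-frame.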
924     int64_t maxTimeMedia;
925     {
926         Mutex::Autolock autoLock(mLock);
927         maxTimeMedia =
928             mAnchorTimeMediaUs +
929                     (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
930                             * 1000LL * mAudioSink->msecsPerFrame());
931     }
932     mMediaClock->updateMaxTimeMedia(maxTimeMedia);
933 
934     // calculate whether we need to reschedule another write.
935     bool reschedule = !mAudioQueue.empty()
936             && (!mPaused
937                 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
938     //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
939     //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
940     return reschedule;
941 }
942 
943 int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
944     int32_t sampleRate = offloadingAudio() ?
945             mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
946     // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
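    // (numFrames is cast to int32_t; 2^31 frames at e.g. 48 kHz is about
    // 2147483648 / 48000 ~= 44739 seconds, i.e. roughly 12.4 hours, which is the
    // overflow point referred to above.)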
947     return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
948 }
949 
950 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
951 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
952     int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
953     return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
954 }
955 
956 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
957     int64_t realUs;
958     if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
959         // If we failed to get the current position, e.g. because the audio clock
960         // is not ready, then just play out the video immediately without delay.
961         return nowUs;
962     }
963     return realUs;
964 }
965 
966 void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
967     Mutex::Autolock autoLock(mLock);
968     // TRICKY: vorbis decoder generates multiple frames with the same
969     // timestamp, so only update on the first frame with a given timestamp
970     if (mediaTimeUs == mAnchorTimeMediaUs) {
971         return;
972     }
973     setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
974     int64_t nowUs = ALooper::GetNowUs();
975     int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
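    // nowMediaUs is the media time currently playing out of the sink: the
    // timestamp of the buffer about to be written, minus the duration of audio
    // already written but not yet played out.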
976     mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
977     mAnchorNumFramesWritten = mNumFramesWritten;
978     mAnchorTimeMediaUs = mediaTimeUs;
979 }
980 
981 // Called without mLock acquired.
982 void NuPlayer::Renderer::postDrainVideoQueue() {
983     if (mDrainVideoQueuePending
984             || getSyncQueues()
985             || (mPaused && mVideoSampleReceived)) {
986         return;
987     }
988 
989     if (mVideoQueue.empty()) {
990         return;
991     }
992 
993     QueueEntry &entry = *mVideoQueue.begin();
994 
995     sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
996     msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
997 
998     if (entry.mBuffer == NULL) {
999         // EOS doesn't carry a timestamp.
1000         msg->post();
1001         mDrainVideoQueuePending = true;
1002         return;
1003     }
1004 
1005     int64_t delayUs;
1006     int64_t nowUs = ALooper::GetNowUs();
1007     int64_t realTimeUs;
1008     if (mFlags & FLAG_REAL_TIME) {
1009         int64_t mediaTimeUs;
1010         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1011         realTimeUs = mediaTimeUs;
1012     } else {
1013         int64_t mediaTimeUs;
1014         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1015 
1016         {
1017             Mutex::Autolock autoLock(mLock);
1018             if (mAnchorTimeMediaUs < 0) {
1019                 mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1020                 mAnchorTimeMediaUs = mediaTimeUs;
1021                 realTimeUs = nowUs;
1022             } else {
1023                 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1024             }
1025         }
1026         if (!mHasAudio) {
1027             // smooth out videos >= 10fps
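            // (The 100,000 us allowance equals one frame interval at 10 fps.)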
1028             mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1029         }
1030 
1031         // Heuristics to handle the situation where the media time changed without a
1032         // discontinuity. If we have not drained an audio buffer that was
1033         // received after this buffer, repost in 10 msec. Otherwise repost
1034         // in 500 msec.
1035         delayUs = realTimeUs - nowUs;
1036         if (delayUs > 500000) {
1037             int64_t postDelayUs = 500000;
1038             if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
1039                 postDelayUs = 10000;
1040             }
1041             msg->setWhat(kWhatPostDrainVideoQueue);
1042             msg->post(postDelayUs);
1043             mVideoScheduler->restart();
1044             ALOGI("possible video time jump of %dms, retrying in %dms",
1045                     (int)(delayUs / 1000), (int)(postDelayUs / 1000));
1046             mDrainVideoQueuePending = true;
1047             return;
1048         }
1049     }
1050 
1051     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1052     int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
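    // For example, on a 60 Hz display the vsync period is ~16.7 ms, so
    // twoVsyncsUs is ~33 ms; the drain message below is posted that far ahead of
    // the scheduled render time.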
1053 
1054     delayUs = realTimeUs - nowUs;
1055 
1056     ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
1057     // post 2 display refreshes before rendering is due
1058     msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1059 
1060     mDrainVideoQueuePending = true;
1061 }
1062 
1063 void NuPlayer::Renderer::onDrainVideoQueue() {
1064     if (mVideoQueue.empty()) {
1065         return;
1066     }
1067 
1068     QueueEntry *entry = &*mVideoQueue.begin();
1069 
1070     if (entry->mBuffer == NULL) {
1071         // EOS
1072 
1073         notifyEOS(false /* audio */, entry->mFinalResult);
1074 
1075         mVideoQueue.erase(mVideoQueue.begin());
1076         entry = NULL;
1077 
1078         setVideoLateByUs(0);
1079         return;
1080     }
1081 
1082     int64_t nowUs = -1;
1083     int64_t realTimeUs;
1084     if (mFlags & FLAG_REAL_TIME) {
1085         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1086     } else {
1087         int64_t mediaTimeUs;
1088         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1089 
1090         nowUs = ALooper::GetNowUs();
1091         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1092     }
1093 
1094     bool tooLate = false;
1095 
1096     if (!mPaused) {
1097         if (nowUs == -1) {
1098             nowUs = ALooper::GetNowUs();
1099         }
1100         setVideoLateByUs(nowUs - realTimeUs);
1101         tooLate = (mVideoLateByUs > 40000);
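        // 40 ms is the lateness threshold: frames later than this are posted with
        // render=false below, so the consumer releases them without displaying.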
1102 
1103         if (tooLate) {
1104             ALOGV("video late by %lld us (%.2f secs)",
1105                  (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1106         } else {
1107             int64_t mediaUs = 0;
1108             mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1109             ALOGV("rendering video at media time %.2f secs",
1110                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
1111                     mediaUs) / 1E6);
1112         }
1113     } else {
1114         setVideoLateByUs(0);
1115         if (!mVideoSampleReceived && !mHasAudio) {
1116             // This ensures that the first frame after a flush won't be used as the anchor
1117             // while the renderer is in the paused state, because resume can happen at any time after a seek.
1118             Mutex::Autolock autoLock(mLock);
1119             clearAnchorTime_l();
1120         }
1121     }
1122 
1123     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1124     entry->mNotifyConsumed->setInt32("render", !tooLate);
1125     entry->mNotifyConsumed->post();
1126     mVideoQueue.erase(mVideoQueue.begin());
1127     entry = NULL;
1128 
1129     mVideoSampleReceived = true;
1130 
1131     if (!mPaused) {
1132         if (!mVideoRenderingStarted) {
1133             mVideoRenderingStarted = true;
1134             notifyVideoRenderingStart();
1135         }
1136         Mutex::Autolock autoLock(mLock);
1137         notifyIfMediaRenderingStarted_l();
1138     }
1139 }
1140 
1141 void NuPlayer::Renderer::notifyVideoRenderingStart() {
1142     sp<AMessage> notify = mNotify->dup();
1143     notify->setInt32("what", kWhatVideoRenderingStart);
1144     notify->post();
1145 }
1146 
1147 void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1148     sp<AMessage> notify = mNotify->dup();
1149     notify->setInt32("what", kWhatEOS);
1150     notify->setInt32("audio", static_cast<int32_t>(audio));
1151     notify->setInt32("finalResult", finalResult);
1152     notify->post(delayUs);
1153 }
1154 
1155 void NuPlayer::Renderer::notifyAudioTearDown() {
1156     (new AMessage(kWhatAudioTearDown, this))->post();
1157 }
1158 
1159 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1160     int32_t audio;
1161     CHECK(msg->findInt32("audio", &audio));
1162 
1163     if (dropBufferIfStale(audio, msg)) {
1164         return;
1165     }
1166 
1167     if (audio) {
1168         mHasAudio = true;
1169     } else {
1170         mHasVideo = true;
1171     }
1172 
1173     if (mHasVideo) {
1174         if (mVideoScheduler == NULL) {
1175             mVideoScheduler = new VideoFrameScheduler();
1176             mVideoScheduler->init();
1177         }
1178     }
1179 
1180     sp<ABuffer> buffer;
1181     CHECK(msg->findBuffer("buffer", &buffer));
1182 
1183     sp<AMessage> notifyConsumed;
1184     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1185 
1186     QueueEntry entry;
1187     entry.mBuffer = buffer;
1188     entry.mNotifyConsumed = notifyConsumed;
1189     entry.mOffset = 0;
1190     entry.mFinalResult = OK;
1191     entry.mBufferOrdinal = ++mTotalBuffersQueued;
1192 
1193     if (audio) {
1194         Mutex::Autolock autoLock(mLock);
1195         mAudioQueue.push_back(entry);
1196         postDrainAudioQueue_l();
1197     } else {
1198         mVideoQueue.push_back(entry);
1199         postDrainVideoQueue();
1200     }
1201 
1202     Mutex::Autolock autoLock(mLock);
1203     if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1204         return;
1205     }
1206 
1207     sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1208     sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1209 
1210     if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1211         // EOS signalled on either queue.
1212         syncQueuesDone_l();
1213         return;
1214     }
1215 
1216     int64_t firstAudioTimeUs;
1217     int64_t firstVideoTimeUs;
1218     CHECK(firstAudioBuffer->meta()
1219             ->findInt64("timeUs", &firstAudioTimeUs));
1220     CHECK(firstVideoBuffer->meta()
1221             ->findInt64("timeUs", &firstVideoTimeUs));
1222 
1223     int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1224 
1225     ALOGV("queueDiff = %.2f secs", diff / 1E6);
1226 
1227     if (diff > 100000ll) {
1228         // Audio data starts more than 0.1 secs before video.
1229         // Drop some audio.
1230 
1231         (*mAudioQueue.begin()).mNotifyConsumed->post();
1232         mAudioQueue.erase(mAudioQueue.begin());
1233         return;
1234     }
1235 
1236     syncQueuesDone_l();
1237 }
1238 
1239 void NuPlayer::Renderer::syncQueuesDone_l() {
1240     if (!mSyncQueues) {
1241         return;
1242     }
1243 
1244     mSyncQueues = false;
1245 
1246     if (!mAudioQueue.empty()) {
1247         postDrainAudioQueue_l();
1248     }
1249 
1250     if (!mVideoQueue.empty()) {
1251         mLock.unlock();
1252         postDrainVideoQueue();
1253         mLock.lock();
1254     }
1255 }
1256 
1257 void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1258     int32_t audio;
1259     CHECK(msg->findInt32("audio", &audio));
1260 
1261     if (dropBufferIfStale(audio, msg)) {
1262         return;
1263     }
1264 
1265     int32_t finalResult;
1266     CHECK(msg->findInt32("finalResult", &finalResult));
1267 
1268     QueueEntry entry;
1269     entry.mOffset = 0;
1270     entry.mFinalResult = finalResult;
1271 
1272     if (audio) {
1273         Mutex::Autolock autoLock(mLock);
1274         if (mAudioQueue.empty() && mSyncQueues) {
1275             syncQueuesDone_l();
1276         }
1277         mAudioQueue.push_back(entry);
1278         postDrainAudioQueue_l();
1279     } else {
1280         if (mVideoQueue.empty() && getSyncQueues()) {
1281             Mutex::Autolock autoLock(mLock);
1282             syncQueuesDone_l();
1283         }
1284         mVideoQueue.push_back(entry);
1285         postDrainVideoQueue();
1286     }
1287 }
1288 
1289 void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1290     int32_t audio, notifyComplete;
1291     CHECK(msg->findInt32("audio", &audio));
1292 
1293     {
1294         Mutex::Autolock autoLock(mLock);
1295         if (audio) {
1296             notifyComplete = mNotifyCompleteAudio;
1297             mNotifyCompleteAudio = false;
1298         } else {
1299             notifyComplete = mNotifyCompleteVideo;
1300             mNotifyCompleteVideo = false;
1301         }
1302 
1303         // If we're currently syncing the queues, i.e. dropping audio while
1304         // aligning the first audio/video buffer times and only one of the
1305         // two queues has data, we may starve that queue by not requesting
1306         // more buffers from the decoder. If the other source then encounters
1307         // a discontinuity that leads to flushing, we'll never find the
1308         // corresponding discontinuity on the other queue.
1309         // Therefore we'll stop syncing the queues if at least one of them
1310         // is flushed.
1311         syncQueuesDone_l();
1312         clearAnchorTime_l();
1313     }
1314 
1315     ALOGV("flushing %s", audio ? "audio" : "video");
1316     if (audio) {
1317         {
1318             Mutex::Autolock autoLock(mLock);
1319             flushQueue(&mAudioQueue);
1320 
1321             ++mAudioDrainGeneration;
1322             prepareForMediaRenderingStart_l();
1323 
1324             // the frame count will be reset after flush.
1325             clearAudioFirstAnchorTime_l();
1326         }
1327 
1328         mDrainAudioQueuePending = false;
1329 
1330         if (offloadingAudio()) {
1331             mAudioSink->pause();
1332             mAudioSink->flush();
1333             if (!mPaused) {
1334                 mAudioSink->start();
1335             }
1336         } else {
1337             mAudioSink->pause();
1338             mAudioSink->flush();
1339             // Call stop() to signal to the AudioSink to completely fill the
1340             // internal buffer before resuming playback.
1341             mAudioSink->stop();
1342             if (!mPaused) {
1343                 mAudioSink->start();
1344             }
1345             mNumFramesWritten = 0;
1346         }
1347     } else {
1348         flushQueue(&mVideoQueue);
1349 
1350         mDrainVideoQueuePending = false;
1351 
1352         if (mVideoScheduler != NULL) {
1353             mVideoScheduler->restart();
1354         }
1355 
1356         Mutex::Autolock autoLock(mLock);
1357         ++mVideoDrainGeneration;
1358         prepareForMediaRenderingStart_l();
1359     }
1360 
1361     mVideoSampleReceived = false;
1362 
1363     if (notifyComplete) {
1364         notifyFlushComplete(audio);
1365     }
1366 }
1367 
1368 void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1369     while (!queue->empty()) {
1370         QueueEntry *entry = &*queue->begin();
1371 
1372         if (entry->mBuffer != NULL) {
1373             entry->mNotifyConsumed->post();
1374         }
1375 
1376         queue->erase(queue->begin());
1377         entry = NULL;
1378     }
1379 }
1380 
1381 void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1382     sp<AMessage> notify = mNotify->dup();
1383     notify->setInt32("what", kWhatFlushComplete);
1384     notify->setInt32("audio", static_cast<int32_t>(audio));
1385     notify->post();
1386 }
1387 
1388 bool NuPlayer::Renderer::dropBufferIfStale(
1389         bool audio, const sp<AMessage> &msg) {
1390     int32_t queueGeneration;
1391     CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1392 
1393     if (queueGeneration == getQueueGeneration(audio)) {
1394         return false;
1395     }
1396 
1397     sp<AMessage> notifyConsumed;
1398     if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1399         notifyConsumed->post();
1400     }
1401 
1402     return true;
1403 }
1404 
1405 void NuPlayer::Renderer::onAudioSinkChanged() {
1406     if (offloadingAudio()) {
1407         return;
1408     }
1409     CHECK(!mDrainAudioQueuePending);
1410     mNumFramesWritten = 0;
1411     {
1412         Mutex::Autolock autoLock(mLock);
1413         mAnchorNumFramesWritten = -1;
1414     }
1415     uint32_t written;
1416     if (mAudioSink->getFramesWritten(&written) == OK) {
1417         mNumFramesWritten = written;
1418     }
1419 }
1420 
1421 void NuPlayer::Renderer::onDisableOffloadAudio() {
1422     Mutex::Autolock autoLock(mLock);
1423     mFlags &= ~FLAG_OFFLOAD_AUDIO;
1424     ++mAudioDrainGeneration;
1425     if (mAudioRenderingStartGeneration != -1) {
1426         prepareForMediaRenderingStart_l();
1427     }
1428 }
1429 
1430 void NuPlayer::Renderer::onEnableOffloadAudio() {
1431     Mutex::Autolock autoLock(mLock);
1432     mFlags |= FLAG_OFFLOAD_AUDIO;
1433     ++mAudioDrainGeneration;
1434     if (mAudioRenderingStartGeneration != -1) {
1435         prepareForMediaRenderingStart_l();
1436     }
1437 }
1438 
1439 void NuPlayer::Renderer::onPause() {
1440     if (mPaused) {
1441         return;
1442     }
1443 
1444     {
1445         Mutex::Autolock autoLock(mLock);
1446         // we do not increment the audio drain generation so that we keep filling the audio buffer during pause.
1447         ++mVideoDrainGeneration;
1448         prepareForMediaRenderingStart_l();
1449         mPaused = true;
1450         mMediaClock->setPlaybackRate(0.0);
1451     }
1452 
1453     mDrainAudioQueuePending = false;
1454     mDrainVideoQueuePending = false;
1455 
1456     if (mHasAudio) {
1457         mAudioSink->pause();
1458         startAudioOffloadPauseTimeout();
1459     }
1460 
1461     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1462           mAudioQueue.size(), mVideoQueue.size());
1463 }
1464 
1465 void NuPlayer::Renderer::onResume() {
1466     if (!mPaused) {
1467         return;
1468     }
1469 
1470     if (mHasAudio) {
1471         cancelAudioOffloadPauseTimeout();
1472         status_t err = mAudioSink->start();
1473         if (err != OK) {
1474             notifyAudioTearDown();
1475         }
1476     }
1477 
1478     {
1479         Mutex::Autolock autoLock(mLock);
1480         mPaused = false;
1481 
1482         // configure audiosink as we did not do it when pausing
1483         if (mAudioSink != NULL && mAudioSink->ready()) {
1484             mAudioSink->setPlaybackRate(mPlaybackSettings);
1485         }
1486 
1487         mMediaClock->setPlaybackRate(mPlaybackRate);
1488 
1489         if (!mAudioQueue.empty()) {
1490             postDrainAudioQueue_l();
1491         }
1492     }
1493 
1494     if (!mVideoQueue.empty()) {
1495         postDrainVideoQueue();
1496     }
1497 }
1498 
1499 void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1500     if (mVideoScheduler == NULL) {
1501         mVideoScheduler = new VideoFrameScheduler();
1502     }
1503     mVideoScheduler->init(fps);
1504 }
1505 
1506 int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1507     Mutex::Autolock autoLock(mLock);
1508     return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1509 }
1510 
1511 int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1512     Mutex::Autolock autoLock(mLock);
1513     return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1514 }
1515 
1516 bool NuPlayer::Renderer::getSyncQueues() {
1517     Mutex::Autolock autoLock(mLock);
1518     return mSyncQueues;
1519 }
1520 
1521 // TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
1522 // as it acquires locks and may query the audio driver.
1523 //
1524 // Some calls could conceivably retrieve extrapolated data instead of
1525 // accessing getTimestamp() or getPosition() every time a data buffer with
1526 // a media time is received.
1527 //
1528 // Calculate duration of played samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
    uint32_t numFramesPlayed;
    int64_t numFramesPlayedAt;
    AudioTimestamp ts;
    static const int64_t kStaleTimestamp100ms = 100000;

    status_t res = mAudioSink->getTimestamp(ts);
    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
        numFramesPlayed = ts.mPosition;
        numFramesPlayedAt =
            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
        const int64_t timestampAge = nowUs - numFramesPlayedAt;
        if (timestampAge > kStaleTimestamp100ms) {
            // FIXME (audio): getTimestamp() returns a timestamp that may come from the
            // audio mixing threads. After pausing, the MixerThread may go idle, so the
            // mTime estimate can become stale. Assuming the MixerThread runs every 20 ms,
            // with the FastMixer at 5 ms, the maximum latency should be about 25 ms, with
            // an average around 12 ms (to be verified). For safety we use 100 ms.
            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
                    (long long)nowUs, (long long)numFramesPlayedAt);
            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
        }
        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
        numFramesPlayed = 0;
        numFramesPlayedAt = nowUs;
        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
        //        numFramesPlayed, (long long)numFramesPlayedAt);
    } else {                         // case 3: transitory at new track or audio fast tracks.
        res = mAudioSink->getPosition(&numFramesPlayed);
        CHECK_EQ(res, (status_t)OK);
        numFramesPlayedAt = nowUs;
        numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
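        // Note: getPosition() reports frames consumed by the sink rather than frames
        // actually rendered at the output, so roughly half of the reported sink latency
        // is assumed to still be pending -- hence the XXX adjustment above.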
        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
    }

    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
    int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
            + nowUs - numFramesPlayedAt;
    if (durationUs < 0) {
        // Occurs when numFramesPlayed is very small and one of the following holds:
        // (1) In case 1, nowUs was computed before getTimestamp() was called, so
        //     numFramesPlayedAt exceeds nowUs by more than the duration of numFramesPlayed.
        // (2) In case 3, the latency adjustment pushed numFramesPlayedAt past nowUs by
        //     more than the duration of numFramesPlayed.
        //
        // Both of these are transitory conditions.
        ALOGV("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs);
        durationUs = 0;
    }
    ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
    return durationUs;
}

void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
    if (mAudioTornDown) {
        return;
    }
    mAudioTornDown = true;

    int64_t currentPositionUs;
    sp<AMessage> notify = mNotify->dup();
    if (getCurrentPosition(&currentPositionUs) == OK) {
        notify->setInt64("positionUs", currentPositionUs);
    }

    mAudioSink->stop();
    mAudioSink->flush();

    notify->setInt32("what", kWhatAudioTearDown);
    notify->setInt32("reason", reason);
    notify->post();
}

void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
    if (offloadingAudio()) {
        mWakeLock->acquire();
        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
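        // The generation value lets the message handler ignore this timeout if the pause
        // is cancelled in the meantime -- cancelAudioOffloadPauseTimeout() below bumps
        // mAudioOffloadPauseTimeoutGeneration, invalidating any message still in flight.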
        msg->post(kOffloadPauseMaxUs);
    }
}

void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
    if (offloadingAudio()) {
        mWakeLock->release(true);
        ++mAudioOffloadPauseTimeoutGeneration;
    }
}

status_t NuPlayer::Renderer::onOpenAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags) {
    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
            offloadOnly, offloadingAudio());
    bool audioSinkChanged = false;

    int32_t numChannels;
    CHECK(format->findInt32("channel-count", &numChannels));

    int32_t channelMask;
    if (!format->findInt32("channel-mask", &channelMask)) {
        // signal to the AudioSink to derive the mask from count.
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }

    int32_t sampleRate;
    CHECK(format->findInt32("sample-rate", &sampleRate));

    if (offloadingAudio()) {
        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
        AString mime;
        CHECK(format->findString("mime", &mime));
        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());

        if (err != OK) {
            ALOGE("Couldn't map mime \"%s\" to a valid "
                    "audio_format", mime.c_str());
            onDisableOffloadAudio();
        } else {
            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                    mime.c_str(), audioFormat);

            int avgBitRate = -1;
            format->findInt32("bit-rate", &avgBitRate);

            int32_t aacProfile = -1;
            if (audioFormat == AUDIO_FORMAT_AAC
                    && format->findInt32("aac-profile", &aacProfile)) {
                // Redefine the AAC audio_format according to the AAC profile.
                mapAACProfileToAudioFormat(
                        audioFormat,
                        aacProfile);
            }

            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.duration_us = -1;
            format->findInt64(
                    "durationUs", &offloadInfo.duration_us);
            offloadInfo.sample_rate = sampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = hasVideo;
            offloadInfo.is_streaming = true;

            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
                ALOGV("openAudioSink: no change in offload mode");
                // no change from previous configuration, everything ok.
                return OK;
            }
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;

            ALOGV("openAudioSink: try to open AudioSink in offload mode");
            uint32_t offloadFlags = flags;
            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
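            // Illustrative example: a caller that passed AUDIO_OUTPUT_FLAG_DEEP_BUFFER
            // ends up reopening the sink with COMPRESS_OFFLOAD set and DEEP_BUFFER cleared.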
            audioSinkChanged = true;
            mAudioSink->close();

            err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    audioFormat,
                    0 /* bufferCount - unused */,
                    &NuPlayer::Renderer::AudioSinkCallback,
                    this,
                    (audio_output_flags_t)offloadFlags,
                    &offloadInfo);

            if (err == OK) {
                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
            }

            if (err == OK) {
                // If the playback is offloaded to h/w, we pass
                // the HAL some metadata information.
                // We don't want to do this for PCM because it
                // will be going through the AudioFlinger mixer
                // before reaching the hardware.
                // TODO
                mCurrentOffloadInfo = offloadInfo;
                if (!mPaused) { // for preview mode, don't start if paused
                    err = mAudioSink->start();
                }
                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
            }
            if (err != OK) {
                // Clean up and fall back to non-offload mode.
                mAudioSink->close();
                onDisableOffloadAudio();
                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                ALOGV("openAudioSink: offload failed");
            } else {
                mUseAudioCallback = true;  // offload mode transfers data through callback
                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
            }
        }
    }
    if (!offloadOnly && !offloadingAudio()) {
        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
        uint32_t pcmFlags = flags;
        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

        const PcmInfo info = {
                (audio_channel_mask_t)channelMask,
                (audio_output_flags_t)pcmFlags,
                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
                numChannels,
                sampleRate
        };
        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
            ALOGV("openAudioSink: no change in pcm mode");
            // no change from previous configuration, everything ok.
            return OK;
        }

        audioSinkChanged = true;
        mAudioSink->close();
        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
        // Note: It is possible to set up the callback, but not use it to send audio data.
        // This requires a fix in AudioSink to explicitly specify the transfer mode.
        mUseAudioCallback = getUseAudioCallbackSetting();
        if (mUseAudioCallback) {
            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
        }

        // Compute the desired buffer size.
        // For callback mode, the amount of time before wakeup is about half the buffer size.
        const uint32_t frameCount =
                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
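        // For example, at 48000 Hz with the default 500 ms sink setting this is
        // 48000 * 500 / 1000 = 24000 frames.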

        status_t err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    AUDIO_FORMAT_PCM_16_BIT,
                    0 /* bufferCount - unused */,
                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
                    mUseAudioCallback ? this : NULL,
                    (audio_output_flags_t)pcmFlags,
                    NULL,
                    true /* doNotReconnect */,
                    frameCount);
        if (err == OK) {
            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
        }
        if (err != OK) {
            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
            return err;
        }
        mCurrentPcmInfo = info;
        if (!mPaused) { // for preview mode, don't start if paused
            mAudioSink->start();
        }
    }
    if (audioSinkChanged) {
        onAudioSinkChanged();
    }
    mAudioTornDown = false;
    return OK;
}

void NuPlayer::Renderer::onCloseAudioSink() {
    mAudioSink->close();
    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
}

}  // namespace android