1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "NuPlayerRenderer"
19 #include <utils/Log.h>
20 
21 #include "AWakeLock.h"
22 #include "NuPlayerRenderer.h"
23 #include <algorithm>
24 #include <cutils/properties.h>
25 #include <media/stagefright/foundation/ADebug.h>
26 #include <media/stagefright/foundation/AMessage.h>
27 #include <media/stagefright/foundation/AUtils.h>
28 #include <media/stagefright/MediaClock.h>
29 #include <media/stagefright/MediaErrors.h>
30 #include <media/stagefright/MetaData.h>
31 #include <media/stagefright/Utils.h>
32 #include <media/stagefright/VideoFrameScheduler.h>
33 #include <media/MediaCodecBuffer.h>
34 
35 #include <inttypes.h>
36 
37 namespace android {
38 
39 /*
40  * Example of common configuration settings in shell script form
41 
42    #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
43    adb shell setprop audio.offload.disable 1
44 
45    #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
46    adb shell setprop audio.offload.video 1
47 
48    #Use audio callbacks for PCM data
49    adb shell setprop media.stagefright.audio.cbk 1
50 
51    #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
52    adb shell setprop media.stagefright.audio.deep 1
53 
54    #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
55    adb shell setprop media.stagefright.audio.sink 1000
56 
57  * These configurations take effect for the next track played (not the current track).
58  */
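// The helpers below read two of the properties described above:
// "media.stagefright.audio.cbk" (default: false) and
// "media.stagefright.audio.sink" (default: 500 ms).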
59 
60 static inline bool getUseAudioCallbackSetting() {
61     return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
62 }
63 
64 static inline int32_t getAudioSinkPcmMsSetting() {
65     return property_get_int32(
66             "media.stagefright.audio.sink", 500 /* default_value */);
67 }
68 
69 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
70 // is closed to allow the audio DSP to power down.
71 static const int64_t kOffloadPauseMaxUs = 10000000ll;
72 
73 // Maximum allowed delay from AudioSink, 1.5 seconds.
74 static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
75 
76 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
77 
78 // static
79 const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
80         AUDIO_CHANNEL_NONE,
81         AUDIO_OUTPUT_FLAG_NONE,
82         AUDIO_FORMAT_INVALID,
83         0, // mNumChannels
84         0 // mSampleRate
85 };
86 
87 // static
88 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
89 
90 NuPlayer::Renderer::Renderer(
91         const sp<MediaPlayerBase::AudioSink> &sink,
92         const sp<MediaClock> &mediaClock,
93         const sp<AMessage> &notify,
94         uint32_t flags)
95     : mAudioSink(sink),
96       mUseVirtualAudioSink(false),
97       mNotify(notify),
98       mFlags(flags),
99       mNumFramesWritten(0),
100       mDrainAudioQueuePending(false),
101       mDrainVideoQueuePending(false),
102       mAudioQueueGeneration(0),
103       mVideoQueueGeneration(0),
104       mAudioDrainGeneration(0),
105       mVideoDrainGeneration(0),
106       mAudioEOSGeneration(0),
107       mMediaClock(mediaClock),
108       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
109       mAudioFirstAnchorTimeMediaUs(-1),
110       mAnchorTimeMediaUs(-1),
111       mAnchorNumFramesWritten(-1),
112       mVideoLateByUs(0ll),
113       mNextVideoTimeMediaUs(-1),
114       mHasAudio(false),
115       mHasVideo(false),
116       mNotifyCompleteAudio(false),
117       mNotifyCompleteVideo(false),
118       mSyncQueues(false),
119       mPaused(false),
120       mPauseDrainAudioAllowedUs(0),
121       mVideoSampleReceived(false),
122       mVideoRenderingStarted(false),
123       mVideoRenderingStartGeneration(0),
124       mAudioRenderingStartGeneration(0),
125       mRenderingDataDelivered(false),
126       mNextAudioClockUpdateTimeUs(-1),
127       mLastAudioMediaTimeUs(-1),
128       mAudioOffloadPauseTimeoutGeneration(0),
129       mAudioTornDown(false),
130       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
131       mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
132       mTotalBuffersQueued(0),
133       mLastAudioBufferDrained(0),
134       mUseAudioCallback(false),
135       mWakeLock(new AWakeLock()) {
136     CHECK(mediaClock != NULL);
137     mPlaybackRate = mPlaybackSettings.mSpeed;
138     mMediaClock->setPlaybackRate(mPlaybackRate);
139 }
140 
141 NuPlayer::Renderer::~Renderer() {
142     if (offloadingAudio()) {
143         mAudioSink->stop();
144         mAudioSink->flush();
145         mAudioSink->close();
146     }
147 
148     // Try to avoid a race condition in case the callback is still active.
149     Mutex::Autolock autoLock(mLock);
150     if (mUseAudioCallback) {
151         flushQueue(&mAudioQueue);
152         flushQueue(&mVideoQueue);
153     }
154     mWakeLock.clear();
155     mVideoScheduler.clear();
156     mNotify.clear();
157     mAudioSink.clear();
158 }
159 
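// Hands a decoded buffer to the renderer. The call only posts a message to the
// renderer's looper; the buffer is queued there and notifyConsumed is posted back
// once the buffer has been drained (or dropped).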
160 void NuPlayer::Renderer::queueBuffer(
161         bool audio,
162         const sp<MediaCodecBuffer> &buffer,
163         const sp<AMessage> &notifyConsumed) {
164     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
165     msg->setInt32("queueGeneration", getQueueGeneration(audio));
166     msg->setInt32("audio", static_cast<int32_t>(audio));
167     msg->setObject("buffer", buffer);
168     msg->setMessage("notifyConsumed", notifyConsumed);
169     msg->post();
170 }
171 
172 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
173     CHECK_NE(finalResult, (status_t)OK);
174 
175     sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
176     msg->setInt32("queueGeneration", getQueueGeneration(audio));
177     msg->setInt32("audio", static_cast<int32_t>(audio));
178     msg->setInt32("finalResult", finalResult);
179     msg->post();
180 }
181 
182 status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
183     sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
184     writeToAMessage(msg, rate);
185     sp<AMessage> response;
186     status_t err = msg->postAndAwaitResponse(&response);
187     if (err == OK && response != NULL) {
188         CHECK(response->findInt32("err", &err));
189     }
190     return err;
191 }
192 
193 status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
194     if (rate.mSpeed == 0.f) {
195         onPause();
196         // Don't call the audiosink's setPlaybackRate when pausing, as the pitch does not
197         // have to correspond to any non-0 speed (e.g. the old speed). Keep the
198         // settings nonetheless, using the old speed, in case the audiosink changes.
199         AudioPlaybackRate newRate = rate;
200         newRate.mSpeed = mPlaybackSettings.mSpeed;
201         mPlaybackSettings = newRate;
202         return OK;
203     }
204 
205     if (mAudioSink != NULL && mAudioSink->ready()) {
206         status_t err = mAudioSink->setPlaybackRate(rate);
207         if (err != OK) {
208             return err;
209         }
210     }
211     mPlaybackSettings = rate;
212     mPlaybackRate = rate.mSpeed;
213     mMediaClock->setPlaybackRate(mPlaybackRate);
214     return OK;
215 }
216 
217 status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
218     sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
219     sp<AMessage> response;
220     status_t err = msg->postAndAwaitResponse(&response);
221     if (err == OK && response != NULL) {
222         CHECK(response->findInt32("err", &err));
223         if (err == OK) {
224             readFromAMessage(response, rate);
225         }
226     }
227     return err;
228 }
229 
230 status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
231     if (mAudioSink != NULL && mAudioSink->ready()) {
232         status_t err = mAudioSink->getPlaybackRate(rate);
233         if (err == OK) {
234             if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
235                 ALOGW("correcting mismatch in internal/external playback rate");
236             }
237             // Use the playback settings reported by the audiosink, as they may be
238             // slightly off because the audiosink does not apply very small changes.
239             mPlaybackSettings = *rate;
240             if (mPaused) {
241                 rate->mSpeed = 0.f;
242             }
243         }
244         return err;
245     }
246     *rate = mPlaybackSettings;
247     return OK;
248 }
249 
250 status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
251     sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
252     writeToAMessage(msg, sync, videoFpsHint);
253     sp<AMessage> response;
254     status_t err = msg->postAndAwaitResponse(&response);
255     if (err == OK && response != NULL) {
256         CHECK(response->findInt32("err", &err));
257     }
258     return err;
259 }
260 
261 status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
262     if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
263         return BAD_VALUE;
264     }
265     // TODO: support sync sources
266     return INVALID_OPERATION;
267 }
268 
269 status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
270     sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
271     sp<AMessage> response;
272     status_t err = msg->postAndAwaitResponse(&response);
273     if (err == OK && response != NULL) {
274         CHECK(response->findInt32("err", &err));
275         if (err == OK) {
276             readFromAMessage(response, sync, videoFps);
277         }
278     }
279     return err;
280 }
281 
282 status_t NuPlayer::Renderer::onGetSyncSettings(
283         AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
284     *sync = mSyncSettings;
285     *videoFps = -1.f;
286     return OK;
287 }
288 
289 void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
290     {
291         Mutex::Autolock autoLock(mLock);
292         if (audio) {
293             mNotifyCompleteAudio |= notifyComplete;
294             clearAudioFirstAnchorTime_l();
295             ++mAudioQueueGeneration;
296             ++mAudioDrainGeneration;
297         } else {
298             mNotifyCompleteVideo |= notifyComplete;
299             ++mVideoQueueGeneration;
300             ++mVideoDrainGeneration;
301         }
302 
303         mMediaClock->clearAnchor();
304         mVideoLateByUs = 0;
305         mNextVideoTimeMediaUs = -1;
306         mSyncQueues = false;
307     }
308 
309     sp<AMessage> msg = new AMessage(kWhatFlush, this);
310     msg->setInt32("audio", static_cast<int32_t>(audio));
311     msg->post();
312 }
313 
314 void NuPlayer::Renderer::signalTimeDiscontinuity() {
315 }
316 
317 void NuPlayer::Renderer::signalDisableOffloadAudio() {
318     (new AMessage(kWhatDisableOffloadAudio, this))->post();
319 }
320 
321 void NuPlayer::Renderer::signalEnableOffloadAudio() {
322     (new AMessage(kWhatEnableOffloadAudio, this))->post();
323 }
324 
325 void NuPlayer::Renderer::pause() {
326     (new AMessage(kWhatPause, this))->post();
327 }
328 
329 void NuPlayer::Renderer::resume() {
330     (new AMessage(kWhatResume, this))->post();
331 }
332 
333 void NuPlayer::Renderer::setVideoFrameRate(float fps) {
334     sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
335     msg->setFloat("frame-rate", fps);
336     msg->post();
337 }
338 
339 // Called on any thread without mLock acquired.
340 status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
341     status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
342     if (result == OK) {
343         return result;
344     }
345 
346     // MediaClock has not started yet. Try to start it if possible.
347     {
348         Mutex::Autolock autoLock(mLock);
349         if (mAudioFirstAnchorTimeMediaUs == -1) {
350             return result;
351         }
352 
353         AudioTimestamp ts;
354         status_t res = mAudioSink->getTimestamp(ts);
355         if (res != OK) {
356             return result;
357         }
358 
359         // AudioSink has rendered some frames.
360         int64_t nowUs = ALooper::GetNowUs();
361         int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
362                 + mAudioFirstAnchorTimeMediaUs;
363         mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
364     }
365 
366     return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
367 }
368 
369 void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
370     mAudioFirstAnchorTimeMediaUs = -1;
371     mMediaClock->setStartingTimeMedia(-1);
372 }
373 
374 void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
375     if (mAudioFirstAnchorTimeMediaUs == -1) {
376         mAudioFirstAnchorTimeMediaUs = mediaUs;
377         mMediaClock->setStartingTimeMedia(mediaUs);
378     }
379 }
380 
381 // Called on renderer looper.
382 void NuPlayer::Renderer::clearAnchorTime() {
383     mMediaClock->clearAnchor();
384     mAnchorTimeMediaUs = -1;
385     mAnchorNumFramesWritten = -1;
386 }
387 
388 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
389     Mutex::Autolock autoLock(mLock);
390     mVideoLateByUs = lateUs;
391 }
392 
393 int64_t NuPlayer::Renderer::getVideoLateByUs() {
394     Mutex::Autolock autoLock(mLock);
395     return mVideoLateByUs;
396 }
397 
398 status_t NuPlayer::Renderer::openAudioSink(
399         const sp<AMessage> &format,
400         bool offloadOnly,
401         bool hasVideo,
402         uint32_t flags,
403         bool *isOffloaded,
404         bool isStreaming) {
405     sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
406     msg->setMessage("format", format);
407     msg->setInt32("offload-only", offloadOnly);
408     msg->setInt32("has-video", hasVideo);
409     msg->setInt32("flags", flags);
410     msg->setInt32("isStreaming", isStreaming);
411 
412     sp<AMessage> response;
413     status_t postStatus = msg->postAndAwaitResponse(&response);
414 
415     int32_t err;
416     if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
417         err = INVALID_OPERATION;
418     } else if (err == OK && isOffloaded != NULL) {
419         int32_t offload;
420         CHECK(response->findInt32("offload", &offload));
421         *isOffloaded = (offload != 0);
422     }
423     return err;
424 }
425 
426 void NuPlayer::Renderer::closeAudioSink() {
427     sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
428 
429     sp<AMessage> response;
430     msg->postAndAwaitResponse(&response);
431 }
432 
433 void NuPlayer::Renderer::changeAudioFormat(
434         const sp<AMessage> &format,
435         bool offloadOnly,
436         bool hasVideo,
437         uint32_t flags,
438         bool isStreaming,
439         const sp<AMessage> &notify) {
440     sp<AMessage> meta = new AMessage;
441     meta->setMessage("format", format);
442     meta->setInt32("offload-only", offloadOnly);
443     meta->setInt32("has-video", hasVideo);
444     meta->setInt32("flags", flags);
445     meta->setInt32("isStreaming", isStreaming);
446 
447     sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
448     msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
449     msg->setMessage("notify", notify);
450     msg->setMessage("meta", meta);
451     msg->post();
452 }
453 
454 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
455     switch (msg->what()) {
456         case kWhatOpenAudioSink:
457         {
458             sp<AMessage> format;
459             CHECK(msg->findMessage("format", &format));
460 
461             int32_t offloadOnly;
462             CHECK(msg->findInt32("offload-only", &offloadOnly));
463 
464             int32_t hasVideo;
465             CHECK(msg->findInt32("has-video", &hasVideo));
466 
467             uint32_t flags;
468             CHECK(msg->findInt32("flags", (int32_t *)&flags));
469 
470             uint32_t isStreaming;
471             CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
472 
473             status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
474 
475             sp<AMessage> response = new AMessage;
476             response->setInt32("err", err);
477             response->setInt32("offload", offloadingAudio());
478 
479             sp<AReplyToken> replyID;
480             CHECK(msg->senderAwaitsResponse(&replyID));
481             response->postReply(replyID);
482 
483             break;
484         }
485 
486         case kWhatCloseAudioSink:
487         {
488             sp<AReplyToken> replyID;
489             CHECK(msg->senderAwaitsResponse(&replyID));
490 
491             onCloseAudioSink();
492 
493             sp<AMessage> response = new AMessage;
494             response->postReply(replyID);
495             break;
496         }
497 
498         case kWhatStopAudioSink:
499         {
500             mAudioSink->stop();
501             break;
502         }
503 
504         case kWhatChangeAudioFormat:
505         {
506             int32_t queueGeneration;
507             CHECK(msg->findInt32("queueGeneration", &queueGeneration));
508 
509             sp<AMessage> notify;
510             CHECK(msg->findMessage("notify", &notify));
511 
512             if (offloadingAudio()) {
513                 ALOGW("changeAudioFormat should NOT be called in offload mode");
514                 notify->setInt32("err", INVALID_OPERATION);
515                 notify->post();
516                 break;
517             }
518 
519             sp<AMessage> meta;
520             CHECK(msg->findMessage("meta", &meta));
521 
522             if (queueGeneration != getQueueGeneration(true /* audio */)
523                     || mAudioQueue.empty()) {
524                 onChangeAudioFormat(meta, notify);
525                 break;
526             }
527 
528             QueueEntry entry;
529             entry.mNotifyConsumed = notify;
530             entry.mMeta = meta;
531 
532             Mutex::Autolock autoLock(mLock);
533             mAudioQueue.push_back(entry);
534             postDrainAudioQueue_l();
535 
536             break;
537         }
538 
539         case kWhatDrainAudioQueue:
540         {
541             mDrainAudioQueuePending = false;
542 
543             int32_t generation;
544             CHECK(msg->findInt32("drainGeneration", &generation));
545             if (generation != getDrainGeneration(true /* audio */)) {
546                 break;
547             }
548 
549             if (onDrainAudioQueue()) {
550                 uint32_t numFramesPlayed;
551                 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
552                          (status_t)OK);
553 
554                 // Handle AudioTrack race when start is immediately called after flush.
555                 uint32_t numFramesPendingPlayout =
556                     (mNumFramesWritten > numFramesPlayed ?
557                         mNumFramesWritten - numFramesPlayed : 0);
558 
559                 // This is how long the audio sink will have data to
560                 // play back.
561                 int64_t delayUs =
562                     mAudioSink->msecsPerFrame()
563                         * numFramesPendingPlayout * 1000ll;
564                 if (mPlaybackRate > 1.0f) {
565                     delayUs /= mPlaybackRate;
566                 }
567 
568                 // Let's give it more data after about half that time
569                 // has elapsed.
570                 delayUs /= 2;
571                 // check the buffer size to estimate maximum delay permitted.
572                 const int64_t maxDrainDelayUs = std::max(
573                         mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
574                 ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
575                         (long long)delayUs, (long long)maxDrainDelayUs);
576                 Mutex::Autolock autoLock(mLock);
577                 postDrainAudioQueue_l(delayUs);
578             }
579             break;
580         }
581 
582         case kWhatDrainVideoQueue:
583         {
584             int32_t generation;
585             CHECK(msg->findInt32("drainGeneration", &generation));
586             if (generation != getDrainGeneration(false /* audio */)) {
587                 break;
588             }
589 
590             mDrainVideoQueuePending = false;
591 
592             onDrainVideoQueue();
593 
594             postDrainVideoQueue();
595             break;
596         }
597 
598         case kWhatPostDrainVideoQueue:
599         {
600             int32_t generation;
601             CHECK(msg->findInt32("drainGeneration", &generation));
602             if (generation != getDrainGeneration(false /* audio */)) {
603                 break;
604             }
605 
606             mDrainVideoQueuePending = false;
607             postDrainVideoQueue();
608             break;
609         }
610 
611         case kWhatQueueBuffer:
612         {
613             onQueueBuffer(msg);
614             break;
615         }
616 
617         case kWhatQueueEOS:
618         {
619             onQueueEOS(msg);
620             break;
621         }
622 
623         case kWhatEOS:
624         {
625             int32_t generation;
626             CHECK(msg->findInt32("audioEOSGeneration", &generation));
627             if (generation != mAudioEOSGeneration) {
628                 break;
629             }
630             status_t finalResult;
631             CHECK(msg->findInt32("finalResult", &finalResult));
632             notifyEOS(true /* audio */, finalResult);
633             break;
634         }
635 
636         case kWhatConfigPlayback:
637         {
638             sp<AReplyToken> replyID;
639             CHECK(msg->senderAwaitsResponse(&replyID));
640             AudioPlaybackRate rate;
641             readFromAMessage(msg, &rate);
642             status_t err = onConfigPlayback(rate);
643             sp<AMessage> response = new AMessage;
644             response->setInt32("err", err);
645             response->postReply(replyID);
646             break;
647         }
648 
649         case kWhatGetPlaybackSettings:
650         {
651             sp<AReplyToken> replyID;
652             CHECK(msg->senderAwaitsResponse(&replyID));
653             AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
654             status_t err = onGetPlaybackSettings(&rate);
655             sp<AMessage> response = new AMessage;
656             if (err == OK) {
657                 writeToAMessage(response, rate);
658             }
659             response->setInt32("err", err);
660             response->postReply(replyID);
661             break;
662         }
663 
664         case kWhatConfigSync:
665         {
666             sp<AReplyToken> replyID;
667             CHECK(msg->senderAwaitsResponse(&replyID));
668             AVSyncSettings sync;
669             float videoFpsHint;
670             readFromAMessage(msg, &sync, &videoFpsHint);
671             status_t err = onConfigSync(sync, videoFpsHint);
672             sp<AMessage> response = new AMessage;
673             response->setInt32("err", err);
674             response->postReply(replyID);
675             break;
676         }
677 
678         case kWhatGetSyncSettings:
679         {
680             sp<AReplyToken> replyID;
681             CHECK(msg->senderAwaitsResponse(&replyID));
682 
683             ALOGV("kWhatGetSyncSettings");
684             AVSyncSettings sync;
685             float videoFps = -1.f;
686             status_t err = onGetSyncSettings(&sync, &videoFps);
687             sp<AMessage> response = new AMessage;
688             if (err == OK) {
689                 writeToAMessage(response, sync, videoFps);
690             }
691             response->setInt32("err", err);
692             response->postReply(replyID);
693             break;
694         }
695 
696         case kWhatFlush:
697         {
698             onFlush(msg);
699             break;
700         }
701 
702         case kWhatDisableOffloadAudio:
703         {
704             onDisableOffloadAudio();
705             break;
706         }
707 
708         case kWhatEnableOffloadAudio:
709         {
710             onEnableOffloadAudio();
711             break;
712         }
713 
714         case kWhatPause:
715         {
716             onPause();
717             break;
718         }
719 
720         case kWhatResume:
721         {
722             onResume();
723             break;
724         }
725 
726         case kWhatSetVideoFrameRate:
727         {
728             float fps;
729             CHECK(msg->findFloat("frame-rate", &fps));
730             onSetVideoFrameRate(fps);
731             break;
732         }
733 
734         case kWhatAudioTearDown:
735         {
736             int32_t reason;
737             CHECK(msg->findInt32("reason", &reason));
738 
739             onAudioTearDown((AudioTearDownReason)reason);
740             break;
741         }
742 
743         case kWhatAudioOffloadPauseTimeout:
744         {
745             int32_t generation;
746             CHECK(msg->findInt32("drainGeneration", &generation));
747             if (generation != mAudioOffloadPauseTimeoutGeneration) {
748                 break;
749             }
750             ALOGV("Audio Offload tear down due to pause timeout.");
751             onAudioTearDown(kDueToTimeout);
752             mWakeLock->release();
753             break;
754         }
755 
756         default:
757             TRESPASS();
758             break;
759     }
760 }
761 
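// Schedules a kWhatDrainAudioQueue message tagged with the current audio drain
// generation. No-op when a drain is already pending, the queues are being synced,
// or audio is delivered through the AudioSink callback. Must be called with mLock held.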
762 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
763     if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
764         return;
765     }
766 
767     if (mAudioQueue.empty()) {
768         return;
769     }
770 
771     // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
772     if (mPaused) {
773         const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
774         if (diffUs > delayUs) {
775             delayUs = diffUs;
776         }
777     }
778 
779     mDrainAudioQueuePending = true;
780     sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
781     msg->setInt32("drainGeneration", mAudioDrainGeneration);
782     msg->post(delayUs);
783 }
784 
785 void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
786     mAudioRenderingStartGeneration = mAudioDrainGeneration;
787     mVideoRenderingStartGeneration = mVideoDrainGeneration;
788     mRenderingDataDelivered = false;
789 }
790 
791 void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
792     if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
793         mAudioRenderingStartGeneration == mAudioDrainGeneration) {
794         mRenderingDataDelivered = true;
795         if (mPaused) {
796             return;
797         }
798         mVideoRenderingStartGeneration = -1;
799         mAudioRenderingStartGeneration = -1;
800 
801         sp<AMessage> notify = mNotify->dup();
802         notify->setInt32("what", kWhatMediaRenderingStart);
803         notify->post();
804     }
805 }
806 
807 // static
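// Trampoline invoked on the AudioSink callback thread; 'cookie' carries the Renderer instance.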
808 size_t NuPlayer::Renderer::AudioSinkCallback(
809         MediaPlayerBase::AudioSink * /* audioSink */,
810         void *buffer,
811         size_t size,
812         void *cookie,
813         MediaPlayerBase::AudioSink::cb_event_t event) {
814     NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
815 
816     switch (event) {
817         case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
818         {
819             return me->fillAudioBuffer(buffer, size);
820             break;
821         }
822 
823         case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
824         {
825             ALOGV("AudioSink::CB_EVENT_STREAM_END");
826             me->notifyEOSCallback();
827             break;
828         }
829 
830         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
831         {
832             ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
833             me->notifyAudioTearDown(kDueToError);
834             break;
835         }
836     }
837 
838     return 0;
839 }
840 
841 void NuPlayer::Renderer::notifyEOSCallback() {
842     Mutex::Autolock autoLock(mLock);
843 
844     if (!mUseAudioCallback) {
845         return;
846     }
847 
848     notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
849 }
850 
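// Callback-mode PCM path: copies as much queued audio as fits into the buffer
// provided by the AudioSink, updates the media clock anchor from the sink's
// played-out duration, and handles the EOS entry when it is reached.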
851 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
852     Mutex::Autolock autoLock(mLock);
853 
854     if (!mUseAudioCallback) {
855         return 0;
856     }
857 
858     bool hasEOS = false;
859 
860     size_t sizeCopied = 0;
861     bool firstEntry = true;
862     QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
863     while (sizeCopied < size && !mAudioQueue.empty()) {
864         entry = &*mAudioQueue.begin();
865 
866         if (entry->mBuffer == NULL) { // EOS
867             hasEOS = true;
868             mAudioQueue.erase(mAudioQueue.begin());
869             break;
870         }
871 
872         if (firstEntry && entry->mOffset == 0) {
873             firstEntry = false;
874             int64_t mediaTimeUs;
875             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
876             ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
877             setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
878         }
879 
880         size_t copy = entry->mBuffer->size() - entry->mOffset;
881         size_t sizeRemaining = size - sizeCopied;
882         if (copy > sizeRemaining) {
883             copy = sizeRemaining;
884         }
885 
886         memcpy((char *)buffer + sizeCopied,
887                entry->mBuffer->data() + entry->mOffset,
888                copy);
889 
890         entry->mOffset += copy;
891         if (entry->mOffset == entry->mBuffer->size()) {
892             entry->mNotifyConsumed->post();
893             mAudioQueue.erase(mAudioQueue.begin());
894             entry = NULL;
895         }
896         sizeCopied += copy;
897 
898         notifyIfMediaRenderingStarted_l();
899     }
900 
901     if (mAudioFirstAnchorTimeMediaUs >= 0) {
902         int64_t nowUs = ALooper::GetNowUs();
903         int64_t nowMediaUs =
904             mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
905         // we don't know how much data we are queueing for offloaded tracks.
906         mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
907     }
908 
909     // For non-offloaded audio, we need to compute the frames written because
910     // there is no EVENT_STREAM_END notification. The frames written give
911     // an estimate of the pending played-out duration.
912     if (!offloadingAudio()) {
913         mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
914     }
915 
916     if (hasEOS) {
917         (new AMessage(kWhatStopAudioSink, this))->post();
918         // As there is currently no EVENT_STREAM_END callback notification for
919         // non-offloaded audio tracks, we need to post the EOS ourselves.
920         if (!offloadingAudio()) {
921             int64_t postEOSDelayUs = 0;
922             if (mAudioSink->needsTrailingPadding()) {
923                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
924             }
925             ALOGV("fillAudioBuffer: notifyEOS_l "
926                     "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
927                     mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
928             notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
929         }
930     }
931     return sizeCopied;
932 }
933 
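// Locates the last EOS (or explicit "eos"-marked) entry in the audio queue, posts
// the pending consumed/EOS/format-change notifications for everything up to it, and
// drops those entries without writing them to the AudioSink.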
934 void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
935     List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
936     bool foundEOS = false;
937     while (it != mAudioQueue.end()) {
938         int32_t eos;
939         QueueEntry *entry = &*it++;
940         if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
941                 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
942             itEOS = it;
943             foundEOS = true;
944         }
945     }
946 
947     if (foundEOS) {
948         // post all replies before EOS and drop the samples
949         for (it = mAudioQueue.begin(); it != itEOS; it++) {
950             if (it->mBuffer == nullptr) {
951                 if (it->mNotifyConsumed == nullptr) {
952                     // delay doesn't matter as we don't even have an AudioTrack
953                     notifyEOS(true /* audio */, it->mFinalResult);
954                 } else {
955                     // TAG for re-opening audio sink.
956                     onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
957                 }
958             } else {
959                 it->mNotifyConsumed->post();
960             }
961         }
962         mAudioQueue.erase(mAudioQueue.begin(), itEOS);
963     }
964 }
965 
966 bool NuPlayer::Renderer::onDrainAudioQueue() {
967     // do not drain audio during teardown as queued buffers may be invalid.
968     if (mAudioTornDown) {
969         return false;
970     }
971     // TODO: This call to getPosition checks if AudioTrack has been created
972     // in AudioSink before draining audio. If AudioTrack doesn't exist, then
973     // CHECKs on getPosition will fail.
974     // We still need to figure out why the AudioTrack is not created when
975     // this function is called. One possible reason could be leftover
976     // audio. Another place to check is whether the decoder has received
977     // INFO_FORMAT_CHANGED as its first buffer, since the AudioSink is
978     // opened there, and possible interactions with a flush issued
979     // immediately after start. Investigate the error message
980     // "vorbis_dsp_synthesis returned -135", along with RTSP.
981     uint32_t numFramesPlayed;
982     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
983         // When getPosition fails, renderer will not reschedule the draining
984         // unless new samples are queued.
985         // If we have pending EOS (or "eos" marker for discontinuities), we need
986         // to post these now as NuPlayerDecoder might be waiting for it.
987         drainAudioQueueUntilLastEOS();
988 
989         ALOGW("onDrainAudioQueue(): audio sink is not ready");
990         return false;
991     }
992 
993 #if 0
994     ssize_t numFramesAvailableToWrite =
995         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
996 
997     if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
998         ALOGI("audio sink underrun");
999     } else {
1000         ALOGV("audio queue has %d frames left to play",
1001              mAudioSink->frameCount() - numFramesAvailableToWrite);
1002     }
1003 #endif
1004 
1005     uint32_t prevFramesWritten = mNumFramesWritten;
1006     while (!mAudioQueue.empty()) {
1007         QueueEntry *entry = &*mAudioQueue.begin();
1008 
1009         if (entry->mBuffer == NULL) {
1010             if (entry->mNotifyConsumed != nullptr) {
1011                 // TAG for re-open audio sink.
1012                 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1013                 mAudioQueue.erase(mAudioQueue.begin());
1014                 continue;
1015             }
1016 
1017             // EOS
1018             if (mPaused) {
1019                 // Do not notify EOS when paused.
1020                 // This is needed to avoid switching to the next clip while paused.
1021                 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1022                 return false;
1023             }
1024 
1025             int64_t postEOSDelayUs = 0;
1026             if (mAudioSink->needsTrailingPadding()) {
1027                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1028             }
1029             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1030             mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1031 
1032             mAudioQueue.erase(mAudioQueue.begin());
1033             entry = NULL;
1034             if (mAudioSink->needsTrailingPadding()) {
1035                 // If we're not in gapless playback (i.e. through setNextPlayer), we
1036                 // need to stop the track here, because that will play out the last
1037                 // little bit at the end of the file. Otherwise short files won't play.
1038                 mAudioSink->stop();
1039                 mNumFramesWritten = 0;
1040             }
1041             return false;
1042         }
1043 
1044         mLastAudioBufferDrained = entry->mBufferOrdinal;
1045 
1046         // Ignore a 0-sized buffer, which could be an EOS marker with no data.
1047         if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1048             int64_t mediaTimeUs;
1049             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1050             ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1051                     mediaTimeUs / 1E6);
1052             onNewAudioMediaTime(mediaTimeUs);
1053         }
1054 
1055         size_t copy = entry->mBuffer->size() - entry->mOffset;
1056 
1057         ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1058                                             copy, false /* blocking */);
1059         if (written < 0) {
1060             // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1061             if (written == WOULD_BLOCK) {
1062                 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1063             } else {
1064                 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1065                 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1066                 // true, in which case the NuPlayer will handle the reconnect.
1067                 notifyAudioTearDown(kDueToError);
1068             }
1069             break;
1070         }
1071 
1072         entry->mOffset += written;
1073         size_t remainder = entry->mBuffer->size() - entry->mOffset;
1074         if ((ssize_t)remainder < mAudioSink->frameSize()) {
1075             if (remainder > 0) {
1076                 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1077                         remainder);
1078                 entry->mOffset += remainder;
1079                 copy -= remainder;
1080             }
1081 
1082             entry->mNotifyConsumed->post();
1083             mAudioQueue.erase(mAudioQueue.begin());
1084 
1085             entry = NULL;
1086         }
1087 
1088         size_t copiedFrames = written / mAudioSink->frameSize();
1089         mNumFramesWritten += copiedFrames;
1090 
1091         {
1092             Mutex::Autolock autoLock(mLock);
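            // Advance the clock's maximum media time to cover everything written so far:
            // the anchor media time plus the duration of the frames written since the
            // anchor (msecsPerFrame() * 1000 converts milliseconds per frame to microseconds).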
1093             int64_t maxTimeMedia;
1094             maxTimeMedia =
1095                 mAnchorTimeMediaUs +
1096                         (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1097                                 * 1000LL * mAudioSink->msecsPerFrame());
1098             mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1099 
1100             notifyIfMediaRenderingStarted_l();
1101         }
1102 
1103         if (written != (ssize_t)copy) {
1104             // A short count was received from AudioSink::write()
1105             //
1106             // AudioSink write is called in non-blocking mode.
1107             // It may return with a short count when:
1108             //
1109             // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1110             //    discarded.
1111             // 2) The data to be copied exceeds the available buffer in AudioSink.
1112             // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1113             // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1114 
1115             // (Case 1)
1116             // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
1117             // needs to fail, as we should not carry over fractional frames between calls.
1118             CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1119 
1120             // (Case 2, 3, 4)
1121             // Return early to the caller.
1122             // Beware of calling immediately again as this may busy-loop if you are not careful.
1123             ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1124             break;
1125         }
1126     }
1127 
1128     // calculate whether we need to reschedule another write.
1129     bool reschedule = !mAudioQueue.empty()
1130             && (!mPaused
1131                 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1132     //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
1133     //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1134     return reschedule;
1135 }
1136 
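// Converts a frame count into its playout duration in microseconds at the current
// sample rate (offload or PCM), assuming 1x playback speed.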
1137 int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1138     int32_t sampleRate = offloadingAudio() ?
1139             mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1140     if (sampleRate == 0) {
1141         ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1142         return 0;
1143     }
1144     // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1145     return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1146 }
1147 
1148 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1149 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1150     int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1151     if (mUseVirtualAudioSink) {
1152         int64_t nowUs = ALooper::GetNowUs();
1153         int64_t mediaUs;
1154         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
1155             return 0ll;
1156         } else {
1157             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1158         }
1159     }
1160 
1161     const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1162     int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
1163     if (pendingUs < 0) {
1164         // This shouldn't happen unless the timestamp is stale.
1165         ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1166                 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1167                 __func__, (long long)pendingUs,
1168                 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1169         pendingUs = 0;
1170     }
1171     return pendingUs;
1172 }
1173 
1174 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1175     int64_t realUs;
1176     if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1177         // If we failed to get the current position, e.g. because the audio
1178         // clock is not ready, then just play out video immediately without delay.
1179         return nowUs;
1180     }
1181     return realUs;
1182 }
1183 
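// Called with the media timestamp of each newly drained audio buffer. Anchors the
// MediaClock against the AudioSink's playout position, or falls back to a
// system-clock-paced "virtual" sink if the AudioSink never starts rendering.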
1184 void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
1185     Mutex::Autolock autoLock(mLock);
1186     // TRICKY: vorbis decoder generates multiple frames with the same
1187     // timestamp, so only update on the first frame with a given timestamp
1188     if (mediaTimeUs == mAnchorTimeMediaUs) {
1189         return;
1190     }
1191     setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
1192 
1193     // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
1194     if (mNextAudioClockUpdateTimeUs == -1) {
1195         AudioTimestamp ts;
1196         if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
1197             mNextAudioClockUpdateTimeUs = 0; // start our clock updates
1198         }
1199     }
1200     int64_t nowUs = ALooper::GetNowUs();
1201     if (mNextAudioClockUpdateTimeUs >= 0) {
1202         if (nowUs >= mNextAudioClockUpdateTimeUs) {
1203             int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
1204             mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
1205             mUseVirtualAudioSink = false;
1206             mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
1207         }
1208     } else {
1209         int64_t unused;
1210         if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
1211                 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
1212                         > kMaxAllowedAudioSinkDelayUs)) {
1213             // Enough data has been sent to AudioSink, but AudioSink has not rendered
1214             // any data yet. Something is wrong with AudioSink, e.g., the device is not
1215             // connected to audio out.
1216             // Switch to system clock. This essentially creates a virtual AudioSink with
1217             // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
1218             // This virtual AudioSink renders audio data starting from the very first sample
1219             // and it's paced by system clock.
1220             ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
1221             mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
1222             mUseVirtualAudioSink = true;
1223         }
1224     }
1225     mAnchorNumFramesWritten = mNumFramesWritten;
1226     mAnchorTimeMediaUs = mediaTimeUs;
1227 }
1228 
1229 // Called without mLock acquired.
1230 void NuPlayer::Renderer::postDrainVideoQueue() {
1231     if (mDrainVideoQueuePending
1232             || getSyncQueues()
1233             || (mPaused && mVideoSampleReceived)) {
1234         return;
1235     }
1236 
1237     if (mVideoQueue.empty()) {
1238         return;
1239     }
1240 
1241     QueueEntry &entry = *mVideoQueue.begin();
1242 
1243     sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
1244     msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
1245 
1246     if (entry.mBuffer == NULL) {
1247         // EOS doesn't carry a timestamp.
1248         msg->post();
1249         mDrainVideoQueuePending = true;
1250         return;
1251     }
1252 
1253     int64_t nowUs = ALooper::GetNowUs();
1254     if (mFlags & FLAG_REAL_TIME) {
1255         int64_t realTimeUs;
1256         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1257 
1258         realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1259 
1260         int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1261 
1262         int64_t delayUs = realTimeUs - nowUs;
1263 
1264         ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
1265         // post 2 display refreshes before rendering is due
1266         msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1267 
1268         mDrainVideoQueuePending = true;
1269         return;
1270     }
1271 
1272     int64_t mediaTimeUs;
1273     CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1274 
1275     {
1276         Mutex::Autolock autoLock(mLock);
1277         if (mAnchorTimeMediaUs < 0) {
1278             mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1279             mAnchorTimeMediaUs = mediaTimeUs;
1280         }
1281     }
1282     mNextVideoTimeMediaUs = mediaTimeUs + 100000;
1283     if (!mHasAudio) {
1284         // smooth out videos >= 10fps
1285         mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
1286     }
1287 
1288     if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
1289         msg->post();
1290     } else {
1291         int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1292 
1293         // post 2 display refreshes before rendering is due
1294         mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
1295     }
1296 
1297     mDrainVideoQueuePending = true;
1298 }
1299 
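// Releases the head of the video queue: computes the frame's display time via the
// VideoFrameScheduler, marks frames more than 40 ms late as not-to-be-rendered, and
// posts notifyConsumed with the chosen "timestampNs"/"render" values.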
1300 void NuPlayer::Renderer::onDrainVideoQueue() {
1301     if (mVideoQueue.empty()) {
1302         return;
1303     }
1304 
1305     QueueEntry *entry = &*mVideoQueue.begin();
1306 
1307     if (entry->mBuffer == NULL) {
1308         // EOS
1309 
1310         notifyEOS(false /* audio */, entry->mFinalResult);
1311 
1312         mVideoQueue.erase(mVideoQueue.begin());
1313         entry = NULL;
1314 
1315         setVideoLateByUs(0);
1316         return;
1317     }
1318 
1319     int64_t nowUs = ALooper::GetNowUs();
1320     int64_t realTimeUs;
1321     int64_t mediaTimeUs = -1;
1322     if (mFlags & FLAG_REAL_TIME) {
1323         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1324     } else {
1325         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1326 
1327         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1328     }
1329     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1330 
1331     bool tooLate = false;
1332 
1333     if (!mPaused) {
1334         setVideoLateByUs(nowUs - realTimeUs);
1335         tooLate = (mVideoLateByUs > 40000);
1336 
1337         if (tooLate) {
1338             ALOGV("video late by %lld us (%.2f secs)",
1339                  (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1340         } else {
1341             int64_t mediaUs = 0;
1342             mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1343             ALOGV("rendering video at media time %.2f secs",
1344                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
1345                     mediaUs) / 1E6);
1346 
1347             if (!(mFlags & FLAG_REAL_TIME)
1348                     && mLastAudioMediaTimeUs != -1
1349                     && mediaTimeUs > mLastAudioMediaTimeUs) {
1350                 // If audio ends before video, video continues to drive media clock.
1351                 // Also smooth out videos >= 10fps.
1352                 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1353             }
1354         }
1355     } else {
1356         setVideoLateByUs(0);
1357         if (!mVideoSampleReceived && !mHasAudio) {
1358             // This will ensure that the first frame after a flush won't be used as anchor
1359             // when renderer is in paused state, because resume can happen any time after seek.
1360             clearAnchorTime();
1361         }
1362     }
1363 
1364     // Always render the first video frame while keeping stats on A/V sync.
1365     if (!mVideoSampleReceived) {
1366         realTimeUs = nowUs;
1367         tooLate = false;
1368     }
1369 
1370     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1371     entry->mNotifyConsumed->setInt32("render", !tooLate);
1372     entry->mNotifyConsumed->post();
1373     mVideoQueue.erase(mVideoQueue.begin());
1374     entry = NULL;
1375 
1376     mVideoSampleReceived = true;
1377 
1378     if (!mPaused) {
1379         if (!mVideoRenderingStarted) {
1380             mVideoRenderingStarted = true;
1381             notifyVideoRenderingStart();
1382         }
1383         Mutex::Autolock autoLock(mLock);
1384         notifyIfMediaRenderingStarted_l();
1385     }
1386 }
1387 
1388 void NuPlayer::Renderer::notifyVideoRenderingStart() {
1389     sp<AMessage> notify = mNotify->dup();
1390     notify->setInt32("what", kWhatVideoRenderingStart);
1391     notify->post();
1392 }
1393 
1394 void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1395     Mutex::Autolock autoLock(mLock);
1396     notifyEOS_l(audio, finalResult, delayUs);
1397 }
1398 
1399 void NuPlayer::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
1400     if (audio && delayUs > 0) {
1401         sp<AMessage> msg = new AMessage(kWhatEOS, this);
1402         msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1403         msg->setInt32("finalResult", finalResult);
1404         msg->post(delayUs);
1405         return;
1406     }
1407     sp<AMessage> notify = mNotify->dup();
1408     notify->setInt32("what", kWhatEOS);
1409     notify->setInt32("audio", static_cast<int32_t>(audio));
1410     notify->setInt32("finalResult", finalResult);
1411     notify->post(delayUs);
1412 
1413     if (audio) {
1414         // Video might outlive audio. Clear anchor to enable video only case.
1415         mAnchorTimeMediaUs = -1;
1416         mHasAudio = false;
1417         if (mNextVideoTimeMediaUs >= 0) {
1418             int64_t mediaUs = 0;
1419             mMediaClock->getMediaTime(ALooper::GetNowUs(), &mediaUs);
1420             if (mNextVideoTimeMediaUs > mediaUs) {
1421                 mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
1422             }
1423         }
1424     }
1425 }
1426 
1427 void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1428     sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1429     msg->setInt32("reason", reason);
1430     msg->post();
1431 }
1432 
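// Looper-side handler for kWhatQueueBuffer: appends the entry to the audio or video
// queue and kicks the corresponding drain. While mSyncQueues is set, leading audio
// that starts more than 0.1 s before the first video frame is dropped so the two
// queues start in sync.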
1433 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1434     int32_t audio;
1435     CHECK(msg->findInt32("audio", &audio));
1436 
1437     if (dropBufferIfStale(audio, msg)) {
1438         return;
1439     }
1440 
1441     if (audio) {
1442         mHasAudio = true;
1443     } else {
1444         mHasVideo = true;
1445     }
1446 
1447     if (mHasVideo) {
1448         if (mVideoScheduler == NULL) {
1449             mVideoScheduler = new VideoFrameScheduler();
1450             mVideoScheduler->init();
1451         }
1452     }
1453 
1454     sp<RefBase> obj;
1455     CHECK(msg->findObject("buffer", &obj));
1456     sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
1457 
1458     sp<AMessage> notifyConsumed;
1459     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1460 
1461     QueueEntry entry;
1462     entry.mBuffer = buffer;
1463     entry.mNotifyConsumed = notifyConsumed;
1464     entry.mOffset = 0;
1465     entry.mFinalResult = OK;
1466     entry.mBufferOrdinal = ++mTotalBuffersQueued;
1467 
1468     if (audio) {
1469         Mutex::Autolock autoLock(mLock);
1470         mAudioQueue.push_back(entry);
1471         postDrainAudioQueue_l();
1472     } else {
1473         mVideoQueue.push_back(entry);
1474         postDrainVideoQueue();
1475     }
1476 
1477     Mutex::Autolock autoLock(mLock);
1478     if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1479         return;
1480     }
1481 
1482     sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1483     sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1484 
1485     if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1486         // EOS signalled on either queue.
1487         syncQueuesDone_l();
1488         return;
1489     }
1490 
1491     int64_t firstAudioTimeUs;
1492     int64_t firstVideoTimeUs;
1493     CHECK(firstAudioBuffer->meta()
1494             ->findInt64("timeUs", &firstAudioTimeUs));
1495     CHECK(firstVideoBuffer->meta()
1496             ->findInt64("timeUs", &firstVideoTimeUs));
1497 
1498     int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1499 
1500     ALOGV("queueDiff = %.2f secs", diff / 1E6);
1501 
1502     if (diff > 100000ll) {
1503         // Audio data starts more than 0.1 secs before video.
1504         // Drop some audio.
1505 
1506         (*mAudioQueue.begin()).mNotifyConsumed->post();
1507         mAudioQueue.erase(mAudioQueue.begin());
1508         return;
1509     }
1510 
1511     syncQueuesDone_l();
1512 }
1513 
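// Ends the initial A/V queue syncing phase, if it is still active, and kicks off
// draining of whichever queues already hold data. mLock is dropped around
// postDrainVideoQueue() because, unlike the _l variants, that call acquires the
// lock itself.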
1514 void NuPlayer::Renderer::syncQueuesDone_l() {
1515     if (!mSyncQueues) {
1516         return;
1517     }
1518 
1519     mSyncQueues = false;
1520 
1521     if (!mAudioQueue.empty()) {
1522         postDrainAudioQueue_l();
1523     }
1524 
1525     if (!mVideoQueue.empty()) {
1526         mLock.unlock();
1527         postDrainVideoQueue();
1528         mLock.lock();
1529     }
1530 }
1531 
1532 void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1533     int32_t audio;
1534     CHECK(msg->findInt32("audio", &audio));
1535 
1536     if (dropBufferIfStale(audio, msg)) {
1537         return;
1538     }
1539 
1540     int32_t finalResult;
1541     CHECK(msg->findInt32("finalResult", &finalResult));
1542 
1543     QueueEntry entry;
1544     entry.mOffset = 0;
1545     entry.mFinalResult = finalResult;
1546 
1547     if (audio) {
1548         Mutex::Autolock autoLock(mLock);
1549         if (mAudioQueue.empty() && mSyncQueues) {
1550             syncQueuesDone_l();
1551         }
1552         mAudioQueue.push_back(entry);
1553         postDrainAudioQueue_l();
1554     } else {
1555         if (mVideoQueue.empty() && getSyncQueues()) {
1556             Mutex::Autolock autoLock(mLock);
1557             syncQueuesDone_l();
1558         }
1559         mVideoQueue.push_back(entry);
1560         postDrainVideoQueue();
1561     }
1562 }
1563 
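// Handles a flush request for one stream: stops queue syncing, clears the anchor
// time, empties the corresponding queue (buffers are handed back through their
// notifyConsumed messages), bumps the drain (and, for audio, EOS) generations so
// in-flight drain messages are ignored, and pauses/flushes the AudioSink for the
// audio case. If requested, kWhatFlushComplete is reported to the upper layer.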
1564 void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1565     int32_t audio, notifyComplete;
1566     CHECK(msg->findInt32("audio", &audio));
1567 
1568     {
1569         Mutex::Autolock autoLock(mLock);
1570         if (audio) {
1571             notifyComplete = mNotifyCompleteAudio;
1572             mNotifyCompleteAudio = false;
1573             mLastAudioMediaTimeUs = -1;
1574         } else {
1575             notifyComplete = mNotifyCompleteVideo;
1576             mNotifyCompleteVideo = false;
1577         }
1578 
1579         // If we're currently syncing the queues, i.e. dropping audio while
1580         // aligning the first audio/video buffer times and only one of the
1581         // two queues has data, we may starve that queue by not requesting
1582         // more buffers from the decoder. If the other source then encounters
1583         // a discontinuity that leads to flushing, we'll never find the
1584         // corresponding discontinuity on the other queue.
1585         // Therefore we'll stop syncing the queues if at least one of them
1586         // is flushed.
1587         syncQueuesDone_l();
1588     }
1589     clearAnchorTime();
1590 
1591     ALOGV("flushing %s", audio ? "audio" : "video");
1592     if (audio) {
1593         {
1594             Mutex::Autolock autoLock(mLock);
1595             flushQueue(&mAudioQueue);
1596 
1597             ++mAudioDrainGeneration;
1598             ++mAudioEOSGeneration;
1599             prepareForMediaRenderingStart_l();
1600 
1601             // the frame count will be reset after flush.
1602             clearAudioFirstAnchorTime_l();
1603         }
1604 
1605         mDrainAudioQueuePending = false;
1606 
1607         if (offloadingAudio()) {
1608             mAudioSink->pause();
1609             mAudioSink->flush();
1610             if (!mPaused) {
1611                 mAudioSink->start();
1612             }
1613         } else {
1614             mAudioSink->pause();
1615             mAudioSink->flush();
1616             // Call stop() to signal to the AudioSink to completely fill the
1617             // internal buffer before resuming playback.
1618             // FIXME: this is ignored after flush().
1619             mAudioSink->stop();
1620             if (!mPaused) {
1621                 mAudioSink->start();
1622             }
1623             mNumFramesWritten = 0;
1624         }
1625         mNextAudioClockUpdateTimeUs = -1;
1626     } else {
1627         flushQueue(&mVideoQueue);
1628 
1629         mDrainVideoQueuePending = false;
1630 
1631         if (mVideoScheduler != NULL) {
1632             mVideoScheduler->restart();
1633         }
1634 
1635         Mutex::Autolock autoLock(mLock);
1636         ++mVideoDrainGeneration;
1637         prepareForMediaRenderingStart_l();
1638     }
1639 
1640     mVideoSampleReceived = false;
1641 
1642     if (notifyComplete) {
1643         notifyFlushComplete(audio);
1644     }
1645 }
1646 
1647 void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1648     while (!queue->empty()) {
1649         QueueEntry *entry = &*queue->begin();
1650 
1651         if (entry->mBuffer != NULL) {
1652             entry->mNotifyConsumed->post();
1653         } else if (entry->mNotifyConsumed != nullptr) {
1654             // Does the audio sink need to be (re)opened now?
1655             onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1656         }
1657 
1658         queue->erase(queue->begin());
1659         entry = NULL;
1660     }
1661 }
1662 
1663 void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1664     sp<AMessage> notify = mNotify->dup();
1665     notify->setInt32("what", kWhatFlushComplete);
1666     notify->setInt32("audio", static_cast<int32_t>(audio));
1667     notify->post();
1668 }
1669 
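// Returns true when the message's "queueGeneration" no longer matches the current
// generation for that stream (the generation is bumped when a flush is requested),
// in which case the stale buffer is handed back via its notifyConsumed message and
// must not be queued for rendering.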
1670 bool NuPlayer::Renderer::dropBufferIfStale(
1671         bool audio, const sp<AMessage> &msg) {
1672     int32_t queueGeneration;
1673     CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1674 
1675     if (queueGeneration == getQueueGeneration(audio)) {
1676         return false;
1677     }
1678 
1679     sp<AMessage> notifyConsumed;
1680     if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1681         notifyConsumed->post();
1682     }
1683 
1684     return true;
1685 }
1686 
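// Called after a non-offloaded AudioSink has been (re)opened: re-seeds the
// written-frame count from the sink and invalidates the anchor frame count so the
// audio position is computed against the new sink.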
1687 void NuPlayer::Renderer::onAudioSinkChanged() {
1688     if (offloadingAudio()) {
1689         return;
1690     }
1691     CHECK(!mDrainAudioQueuePending);
1692     mNumFramesWritten = 0;
1693     mAnchorNumFramesWritten = -1;
1694     uint32_t written;
1695     if (mAudioSink->getFramesWritten(&written) == OK) {
1696         mNumFramesWritten = written;
1697     }
1698 }
1699 
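// Switches the renderer out of audio-offload mode: clears FLAG_OFFLOAD_AUDIO and
// bumps the audio drain generation so drains posted for the offloaded sink are
// dropped. If the media-rendering-started notification is still pending, its
// tracking state is re-armed and the offload-only pause timeout is cancelled.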
1700 void NuPlayer::Renderer::onDisableOffloadAudio() {
1701     Mutex::Autolock autoLock(mLock);
1702     mFlags &= ~FLAG_OFFLOAD_AUDIO;
1703     ++mAudioDrainGeneration;
1704     if (mAudioRenderingStartGeneration != -1) {
1705         prepareForMediaRenderingStart_l();
1706         // PauseTimeout is applied to offload mode only. Cancel pending timer.
1707         cancelAudioOffloadPauseTimeout();
1708     }
1709 }
1710 
1711 void NuPlayer::Renderer::onEnableOffloadAudio() {
1712     Mutex::Autolock autoLock(mLock);
1713     mFlags |= FLAG_OFFLOAD_AUDIO;
1714     ++mAudioDrainGeneration;
1715     if (mAudioRenderingStartGeneration != -1) {
1716         prepareForMediaRenderingStart_l();
1717     }
1718 }
1719 
1720 void NuPlayer::Renderer::onPause() {
1721     if (mPaused) {
1722         return;
1723     }
1724 
1725     {
1726         Mutex::Autolock autoLock(mLock);
1727         // We do not increment the audio drain generation, so that the audio buffer keeps filling during pause.
1728         ++mVideoDrainGeneration;
1729         prepareForMediaRenderingStart_l();
1730         mPaused = true;
1731         mMediaClock->setPlaybackRate(0.0);
1732     }
1733 
1734     mDrainAudioQueuePending = false;
1735     mDrainVideoQueuePending = false;
1736 
1737     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1738     mAudioSink->pause();
1739     startAudioOffloadPauseTimeout();
1740 
1741     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1742           mAudioQueue.size(), mVideoQueue.size());
1743 }
1744 
1745 void NuPlayer::Renderer::onResume() {
1746     if (!mPaused) {
1747         return;
1748     }
1749 
1750     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1751     cancelAudioOffloadPauseTimeout();
1752     if (mAudioSink->ready()) {
1753         status_t err = mAudioSink->start();
1754         if (err != OK) {
1755             ALOGE("cannot start AudioSink err %d", err);
1756             notifyAudioTearDown(kDueToError);
1757         }
1758     }
1759 
1760     {
1761         Mutex::Autolock autoLock(mLock);
1762         mPaused = false;
1763         // rendering started message may have been delayed if we were paused.
1764         if (mRenderingDataDelivered) {
1765             notifyIfMediaRenderingStarted_l();
1766         }
1767         // Configure the audio sink, as we did not do it when pausing.
1768         if (mAudioSink != NULL && mAudioSink->ready()) {
1769             mAudioSink->setPlaybackRate(mPlaybackSettings);
1770         }
1771 
1772         mMediaClock->setPlaybackRate(mPlaybackRate);
1773 
1774         if (!mAudioQueue.empty()) {
1775             postDrainAudioQueue_l();
1776         }
1777     }
1778 
1779     if (!mVideoQueue.empty()) {
1780         postDrainVideoQueue();
1781     }
1782 }
1783 
1784 void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1785     if (mVideoScheduler == NULL) {
1786         mVideoScheduler = new VideoFrameScheduler();
1787     }
1788     mVideoScheduler->init(fps);
1789 }
1790 
1791 int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1792     Mutex::Autolock autoLock(mLock);
1793     return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1794 }
1795 
1796 int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1797     Mutex::Autolock autoLock(mLock);
1798     return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1799 }
1800 
1801 bool NuPlayer::Renderer::getSyncQueues() {
1802     Mutex::Autolock autoLock(mLock);
1803     return mSyncQueues;
1804 }
1805 
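// Tears the audio path down at most once per sink open (mAudioTornDown is reset in
// onOpenAudioSink): stops and flushes the AudioSink and notifies the upper layer
// (kWhatAudioTearDown) with the reason and, when available, the current position so
// playback can be resumed from there. A kDueToTimeout reason is only honored while
// audio is offloaded.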
1806 void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1807     if (mAudioTornDown) {
1808         return;
1809     }
1810 
1811     // TimeoutWhenPaused is only for offload mode.
1812     if (reason == kDueToTimeout && !offloadingAudio()) {
1813         return;
1814     }
1815 
1816     mAudioTornDown = true;
1817 
1818     int64_t currentPositionUs;
1819     sp<AMessage> notify = mNotify->dup();
1820     if (getCurrentPosition(&currentPositionUs) == OK) {
1821         notify->setInt64("positionUs", currentPositionUs);
1822     }
1823 
1824     mAudioSink->stop();
1825     mAudioSink->flush();
1826 
1827     notify->setInt32("what", kWhatAudioTearDown);
1828     notify->setInt32("reason", reason);
1829     notify->post();
1830 }
1831 
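// In offload mode, pausing arms a delayed kWhatAudioOffloadPauseTimeout message and
// holds a wake lock so the device does not suspend right after the pause. The
// message carries the current pause-timeout generation; cancelAudioOffloadPauseTimeout()
// releases the wake lock and bumps that generation so a timeout already in flight is
// ignored when it arrives.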
1832 void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1833     if (offloadingAudio()) {
1834         mWakeLock->acquire();
1835         sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1836         msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1837         msg->post(kOffloadPauseMaxUs);
1838     }
1839 }
1840 
1841 void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1842     // We may have called startAudioOffloadPauseTimeout() without
1843     // the AudioSink open and with offloadingAudio enabled.
1844     //
1845     // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
1846     // we always release the wakelock and increment the pause timeout generation.
1847     //
1848     // Note: The acquired wakelock prevents the device from suspending
1849     // immediately after offload pause (in case a resume happens shortly thereafter).
1850     mWakeLock->release(true);
1851     ++mAudioOffloadPauseTimeoutGeneration;
1852 }
1853 
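// Opens (or re-opens) the AudioSink. For offload, the compressed audio_format_t is
// mapped from the mime type and an audio_offload_info_t is filled in; if the offload
// open fails, the code falls back to a 16-bit PCM sink (or notifies an audio teardown
// when offloadOnly was requested). Re-opening is skipped when neither the offload
// info nor the PCM parameters have changed since the last open.
//
// Minimal sketch of the fields read from the input format message; the names come
// from the find* calls below ("channel-mask", "bitrate", "aac-profile" and
// "durationUs" are optional), and the values are illustrative only:
//
//   sp<AMessage> format = new AMessage;
//   format->setInt32("channel-count", 2);
//   format->setInt32("sample-rate", 44100);
//   format->setString("mime", "audio/mp4a-latm");  // consulted only for offload
//   format->setInt64("durationUs", durationUs);    // optional offload hint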
1854 status_t NuPlayer::Renderer::onOpenAudioSink(
1855         const sp<AMessage> &format,
1856         bool offloadOnly,
1857         bool hasVideo,
1858         uint32_t flags,
1859         bool isStreaming) {
1860     ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1861             offloadOnly, offloadingAudio());
1862     bool audioSinkChanged = false;
1863 
1864     int32_t numChannels;
1865     CHECK(format->findInt32("channel-count", &numChannels));
1866 
1867     int32_t channelMask;
1868     if (!format->findInt32("channel-mask", &channelMask)) {
1869         // Signal the AudioSink to derive the mask from the channel count.
1870         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1871     }
1872 
1873     int32_t sampleRate;
1874     CHECK(format->findInt32("sample-rate", &sampleRate));
1875 
1876     if (offloadingAudio()) {
1877         audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
1878         AString mime;
1879         CHECK(format->findString("mime", &mime));
1880         status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1881 
1882         if (err != OK) {
1883             ALOGE("Couldn't map mime \"%s\" to a valid "
1884                     "audio_format", mime.c_str());
1885             onDisableOffloadAudio();
1886         } else {
1887             ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1888                     mime.c_str(), audioFormat);
1889 
1890             int avgBitRate = -1;
1891             format->findInt32("bitrate", &avgBitRate);
1892 
1893             int32_t aacProfile = -1;
1894             if (audioFormat == AUDIO_FORMAT_AAC
1895                     && format->findInt32("aac-profile", &aacProfile)) {
1896                 // Redefine the AAC format according to the AAC profile.
1897                 mapAACProfileToAudioFormat(
1898                         audioFormat,
1899                         aacProfile);
1900             }
1901 
1902             audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1903             offloadInfo.duration_us = -1;
1904             format->findInt64(
1905                     "durationUs", &offloadInfo.duration_us);
1906             offloadInfo.sample_rate = sampleRate;
1907             offloadInfo.channel_mask = channelMask;
1908             offloadInfo.format = audioFormat;
1909             offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1910             offloadInfo.bit_rate = avgBitRate;
1911             offloadInfo.has_video = hasVideo;
1912             offloadInfo.is_streaming = isStreaming;
1913 
1914             if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1915                 ALOGV("openAudioSink: no change in offload mode");
1916                 // no change from previous configuration, everything ok.
1917                 return OK;
1918             }
1919             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1920 
1921             ALOGV("openAudioSink: try to open AudioSink in offload mode");
1922             uint32_t offloadFlags = flags;
1923             offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1924             offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1925             audioSinkChanged = true;
1926             mAudioSink->close();
1927 
1928             err = mAudioSink->open(
1929                     sampleRate,
1930                     numChannels,
1931                     (audio_channel_mask_t)channelMask,
1932                     audioFormat,
1933                     0 /* bufferCount - unused */,
1934                     &NuPlayer::Renderer::AudioSinkCallback,
1935                     this,
1936                     (audio_output_flags_t)offloadFlags,
1937                     &offloadInfo);
1938 
1939             if (err == OK) {
1940                 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1941             }
1942 
1943             if (err == OK) {
1944                 // If the playback is offloaded to h/w, we pass
1945                 // the HAL some metadata information.
1946                 // We don't want to do this for PCM because it
1947                 // will be going through the AudioFlinger mixer
1948                 // before reaching the hardware.
1949                 // TODO
1950                 mCurrentOffloadInfo = offloadInfo;
1951                 if (!mPaused) { // for preview mode, don't start if paused
1952                     err = mAudioSink->start();
1953                 }
1954                 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1955             }
1956             if (err != OK) {
1957                 // Clean up, fall back to non offload mode.
1958                 mAudioSink->close();
1959                 onDisableOffloadAudio();
1960                 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1961                 ALOGV("openAudioSink: offload failed");
1962                 if (offloadOnly) {
1963                     notifyAudioTearDown(kForceNonOffload);
1964                 }
1965             } else {
1966                 mUseAudioCallback = true;  // offload mode transfers data through callback
1967                 ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1968             }
1969         }
1970     }
1971     if (!offloadOnly && !offloadingAudio()) {
1972         ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1973         uint32_t pcmFlags = flags;
1974         pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1975 
1976         const PcmInfo info = {
1977                 (audio_channel_mask_t)channelMask,
1978                 (audio_output_flags_t)pcmFlags,
1979                 AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
1980                 numChannels,
1981                 sampleRate
1982         };
1983         if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
1984             ALOGV("openAudioSink: no change in pcm mode");
1985             // no change from previous configuration, everything ok.
1986             return OK;
1987         }
1988 
1989         audioSinkChanged = true;
1990         mAudioSink->close();
1991         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1992         // Note: It is possible to set up the callback, but not use it to send audio data.
1993         // This requires a fix in AudioSink to explicitly specify the transfer mode.
1994         mUseAudioCallback = getUseAudioCallbackSetting();
1995         if (mUseAudioCallback) {
1996             ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1997         }
1998 
1999         // Compute the desired buffer size.
2000         // For callback mode, the amount of time before wakeup is about half the buffer size.
2001         const uint32_t frameCount =
2002                 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
2003 
2004         // doNotReconnect means the AudioSink will signal back and let NuPlayer re-construct the
2005         // AudioSink. We don't want this when there's video, because it would cause a video seek
2006         // to the previous I-frame. But we do want it when there's only audio, because it gives
2007         // NuPlayer a chance to switch from non-offload mode to offload mode.
2008         // So we only set doNotReconnect when there's no video.
2009         const bool doNotReconnect = !hasVideo;
2010 
2011         // We should always be able to set our playback settings if the sink is closed.
2012         LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
2013                 "onOpenAudioSink: can't set playback rate on closed sink");
2014         status_t err = mAudioSink->open(
2015                     sampleRate,
2016                     numChannels,
2017                     (audio_channel_mask_t)channelMask,
2018                     AUDIO_FORMAT_PCM_16_BIT,
2019                     0 /* bufferCount - unused */,
2020                     mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
2021                     mUseAudioCallback ? this : NULL,
2022                     (audio_output_flags_t)pcmFlags,
2023                     NULL,
2024                     doNotReconnect,
2025                     frameCount);
2026         if (err != OK) {
2027             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2028             mAudioSink->close();
2029             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2030             return err;
2031         }
2032         mCurrentPcmInfo = info;
2033         if (!mPaused) { // for preview mode, don't start if paused
2034             mAudioSink->start();
2035         }
2036     }
2037     if (audioSinkChanged) {
2038         onAudioSinkChanged();
2039     }
2040     mAudioTornDown = false;
2041     return OK;
2042 }
2043 
2044 void NuPlayer::Renderer::onCloseAudioSink() {
2045     mAudioSink->close();
2046     mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2047     mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2048 }
2049 
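// Applies a mid-stream audio format change by re-running onOpenAudioSink() with the
// parameters unpacked from "meta" ("format", "offload-only", "has-video", "flags",
// "isStreaming"), then posts the supplied notify message, adding an "err" field if
// the sink could not be opened.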
2050 void NuPlayer::Renderer::onChangeAudioFormat(
2051         const sp<AMessage> &meta, const sp<AMessage> &notify) {
2052     sp<AMessage> format;
2053     CHECK(meta->findMessage("format", &format));
2054 
2055     int32_t offloadOnly;
2056     CHECK(meta->findInt32("offload-only", &offloadOnly));
2057 
2058     int32_t hasVideo;
2059     CHECK(meta->findInt32("has-video", &hasVideo));
2060 
2061     uint32_t flags;
2062     CHECK(meta->findInt32("flags", (int32_t *)&flags));
2063 
2064     uint32_t isStreaming;
2065     CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2066 
2067     status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2068 
2069     if (err != OK) {
2070         notify->setInt32("err", err);
2071     }
2072     notify->post();
2073 }
2074 
2075 }  // namespace android
2076 
2077