1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "NuPlayerRenderer"
19 #include <utils/Log.h>
20 
21 #include "AWakeLock.h"
22 #include "NuPlayerRenderer.h"
23 #include <algorithm>
24 #include <cutils/properties.h>
25 #include <media/stagefright/foundation/ADebug.h>
26 #include <media/stagefright/foundation/AMessage.h>
27 #include <media/stagefright/foundation/AUtils.h>
28 #include <media/stagefright/MediaClock.h>
29 #include <media/stagefright/MediaCodecConstants.h>
30 #include <media/stagefright/MediaDefs.h>
31 #include <media/stagefright/MediaErrors.h>
32 #include <media/stagefright/MetaData.h>
33 #include <media/stagefright/Utils.h>
34 #include <media/stagefright/VideoFrameScheduler.h>
35 #include <media/MediaCodecBuffer.h>
36 #include <utils/SystemClock.h>
37 
38 #include <inttypes.h>
39 
40 namespace android {
41 
42 /*
43  * Example of common configuration settings in shell script form
44 
45    #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
46    adb shell setprop audio.offload.disable 1
47 
48    #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
49    adb shell setprop audio.offload.video 1
50 
51    #Use audio callbacks for PCM data
52    adb shell setprop media.stagefright.audio.cbk 1
53 
54    #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
55    adb shell setprop media.stagefright.audio.deep 1
56 
57    #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
58    adb shell setprop media.stagefright.audio.sink 1000
59 
60  * These configurations take effect for the next track played (not the current track).
61  */
62 
63 static inline bool getUseAudioCallbackSetting() {
64     return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
65 }
66 
67 static inline int32_t getAudioSinkPcmMsSetting() {
68     return property_get_int32(
69             "media.stagefright.audio.sink", 500 /* default_value */);
70 }
71 
72 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
73 // is closed to allow the audio DSP to power down.
74 static const int64_t kOffloadPauseMaxUs = 10000000LL;
75 
76 // Maximum allowed delay from AudioSink, 1.5 seconds.
77 static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;
78 
79 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
80 
81 // Default video frame display duration when only video exists.
82 // Used to set max media time in MediaClock.
83 static const int64_t kDefaultVideoFrameIntervalUs = 100000LL;
84 
85 // static
86 const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
87         AUDIO_CHANNEL_NONE,
88         AUDIO_OUTPUT_FLAG_NONE,
89         AUDIO_FORMAT_INVALID,
90         0, // mNumChannels
91         0 // mSampleRate
92 };
93 
94 // static
95 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
96 
97 static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
98     switch (pcmEncoding) {
99     case kAudioEncodingPcmFloat:
100         return AUDIO_FORMAT_PCM_FLOAT;
101     case kAudioEncodingPcm16bit:
102         return AUDIO_FORMAT_PCM_16_BIT;
103     case kAudioEncodingPcm8bit:
104         return AUDIO_FORMAT_PCM_8_BIT; // TODO: do we want to support this?
105     default:
106         ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
107         return AUDIO_FORMAT_INVALID;
108     }
109 }
110 
111 NuPlayer::Renderer::Renderer(
112         const sp<MediaPlayerBase::AudioSink> &sink,
113         const sp<MediaClock> &mediaClock,
114         const sp<AMessage> &notify,
115         uint32_t flags)
116     : mAudioSink(sink),
117       mUseVirtualAudioSink(false),
118       mNotify(notify),
119       mFlags(flags),
120       mNumFramesWritten(0),
121       mDrainAudioQueuePending(false),
122       mDrainVideoQueuePending(false),
123       mAudioQueueGeneration(0),
124       mVideoQueueGeneration(0),
125       mAudioDrainGeneration(0),
126       mVideoDrainGeneration(0),
127       mAudioEOSGeneration(0),
128       mMediaClock(mediaClock),
129       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
130       mAudioFirstAnchorTimeMediaUs(-1),
131       mAnchorTimeMediaUs(-1),
132       mAnchorNumFramesWritten(-1),
133       mVideoLateByUs(0LL),
134       mNextVideoTimeMediaUs(-1),
135       mHasAudio(false),
136       mHasVideo(false),
137       mNotifyCompleteAudio(false),
138       mNotifyCompleteVideo(false),
139       mSyncQueues(false),
140       mPaused(false),
141       mPauseDrainAudioAllowedUs(0),
142       mVideoSampleReceived(false),
143       mVideoRenderingStarted(false),
144       mVideoRenderingStartGeneration(0),
145       mAudioRenderingStartGeneration(0),
146       mRenderingDataDelivered(false),
147       mNextAudioClockUpdateTimeUs(-1),
148       mLastAudioMediaTimeUs(-1),
149       mAudioOffloadPauseTimeoutGeneration(0),
150       mAudioTornDown(false),
151       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
152       mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
153       mTotalBuffersQueued(0),
154       mLastAudioBufferDrained(0),
155       mUseAudioCallback(false),
156       mWakeLock(new AWakeLock()) {
157     CHECK(mediaClock != NULL);
158     mPlaybackRate = mPlaybackSettings.mSpeed;
159     mMediaClock->setPlaybackRate(mPlaybackRate);
160     (void)mSyncFlag.test_and_set();
161 }
162 
163 NuPlayer::Renderer::~Renderer() {
164     if (offloadingAudio()) {
165         mAudioSink->stop();
166         mAudioSink->flush();
167         mAudioSink->close();
168     }
169 
170     // Try to avoid a race condition in case the callback is still active.
171     Mutex::Autolock autoLock(mLock);
172     if (mUseAudioCallback) {
173         flushQueue(&mAudioQueue);
174         flushQueue(&mVideoQueue);
175     }
176     mWakeLock.clear();
177     mVideoScheduler.clear();
178     mNotify.clear();
179     mAudioSink.clear();
180 }
181 
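// Queues a decoded buffer for rendering. The message carries the current queue
// generation so that buffers queued before a flush can later be detected and
// dropped as stale (see dropBufferIfStale()).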
182 void NuPlayer::Renderer::queueBuffer(
183         bool audio,
184         const sp<MediaCodecBuffer> &buffer,
185         const sp<AMessage> &notifyConsumed) {
186     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
187     msg->setInt32("queueGeneration", getQueueGeneration(audio));
188     msg->setInt32("audio", static_cast<int32_t>(audio));
189     msg->setObject("buffer", buffer);
190     msg->setMessage("notifyConsumed", notifyConsumed);
191     msg->post();
192 }
193 
194 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
195     CHECK_NE(finalResult, (status_t)OK);
196 
197     sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
198     msg->setInt32("queueGeneration", getQueueGeneration(audio));
199     msg->setInt32("audio", static_cast<int32_t>(audio));
200     msg->setInt32("finalResult", finalResult);
201     msg->post();
202 }
203 
204 status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
205     sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
206     writeToAMessage(msg, rate);
207     sp<AMessage> response;
208     status_t err = msg->postAndAwaitResponse(&response);
209     if (err == OK && response != NULL) {
210         CHECK(response->findInt32("err", &err));
211     }
212     return err;
213 }
214 
215 status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
216     if (rate.mSpeed == 0.f) {
217         onPause();
218         // Don't call the audiosink's setPlaybackRate when pausing, as the pitch does not
219         // have to correspond to any non-zero speed (e.g., the old speed). Keep the
220         // settings nonetheless, using the old speed, in case the audiosink changes.
221         AudioPlaybackRate newRate = rate;
222         newRate.mSpeed = mPlaybackSettings.mSpeed;
223         mPlaybackSettings = newRate;
224         return OK;
225     }
226 
227     if (mAudioSink != NULL && mAudioSink->ready()) {
228         status_t err = mAudioSink->setPlaybackRate(rate);
229         if (err != OK) {
230             return err;
231         }
232     }
233     mPlaybackSettings = rate;
234     mPlaybackRate = rate.mSpeed;
235     mMediaClock->setPlaybackRate(mPlaybackRate);
236     return OK;
237 }
238 
239 status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
240     sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
241     sp<AMessage> response;
242     status_t err = msg->postAndAwaitResponse(&response);
243     if (err == OK && response != NULL) {
244         CHECK(response->findInt32("err", &err));
245         if (err == OK) {
246             readFromAMessage(response, rate);
247         }
248     }
249     return err;
250 }
251 
252 status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
253     if (mAudioSink != NULL && mAudioSink->ready()) {
254         status_t err = mAudioSink->getPlaybackRate(rate);
255         if (err == OK) {
256             if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
257                 ALOGW("correcting mismatch in internal/external playback rate");
258             }
259             // Get the playback settings actually used by the audiosink, as they may be
260             // slightly off because the audiosink does not apply small changes.
261             mPlaybackSettings = *rate;
262             if (mPaused) {
263                 rate->mSpeed = 0.f;
264             }
265         }
266         return err;
267     }
268     *rate = mPlaybackSettings;
269     return OK;
270 }
271 
272 status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
273     sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
274     writeToAMessage(msg, sync, videoFpsHint);
275     sp<AMessage> response;
276     status_t err = msg->postAndAwaitResponse(&response);
277     if (err == OK && response != NULL) {
278         CHECK(response->findInt32("err", &err));
279     }
280     return err;
281 }
282 
283 status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
284     if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
285         return BAD_VALUE;
286     }
287     // TODO: support sync sources
288     return INVALID_OPERATION;
289 }
290 
291 status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
292     sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
293     sp<AMessage> response;
294     status_t err = msg->postAndAwaitResponse(&response);
295     if (err == OK && response != NULL) {
296         CHECK(response->findInt32("err", &err));
297         if (err == OK) {
298             readFromAMessage(response, sync, videoFps);
299         }
300     }
301     return err;
302 }
303 
304 status_t NuPlayer::Renderer::onGetSyncSettings(
305         AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
306     *sync = mSyncSettings;
307     *videoFps = -1.f;
308     return OK;
309 }
310 
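// Flushes the pending audio or video queue. Bumping the queue and drain
// generations invalidates in-flight messages posted before the flush; the
// mSyncFlag/mSyncCount handshake below then waits until the looper has
// finished the job it is currently processing.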
311 void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
312     {
313         Mutex::Autolock autoLock(mLock);
314         if (audio) {
315             mNotifyCompleteAudio |= notifyComplete;
316             clearAudioFirstAnchorTime_l();
317             ++mAudioQueueGeneration;
318             ++mAudioDrainGeneration;
319         } else {
320             mNotifyCompleteVideo |= notifyComplete;
321             ++mVideoQueueGeneration;
322             ++mVideoDrainGeneration;
323             mNextVideoTimeMediaUs = -1;
324         }
325 
326         mMediaClock->clearAnchor();
327         mVideoLateByUs = 0;
328         mSyncQueues = false;
329     }
330 
331     // Wait until the current job in the message queue is done, to make sure
332     // buffer processing from the old generation is finished. After the current
333     // job is finished, access to buffers is protected by the generation check.
334     Mutex::Autolock syncLock(mSyncLock);
335     int64_t syncCount = mSyncCount;
336     mSyncFlag.clear();
337 
338     // Make sure message queue is not empty after mSyncFlag is cleared.
339     sp<AMessage> msg = new AMessage(kWhatFlush, this);
340     msg->setInt32("audio", static_cast<int32_t>(audio));
341     msg->post();
342 
343     int64_t uptimeMs = uptimeMillis();
344     while (mSyncCount == syncCount) {
345         (void)mSyncCondition.waitRelative(mSyncLock, ms2ns(1000));
346         if (uptimeMillis() - uptimeMs > 1000) {
347             ALOGW("flush(): no wake-up from sync point for 1s; stop waiting to "
348                   "prevent being stuck indefinitely.");
349             break;
350         }
351     }
352 }
353 
354 void NuPlayer::Renderer::signalTimeDiscontinuity() {
355 }
356 
357 void NuPlayer::Renderer::signalDisableOffloadAudio() {
358     (new AMessage(kWhatDisableOffloadAudio, this))->post();
359 }
360 
361 void NuPlayer::Renderer::signalEnableOffloadAudio() {
362     (new AMessage(kWhatEnableOffloadAudio, this))->post();
363 }
364 
365 void NuPlayer::Renderer::pause() {
366     (new AMessage(kWhatPause, this))->post();
367 }
368 
369 void NuPlayer::Renderer::resume() {
370     (new AMessage(kWhatResume, this))->post();
371 }
372 
373 void NuPlayer::Renderer::setVideoFrameRate(float fps) {
374     sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
375     msg->setFloat("frame-rate", fps);
376     msg->post();
377 }
378 
379 // Called on any threads without mLock acquired.
380 status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
381     status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
382     if (result == OK) {
383         return result;
384     }
385 
386     // MediaClock has not started yet. Try to start it if possible.
387     {
388         Mutex::Autolock autoLock(mLock);
389         if (mAudioFirstAnchorTimeMediaUs == -1) {
390             return result;
391         }
392 
393         AudioTimestamp ts;
394         status_t res = mAudioSink->getTimestamp(ts);
395         if (res != OK) {
396             return result;
397         }
398 
399         // AudioSink has rendered some frames.
400         int64_t nowUs = ALooper::GetNowUs();
401         int64_t playedOutDurationUs = mAudioSink->getPlayedOutDurationUs(nowUs);
402         if (playedOutDurationUs == 0) {
403             *mediaUs = mAudioFirstAnchorTimeMediaUs;
404             return OK;
405         }
406         int64_t nowMediaUs = playedOutDurationUs + mAudioFirstAnchorTimeMediaUs;
407         mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
408     }
409 
410     return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
411 }
412 
413 void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
414     mAudioFirstAnchorTimeMediaUs = -1;
415     mMediaClock->setStartingTimeMedia(-1);
416 }
417 
418 void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
419     if (mAudioFirstAnchorTimeMediaUs == -1) {
420         mAudioFirstAnchorTimeMediaUs = mediaUs;
421         mMediaClock->setStartingTimeMedia(mediaUs);
422     }
423 }
424 
425 // Called on renderer looper.
426 void NuPlayer::Renderer::clearAnchorTime() {
427     mMediaClock->clearAnchor();
428     mAnchorTimeMediaUs = -1;
429     mAnchorNumFramesWritten = -1;
430 }
431 
432 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
433     Mutex::Autolock autoLock(mLock);
434     mVideoLateByUs = lateUs;
435 }
436 
437 int64_t NuPlayer::Renderer::getVideoLateByUs() {
438     Mutex::Autolock autoLock(mLock);
439     return mVideoLateByUs;
440 }
441 
442 status_t NuPlayer::Renderer::openAudioSink(
443         const sp<AMessage> &format,
444         bool offloadOnly,
445         bool hasVideo,
446         uint32_t flags,
447         bool *isOffloaded,
448         bool isStreaming) {
449     sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
450     msg->setMessage("format", format);
451     msg->setInt32("offload-only", offloadOnly);
452     msg->setInt32("has-video", hasVideo);
453     msg->setInt32("flags", flags);
454     msg->setInt32("isStreaming", isStreaming);
455 
456     sp<AMessage> response;
457     status_t postStatus = msg->postAndAwaitResponse(&response);
458 
459     int32_t err;
460     if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
461         err = INVALID_OPERATION;
462     } else if (err == OK && isOffloaded != NULL) {
463         int32_t offload;
464         CHECK(response->findInt32("offload", &offload));
465         *isOffloaded = (offload != 0);
466     }
467     return err;
468 }
469 
470 void NuPlayer::Renderer::closeAudioSink() {
471     sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
472 
473     sp<AMessage> response;
474     msg->postAndAwaitResponse(&response);
475 }
476 
477 void NuPlayer::Renderer::changeAudioFormat(
478         const sp<AMessage> &format,
479         bool offloadOnly,
480         bool hasVideo,
481         uint32_t flags,
482         bool isStreaming,
483         const sp<AMessage> &notify) {
484     sp<AMessage> meta = new AMessage;
485     meta->setMessage("format", format);
486     meta->setInt32("offload-only", offloadOnly);
487     meta->setInt32("has-video", hasVideo);
488     meta->setInt32("flags", flags);
489     meta->setInt32("isStreaming", isStreaming);
490 
491     sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
492     msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
493     msg->setMessage("notify", notify);
494     msg->setMessage("meta", meta);
495     msg->post();
496 }
497 
498 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
499     switch (msg->what()) {
500         case kWhatOpenAudioSink:
501         {
502             sp<AMessage> format;
503             CHECK(msg->findMessage("format", &format));
504 
505             int32_t offloadOnly;
506             CHECK(msg->findInt32("offload-only", &offloadOnly));
507 
508             int32_t hasVideo;
509             CHECK(msg->findInt32("has-video", &hasVideo));
510 
511             uint32_t flags;
512             CHECK(msg->findInt32("flags", (int32_t *)&flags));
513 
514             uint32_t isStreaming;
515             CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
516 
517             status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
518 
519             sp<AMessage> response = new AMessage;
520             response->setInt32("err", err);
521             response->setInt32("offload", offloadingAudio());
522 
523             sp<AReplyToken> replyID;
524             CHECK(msg->senderAwaitsResponse(&replyID));
525             response->postReply(replyID);
526 
527             break;
528         }
529 
530         case kWhatCloseAudioSink:
531         {
532             sp<AReplyToken> replyID;
533             CHECK(msg->senderAwaitsResponse(&replyID));
534 
535             onCloseAudioSink();
536 
537             sp<AMessage> response = new AMessage;
538             response->postReply(replyID);
539             break;
540         }
541 
542         case kWhatStopAudioSink:
543         {
544             mAudioSink->stop();
545             break;
546         }
547 
548         case kWhatChangeAudioFormat:
549         {
550             int32_t queueGeneration;
551             CHECK(msg->findInt32("queueGeneration", &queueGeneration));
552 
553             sp<AMessage> notify;
554             CHECK(msg->findMessage("notify", &notify));
555 
556             if (offloadingAudio()) {
557                 ALOGW("changeAudioFormat should NOT be called in offload mode");
558                 notify->setInt32("err", INVALID_OPERATION);
559                 notify->post();
560                 break;
561             }
562 
563             sp<AMessage> meta;
564             CHECK(msg->findMessage("meta", &meta));
565 
566             if (queueGeneration != getQueueGeneration(true /* audio */)
567                     || mAudioQueue.empty()) {
568                 onChangeAudioFormat(meta, notify);
569                 break;
570             }
571 
572             QueueEntry entry;
573             entry.mNotifyConsumed = notify;
574             entry.mMeta = meta;
575 
576             Mutex::Autolock autoLock(mLock);
577             mAudioQueue.push_back(entry);
578             postDrainAudioQueue_l();
579 
580             break;
581         }
582 
583         case kWhatDrainAudioQueue:
584         {
585             mDrainAudioQueuePending = false;
586 
587             int32_t generation;
588             CHECK(msg->findInt32("drainGeneration", &generation));
589             if (generation != getDrainGeneration(true /* audio */)) {
590                 break;
591             }
592 
593             if (onDrainAudioQueue()) {
594                 uint32_t numFramesPlayed;
595                 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
596                          (status_t)OK);
597 
598                 // Handle AudioTrack race when start is immediately called after flush.
599                 uint32_t numFramesPendingPlayout =
600                     (mNumFramesWritten > numFramesPlayed ?
601                         mNumFramesWritten - numFramesPlayed : 0);
602 
603                 // This is how long the audio sink will have data to
604                 // play back.
605                 int64_t delayUs =
606                     mAudioSink->msecsPerFrame()
607                         * numFramesPendingPlayout * 1000LL;
608                 if (mPlaybackRate > 1.0f) {
609                     delayUs /= mPlaybackRate;
610                 }
611 
612                 // Let's give it more data after about half that time
613                 // has elapsed.
614                 delayUs /= 2;
615                 // check the buffer size to estimate maximum delay permitted.
616                 const int64_t maxDrainDelayUs = std::max(
617                         mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
618                 ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
619                         (long long)delayUs, (long long)maxDrainDelayUs);
620                 Mutex::Autolock autoLock(mLock);
621                 postDrainAudioQueue_l(delayUs);
622             }
623             break;
624         }
625 
626         case kWhatDrainVideoQueue:
627         {
628             int32_t generation;
629             CHECK(msg->findInt32("drainGeneration", &generation));
630             if (generation != getDrainGeneration(false /* audio */)) {
631                 break;
632             }
633 
634             mDrainVideoQueuePending = false;
635 
636             onDrainVideoQueue();
637 
638             postDrainVideoQueue();
639             break;
640         }
641 
642         case kWhatPostDrainVideoQueue:
643         {
644             int32_t generation;
645             CHECK(msg->findInt32("drainGeneration", &generation));
646             if (generation != getDrainGeneration(false /* audio */)) {
647                 break;
648             }
649 
650             mDrainVideoQueuePending = false;
651             postDrainVideoQueue();
652             break;
653         }
654 
655         case kWhatQueueBuffer:
656         {
657             onQueueBuffer(msg);
658             break;
659         }
660 
661         case kWhatQueueEOS:
662         {
663             onQueueEOS(msg);
664             break;
665         }
666 
667         case kWhatEOS:
668         {
669             int32_t generation;
670             CHECK(msg->findInt32("audioEOSGeneration", &generation));
671             if (generation != mAudioEOSGeneration) {
672                 break;
673             }
674             status_t finalResult;
675             CHECK(msg->findInt32("finalResult", &finalResult));
676             notifyEOS(true /* audio */, finalResult);
677             break;
678         }
679 
680         case kWhatConfigPlayback:
681         {
682             sp<AReplyToken> replyID;
683             CHECK(msg->senderAwaitsResponse(&replyID));
684             AudioPlaybackRate rate;
685             readFromAMessage(msg, &rate);
686             status_t err = onConfigPlayback(rate);
687             sp<AMessage> response = new AMessage;
688             response->setInt32("err", err);
689             response->postReply(replyID);
690             break;
691         }
692 
693         case kWhatGetPlaybackSettings:
694         {
695             sp<AReplyToken> replyID;
696             CHECK(msg->senderAwaitsResponse(&replyID));
697             AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
698             status_t err = onGetPlaybackSettings(&rate);
699             sp<AMessage> response = new AMessage;
700             if (err == OK) {
701                 writeToAMessage(response, rate);
702             }
703             response->setInt32("err", err);
704             response->postReply(replyID);
705             break;
706         }
707 
708         case kWhatConfigSync:
709         {
710             sp<AReplyToken> replyID;
711             CHECK(msg->senderAwaitsResponse(&replyID));
712             AVSyncSettings sync;
713             float videoFpsHint;
714             readFromAMessage(msg, &sync, &videoFpsHint);
715             status_t err = onConfigSync(sync, videoFpsHint);
716             sp<AMessage> response = new AMessage;
717             response->setInt32("err", err);
718             response->postReply(replyID);
719             break;
720         }
721 
722         case kWhatGetSyncSettings:
723         {
724             sp<AReplyToken> replyID;
725             CHECK(msg->senderAwaitsResponse(&replyID));
726 
727             ALOGV("kWhatGetSyncSettings");
728             AVSyncSettings sync;
729             float videoFps = -1.f;
730             status_t err = onGetSyncSettings(&sync, &videoFps);
731             sp<AMessage> response = new AMessage;
732             if (err == OK) {
733                 writeToAMessage(response, sync, videoFps);
734             }
735             response->setInt32("err", err);
736             response->postReply(replyID);
737             break;
738         }
739 
740         case kWhatFlush:
741         {
742             onFlush(msg);
743             break;
744         }
745 
746         case kWhatDisableOffloadAudio:
747         {
748             onDisableOffloadAudio();
749             break;
750         }
751 
752         case kWhatEnableOffloadAudio:
753         {
754             onEnableOffloadAudio();
755             break;
756         }
757 
758         case kWhatPause:
759         {
760             onPause();
761             break;
762         }
763 
764         case kWhatResume:
765         {
766             onResume();
767             break;
768         }
769 
770         case kWhatSetVideoFrameRate:
771         {
772             float fps;
773             CHECK(msg->findFloat("frame-rate", &fps));
774             onSetVideoFrameRate(fps);
775             break;
776         }
777 
778         case kWhatAudioTearDown:
779         {
780             int32_t reason;
781             CHECK(msg->findInt32("reason", &reason));
782 
783             onAudioTearDown((AudioTearDownReason)reason);
784             break;
785         }
786 
787         case kWhatAudioOffloadPauseTimeout:
788         {
789             int32_t generation;
790             CHECK(msg->findInt32("drainGeneration", &generation));
791             if (generation != mAudioOffloadPauseTimeoutGeneration) {
792                 break;
793             }
794             ALOGV("Audio Offload tear down due to pause timeout.");
795             onAudioTearDown(kDueToTimeout);
796             mWakeLock->release();
797             break;
798         }
799 
800         default:
801             TRESPASS();
802             break;
803     }
804     if (!mSyncFlag.test_and_set()) {
805         Mutex::Autolock syncLock(mSyncLock);
806         ++mSyncCount;
807         mSyncCondition.broadcast();
808     }
809 }
810 
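// Schedules a kWhatDrainAudioQueue message, unless draining is already
// pending, the queues are being synced, or the audio callback path is in use.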
811 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
812     if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
813         return;
814     }
815 
816     if (mAudioQueue.empty()) {
817         return;
818     }
819 
820     // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
821     if (mPaused) {
822         const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
823         if (diffUs > delayUs) {
824             delayUs = diffUs;
825         }
826     }
827 
828     mDrainAudioQueuePending = true;
829     sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
830     msg->setInt32("drainGeneration", mAudioDrainGeneration);
831     msg->post(delayUs);
832 }
833 
834 void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
835     mAudioRenderingStartGeneration = mAudioDrainGeneration;
836     mVideoRenderingStartGeneration = mVideoDrainGeneration;
837     mRenderingDataDelivered = false;
838 }
839 
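// Posts kWhatMediaRenderingStart (at most once) on the first data delivered
// after prepareForMediaRenderingStart_l(), provided no flush has bumped the
// drain generations in between.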
840 void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
841     if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
842         mAudioRenderingStartGeneration == mAudioDrainGeneration) {
843         mRenderingDataDelivered = true;
844         if (mPaused) {
845             return;
846         }
847         mVideoRenderingStartGeneration = -1;
848         mAudioRenderingStartGeneration = -1;
849 
850         sp<AMessage> notify = mNotify->dup();
851         notify->setInt32("what", kWhatMediaRenderingStart);
852         notify->post();
853     }
854 }
855 
856 // static
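// Invoked by the AudioSink on its callback thread; dispatches fill-buffer,
// stream-end and tear-down events back into the renderer.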
857 size_t NuPlayer::Renderer::AudioSinkCallback(
858         MediaPlayerBase::AudioSink * /* audioSink */,
859         void *buffer,
860         size_t size,
861         void *cookie,
862         MediaPlayerBase::AudioSink::cb_event_t event) {
863     NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
864 
865     switch (event) {
866         case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
867         {
868             return me->fillAudioBuffer(buffer, size);
869             break;
870         }
871 
872         case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
873         {
874             ALOGV("AudioSink::CB_EVENT_STREAM_END");
875             me->notifyEOSCallback();
876             break;
877         }
878 
879         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
880         {
881             ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
882             me->notifyAudioTearDown(kDueToError);
883             break;
884         }
885     }
886 
887     return 0;
888 }
889 
890 void NuPlayer::Renderer::notifyEOSCallback() {
891     Mutex::Autolock autoLock(mLock);
892 
893     if (!mUseAudioCallback) {
894         return;
895     }
896 
897     notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
898 }
899 
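// Called from AudioSinkCallback (CB_EVENT_FILL_BUFFER) when the audio callback
// path is in use; copies queued PCM data into the sink's buffer and updates the
// media clock anchor.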
900 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
901     Mutex::Autolock autoLock(mLock);
902 
903     if (!mUseAudioCallback) {
904         return 0;
905     }
906 
907     bool hasEOS = false;
908 
909     size_t sizeCopied = 0;
910     bool firstEntry = true;
911     QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
912     while (sizeCopied < size && !mAudioQueue.empty()) {
913         entry = &*mAudioQueue.begin();
914 
915         if (entry->mBuffer == NULL) { // EOS
916             hasEOS = true;
917             mAudioQueue.erase(mAudioQueue.begin());
918             break;
919         }
920 
921         if (firstEntry && entry->mOffset == 0) {
922             firstEntry = false;
923             int64_t mediaTimeUs;
924             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
925             ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
926             setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
927         }
928 
929         size_t copy = entry->mBuffer->size() - entry->mOffset;
930         size_t sizeRemaining = size - sizeCopied;
931         if (copy > sizeRemaining) {
932             copy = sizeRemaining;
933         }
934 
935         memcpy((char *)buffer + sizeCopied,
936                entry->mBuffer->data() + entry->mOffset,
937                copy);
938 
939         entry->mOffset += copy;
940         if (entry->mOffset == entry->mBuffer->size()) {
941             entry->mNotifyConsumed->post();
942             mAudioQueue.erase(mAudioQueue.begin());
943             entry = NULL;
944         }
945         sizeCopied += copy;
946 
947         notifyIfMediaRenderingStarted_l();
948     }
949 
950     if (mAudioFirstAnchorTimeMediaUs >= 0) {
951         int64_t nowUs = ALooper::GetNowUs();
952         int64_t nowMediaUs =
953             mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
954         // we don't know how much data we are queueing for offloaded tracks.
955         mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
956     }
957 
958     // For non-offloaded audio, we need to compute the frames written because
959     // there is no EVENT_STREAM_END notification. The number of frames written gives
960     // an estimate of the pending played-out duration.
961     if (!offloadingAudio()) {
962         mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
963     }
964 
965     if (hasEOS) {
966         (new AMessage(kWhatStopAudioSink, this))->post();
967         // As there is currently no EVENT_STREAM_END callback notification for
968         // non-offloaded audio tracks, we need to post the EOS ourselves.
969         if (!offloadingAudio()) {
970             int64_t postEOSDelayUs = 0;
971             if (mAudioSink->needsTrailingPadding()) {
972                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
973             }
974             ALOGV("fillAudioBuffer: notifyEOS_l "
975                     "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
976                     mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
977             notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
978         }
979     }
980     return sizeCopied;
981 }
982 
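// Finds the last EOS (or "eos"-marked discontinuity) entry in the audio queue,
// posts all pending consumed/format-change notifications up to it, and drops the
// corresponding samples, so the decoder is not left waiting when the sink is not
// ready.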
983 void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
984     List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
985     bool foundEOS = false;
986     while (it != mAudioQueue.end()) {
987         int32_t eos;
988         QueueEntry *entry = &*it++;
989         if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
990                 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
991             itEOS = it;
992             foundEOS = true;
993         }
994     }
995 
996     if (foundEOS) {
997         // post all replies before EOS and drop the samples
998         for (it = mAudioQueue.begin(); it != itEOS; it++) {
999             if (it->mBuffer == nullptr) {
1000                 if (it->mNotifyConsumed == nullptr) {
1001                     // delay doesn't matter as we don't even have an AudioTrack
1002                     notifyEOS(true /* audio */, it->mFinalResult);
1003                 } else {
1004                     // TAG for re-opening audio sink.
1005                     onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
1006                 }
1007             } else {
1008                 it->mNotifyConsumed->post();
1009             }
1010         }
1011         mAudioQueue.erase(mAudioQueue.begin(), itEOS);
1012     }
1013 }
1014 
1015 bool NuPlayer::Renderer::onDrainAudioQueue() {
1016     // do not drain audio during teardown as queued buffers may be invalid.
1017     if (mAudioTornDown) {
1018         return false;
1019     }
1020     // TODO: This call to getPosition checks if AudioTrack has been created
1021     // in AudioSink before draining audio. If AudioTrack doesn't exist, then
1022     // CHECKs on getPosition will fail.
1023     // We still need to figure out why AudioTrack is not created when
1024     // this function is called. One possible reason could be leftover
1025     // audio. Another possible place is to check whether decoder
1026     // has received INFO_FORMAT_CHANGED as the first buffer since
1027     // AudioSink is opened there, and possible interactions with flush
1028     // immediately after start. Investigate error message
1029     // "vorbis_dsp_synthesis returned -135", along with RTSP.
1030     uint32_t numFramesPlayed;
1031     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
1032         // When getPosition fails, renderer will not reschedule the draining
1033         // unless new samples are queued.
1034         // If we have pending EOS (or "eos" marker for discontinuities), we need
1035         // to post these now as NuPlayerDecoder might be waiting for it.
1036         drainAudioQueueUntilLastEOS();
1037 
1038         ALOGW("onDrainAudioQueue(): audio sink is not ready");
1039         return false;
1040     }
1041 
1042 #if 0
1043     ssize_t numFramesAvailableToWrite =
1044         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
1045 
1046     if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
1047         ALOGI("audio sink underrun");
1048     } else {
1049         ALOGV("audio queue has %d frames left to play",
1050              mAudioSink->frameCount() - numFramesAvailableToWrite);
1051     }
1052 #endif
1053 
1054     uint32_t prevFramesWritten = mNumFramesWritten;
1055     while (!mAudioQueue.empty()) {
1056         QueueEntry *entry = &*mAudioQueue.begin();
1057 
1058         if (entry->mBuffer == NULL) {
1059             if (entry->mNotifyConsumed != nullptr) {
1060                 // TAG for re-open audio sink.
1061                 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1062                 mAudioQueue.erase(mAudioQueue.begin());
1063                 continue;
1064             }
1065 
1066             // EOS
1067             if (mPaused) {
1068                 // Do not notify EOS when paused.
1069                 // This is needed to avoid switching to the next clip while paused.
1070                 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1071                 return false;
1072             }
1073 
1074             int64_t postEOSDelayUs = 0;
1075             if (mAudioSink->needsTrailingPadding()) {
1076                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1077             }
1078             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1079             mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1080 
1081             mAudioQueue.erase(mAudioQueue.begin());
1082             entry = NULL;
1083             if (mAudioSink->needsTrailingPadding()) {
1084                 // If we're not in gapless playback (i.e. through setNextPlayer), we
1085                 // need to stop the track here, because that will play out the last
1086                 // little bit at the end of the file. Otherwise short files won't play.
1087                 mAudioSink->stop();
1088                 mNumFramesWritten = 0;
1089             }
1090             return false;
1091         }
1092 
1093         mLastAudioBufferDrained = entry->mBufferOrdinal;
1094 
1095         // ignore 0-sized buffer which could be EOS marker with no data
1096         if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1097             int64_t mediaTimeUs;
1098             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1099             ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1100                     mediaTimeUs / 1E6);
1101             onNewAudioMediaTime(mediaTimeUs);
1102         }
1103 
1104         size_t copy = entry->mBuffer->size() - entry->mOffset;
1105 
1106         ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1107                                             copy, false /* blocking */);
1108         if (written < 0) {
1109             // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1110             if (written == WOULD_BLOCK) {
1111                 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1112             } else {
1113                 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1114                 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1115                 // true, in which case the NuPlayer will handle the reconnect.
1116                 notifyAudioTearDown(kDueToError);
1117             }
1118             break;
1119         }
1120 
1121         entry->mOffset += written;
1122         size_t remainder = entry->mBuffer->size() - entry->mOffset;
1123         if ((ssize_t)remainder < mAudioSink->frameSize()) {
1124             if (remainder > 0) {
1125                 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1126                         remainder);
1127                 entry->mOffset += remainder;
1128                 copy -= remainder;
1129             }
1130 
1131             entry->mNotifyConsumed->post();
1132             mAudioQueue.erase(mAudioQueue.begin());
1133 
1134             entry = NULL;
1135         }
1136 
1137         size_t copiedFrames = written / mAudioSink->frameSize();
1138         mNumFramesWritten += copiedFrames;
1139 
1140         {
1141             Mutex::Autolock autoLock(mLock);
1142             int64_t maxTimeMedia;
1143             maxTimeMedia =
1144                 mAnchorTimeMediaUs +
1145                         (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1146                                 * 1000LL * mAudioSink->msecsPerFrame());
1147             mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1148 
1149             notifyIfMediaRenderingStarted_l();
1150         }
1151 
1152         if (written != (ssize_t)copy) {
1153             // A short count was received from AudioSink::write()
1154             //
1155             // AudioSink write is called in non-blocking mode.
1156             // It may return with a short count when:
1157             //
1158             // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1159             //    discarded.
1160             // 2) The data to be copied exceeds the available buffer in AudioSink.
1161             // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1162             // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1163 
1164             // (Case 1)
1165             // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
1166             // needs to fail, as we should not carry over fractional frames between calls.
1167             CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1168 
1169             // (Case 2, 3, 4)
1170             // Return early to the caller.
1171             // Beware of calling immediately again as this may busy-loop if you are not careful.
1172             ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1173             break;
1174         }
1175     }
1176 
1177     // calculate whether we need to reschedule another write.
1178     bool reschedule = !mAudioQueue.empty()
1179             && (!mPaused
1180                 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1181     //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
1182     //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1183     return reschedule;
1184 }
1185 
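// Converts a frame count into a duration at the current sink sample rate
// (offload or PCM), assuming playback at normal (1.0) speed.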
1186 int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1187     int32_t sampleRate = offloadingAudio() ?
1188             mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1189     if (sampleRate == 0) {
1190         ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1191         return 0;
1192     }
1193 
1194     return (int64_t)(numFrames * 1000000LL / sampleRate);
1195 }
1196 
1197 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1198 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1199     int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1200     if (mUseVirtualAudioSink) {
1201         int64_t nowUs = ALooper::GetNowUs();
1202         int64_t mediaUs;
1203         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
1204             return 0LL;
1205         } else {
1206             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1207         }
1208     }
1209 
1210     const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1211     int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
1212     if (pendingUs < 0) {
1213         // This shouldn't happen unless the timestamp is stale.
1214         ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1215                 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1216                 __func__, (long long)pendingUs,
1217                 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1218         pendingUs = 0;
1219     }
1220     return pendingUs;
1221 }
1222 
1223 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1224     int64_t realUs;
1225     if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1226         // If we failed to get the current position, e.g. because the audio clock is
1227         // not ready, just play out the video immediately without delay.
1228         return nowUs;
1229     }
1230     return realUs;
1231 }
1232 
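// Updates the MediaClock anchor from a newly written audio buffer timestamp.
// If the AudioSink never starts rendering despite enough queued data, falls
// back to a system-clock-driven "virtual" audio sink.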
1233 void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
1234     Mutex::Autolock autoLock(mLock);
1235     // TRICKY: vorbis decoder generates multiple frames with the same
1236     // timestamp, so only update on the first frame with a given timestamp
1237     if (mediaTimeUs == mAnchorTimeMediaUs) {
1238         return;
1239     }
1240     setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
1241 
1242     // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
1243     if (mNextAudioClockUpdateTimeUs == -1) {
1244         AudioTimestamp ts;
1245         if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
1246             mNextAudioClockUpdateTimeUs = 0; // start our clock updates
1247         }
1248     }
1249     int64_t nowUs = ALooper::GetNowUs();
1250     if (mNextAudioClockUpdateTimeUs >= 0) {
1251         if (nowUs >= mNextAudioClockUpdateTimeUs) {
1252             int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
1253             mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
1254             mUseVirtualAudioSink = false;
1255             mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
1256         }
1257     } else {
1258         int64_t unused;
1259         if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
1260                 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
1261                         > kMaxAllowedAudioSinkDelayUs)) {
1262             // Enough data has been sent to AudioSink, but AudioSink has not rendered
1263             // any data yet. Something is wrong with AudioSink, e.g., the device is not
1264             // connected to audio out.
1265             // Switch to system clock. This essentially creates a virtual AudioSink with
1266             // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
1267             // This virtual AudioSink renders audio data starting from the very first sample
1268             // and it's paced by system clock.
1269             ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
1270             mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
1271             mUseVirtualAudioSink = true;
1272         }
1273     }
1274     mAnchorNumFramesWritten = mNumFramesWritten;
1275     mAnchorTimeMediaUs = mediaTimeUs;
1276 }
1277 
1278 // Called without mLock acquired.
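// Schedules the next kWhatDrainVideoQueue: EOS is posted immediately,
// real-time buffers are posted two vsyncs before they are due, and
// media-time buffers are registered as MediaClock timers.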
1279 void NuPlayer::Renderer::postDrainVideoQueue() {
1280     if (mDrainVideoQueuePending
1281             || getSyncQueues()
1282             || (mPaused && mVideoSampleReceived)) {
1283         return;
1284     }
1285 
1286     if (mVideoQueue.empty()) {
1287         return;
1288     }
1289 
1290     QueueEntry &entry = *mVideoQueue.begin();
1291 
1292     sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
1293     msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
1294 
1295     if (entry.mBuffer == NULL) {
1296         // EOS doesn't carry a timestamp.
1297         msg->post();
1298         mDrainVideoQueuePending = true;
1299         return;
1300     }
1301 
1302     int64_t nowUs = ALooper::GetNowUs();
1303     if (mFlags & FLAG_REAL_TIME) {
1304         int64_t realTimeUs;
1305         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1306 
1307         realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1308 
1309         int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1310 
1311         int64_t delayUs = realTimeUs - nowUs;
1312 
1313         ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
1314         // post 2 display refreshes before rendering is due
1315         msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1316 
1317         mDrainVideoQueuePending = true;
1318         return;
1319     }
1320 
1321     int64_t mediaTimeUs;
1322     CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1323 
1324     {
1325         Mutex::Autolock autoLock(mLock);
1326         if (mAnchorTimeMediaUs < 0) {
1327             mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1328             mAnchorTimeMediaUs = mediaTimeUs;
1329         }
1330     }
1331     mNextVideoTimeMediaUs = mediaTimeUs;
1332     if (!mHasAudio) {
1333         // smooth out videos >= 10fps
1334         mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
1335     }
1336 
1337     if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
1338         msg->post();
1339     } else {
1340         int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1341 
1342         // post 2 display refreshes before rendering is due
1343         mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
1344     }
1345 
1346     mDrainVideoQueuePending = true;
1347 }
1348 
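// Renders (or drops) the frame at the head of the video queue. A frame that is
// more than 40ms late is dropped, except that the very first frame after
// start/flush is always rendered.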
1349 void NuPlayer::Renderer::onDrainVideoQueue() {
1350     if (mVideoQueue.empty()) {
1351         return;
1352     }
1353 
1354     QueueEntry *entry = &*mVideoQueue.begin();
1355 
1356     if (entry->mBuffer == NULL) {
1357         // EOS
1358 
1359         notifyEOS(false /* audio */, entry->mFinalResult);
1360 
1361         mVideoQueue.erase(mVideoQueue.begin());
1362         entry = NULL;
1363 
1364         setVideoLateByUs(0);
1365         return;
1366     }
1367 
1368     int64_t nowUs = ALooper::GetNowUs();
1369     int64_t realTimeUs;
1370     int64_t mediaTimeUs = -1;
1371     if (mFlags & FLAG_REAL_TIME) {
1372         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1373     } else {
1374         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1375 
1376         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1377     }
1378     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1379 
1380     bool tooLate = false;
1381 
1382     if (!mPaused) {
1383         setVideoLateByUs(nowUs - realTimeUs);
1384         tooLate = (mVideoLateByUs > 40000);
1385 
1386         if (tooLate) {
1387             ALOGV("video late by %lld us (%.2f secs)",
1388                  (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1389         } else {
1390             int64_t mediaUs = 0;
1391             mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1392             ALOGV("rendering video at media time %.2f secs",
1393                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
1394                     mediaUs) / 1E6);
1395 
1396             if (!(mFlags & FLAG_REAL_TIME)
1397                     && mLastAudioMediaTimeUs != -1
1398                     && mediaTimeUs > mLastAudioMediaTimeUs) {
1399                 // If audio ends before video, video continues to drive media clock.
1400                 // Also smooth out videos >= 10fps.
1401                 mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
1402             }
1403         }
1404     } else {
1405         setVideoLateByUs(0);
1406         if (!mVideoSampleReceived && !mHasAudio) {
1407             // This will ensure that the first frame after a flush won't be used as anchor
1408             // when renderer is in paused state, because resume can happen any time after seek.
1409             clearAnchorTime();
1410         }
1411     }
1412 
1413     // Always render the first video frame while keeping stats on A/V sync.
1414     if (!mVideoSampleReceived) {
1415         realTimeUs = nowUs;
1416         tooLate = false;
1417     }
1418 
1419     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
1420     entry->mNotifyConsumed->setInt32("render", !tooLate);
1421     entry->mNotifyConsumed->post();
1422     mVideoQueue.erase(mVideoQueue.begin());
1423     entry = NULL;
1424 
1425     mVideoSampleReceived = true;
1426 
1427     if (!mPaused) {
1428         if (!mVideoRenderingStarted) {
1429             mVideoRenderingStarted = true;
1430             notifyVideoRenderingStart();
1431         }
1432         Mutex::Autolock autoLock(mLock);
1433         notifyIfMediaRenderingStarted_l();
1434     }
1435 }
1436 
1437 void NuPlayer::Renderer::notifyVideoRenderingStart() {
1438     sp<AMessage> notify = mNotify->dup();
1439     notify->setInt32("what", kWhatVideoRenderingStart);
1440     notify->post();
1441 }
1442 
1443 void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1444     Mutex::Autolock autoLock(mLock);
1445     notifyEOS_l(audio, finalResult, delayUs);
1446 }
1447 
1448 void NuPlayer::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
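    // For audio EOS with a positive delay (typically the remaining playout time of data
    // already written to the sink), re-post the EOS through the message loop, tagged with
    // the current EOS generation so a subsequent flush can invalidate it before it fires.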
1449     if (audio && delayUs > 0) {
1450         sp<AMessage> msg = new AMessage(kWhatEOS, this);
1451         msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1452         msg->setInt32("finalResult", finalResult);
1453         msg->post(delayUs);
1454         return;
1455     }
1456     sp<AMessage> notify = mNotify->dup();
1457     notify->setInt32("what", kWhatEOS);
1458     notify->setInt32("audio", static_cast<int32_t>(audio));
1459     notify->setInt32("finalResult", finalResult);
1460     notify->post(delayUs);
1461 
1462     if (audio) {
1463         // Video might outlive audio. Clear the anchor to enable the video-only case.
1464         mAnchorTimeMediaUs = -1;
1465         mHasAudio = false;
1466         if (mNextVideoTimeMediaUs >= 0) {
1467             int64_t mediaUs = 0;
1468             int64_t nowUs = ALooper::GetNowUs();
1469             status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
1470             if (result == OK) {
1471                 if (mNextVideoTimeMediaUs > mediaUs) {
1472                     mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
1473                 }
1474             } else {
1475                 mMediaClock->updateAnchor(
1476                         mNextVideoTimeMediaUs, nowUs,
1477                         mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
1478             }
1479         }
1480     }
1481 }
1482 
1483 void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1484     sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1485     msg->setInt32("reason", reason);
1486     msg->post();
1487 }
1488 
1489 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1490     int32_t audio;
1491     CHECK(msg->findInt32("audio", &audio));
1492 
1493     if (dropBufferIfStale(audio, msg)) {
1494         return;
1495     }
1496 
1497     if (audio) {
1498         mHasAudio = true;
1499     } else {
1500         mHasVideo = true;
1501     }
1502 
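    // Lazily create and initialize the video frame scheduler the first time video is seen.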
1503     if (mHasVideo) {
1504         if (mVideoScheduler == NULL) {
1505             mVideoScheduler = new VideoFrameScheduler();
1506             mVideoScheduler->init();
1507         }
1508     }
1509 
1510     sp<RefBase> obj;
1511     CHECK(msg->findObject("buffer", &obj));
1512     sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
1513 
1514     sp<AMessage> notifyConsumed;
1515     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1516 
1517     QueueEntry entry;
1518     entry.mBuffer = buffer;
1519     entry.mNotifyConsumed = notifyConsumed;
1520     entry.mOffset = 0;
1521     entry.mFinalResult = OK;
1522     entry.mBufferOrdinal = ++mTotalBuffersQueued;
1523 
1524     if (audio) {
1525         Mutex::Autolock autoLock(mLock);
1526         mAudioQueue.push_back(entry);
1527         postDrainAudioQueue_l();
1528     } else {
1529         mVideoQueue.push_back(entry);
1530         postDrainVideoQueue();
1531     }
1532 
1533     Mutex::Autolock autoLock(mLock);
1534     if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1535         return;
1536     }
1537 
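    // Still syncing the queues: compare the timestamps at the head of each queue to line up
    // the audio and video start times.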
1538     sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1539     sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1540 
1541     if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1542         // EOS signalled on either queue.
1543         syncQueuesDone_l();
1544         return;
1545     }
1546 
1547     int64_t firstAudioTimeUs;
1548     int64_t firstVideoTimeUs;
1549     CHECK(firstAudioBuffer->meta()
1550             ->findInt64("timeUs", &firstAudioTimeUs));
1551     CHECK(firstVideoBuffer->meta()
1552             ->findInt64("timeUs", &firstVideoTimeUs));
1553 
1554     int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1555 
1556     ALOGV("queueDiff = %.2f secs", diff / 1E6);
1557 
1558     if (diff > 100000LL) {
1559         // Audio data starts more than 0.1 secs before video.
1560         // Drop some audio.
1561 
1562         (*mAudioQueue.begin()).mNotifyConsumed->post();
1563         mAudioQueue.erase(mAudioQueue.begin());
1564         return;
1565     }
1566 
1567     syncQueuesDone_l();
1568 }
1569 
1570 void NuPlayer::Renderer::syncQueuesDone_l() {
1571     if (!mSyncQueues) {
1572         return;
1573     }
1574 
1575     mSyncQueues = false;
1576 
1577     if (!mAudioQueue.empty()) {
1578         postDrainAudioQueue_l();
1579     }
1580 
1581     if (!mVideoQueue.empty()) {
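        // postDrainVideoQueue() acquires mLock internally, so release the lock around the
        // call to avoid self-deadlock.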
1582         mLock.unlock();
1583         postDrainVideoQueue();
1584         mLock.lock();
1585     }
1586 }
1587 
1588 void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1589     int32_t audio;
1590     CHECK(msg->findInt32("audio", &audio));
1591 
1592     if (dropBufferIfStale(audio, msg)) {
1593         return;
1594     }
1595 
1596     int32_t finalResult;
1597     CHECK(msg->findInt32("finalResult", &finalResult));
1598 
1599     QueueEntry entry;
1600     entry.mOffset = 0;
1601     entry.mFinalResult = finalResult;
1602 
1603     if (audio) {
1604         Mutex::Autolock autoLock(mLock);
1605         if (mAudioQueue.empty() && mSyncQueues) {
1606             syncQueuesDone_l();
1607         }
1608         mAudioQueue.push_back(entry);
1609         postDrainAudioQueue_l();
1610     } else {
1611         if (mVideoQueue.empty() && getSyncQueues()) {
1612             Mutex::Autolock autoLock(mLock);
1613             syncQueuesDone_l();
1614         }
1615         mVideoQueue.push_back(entry);
1616         postDrainVideoQueue();
1617     }
1618 }
1619 
1620 void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1621     int32_t audio, notifyComplete;
1622     CHECK(msg->findInt32("audio", &audio));
1623 
1624     {
1625         Mutex::Autolock autoLock(mLock);
1626         if (audio) {
1627             notifyComplete = mNotifyCompleteAudio;
1628             mNotifyCompleteAudio = false;
1629             mLastAudioMediaTimeUs = -1;
1630 
1631             mHasAudio = false;
1632             if (mNextVideoTimeMediaUs >= 0) {
1633                 int64_t nowUs = ALooper::GetNowUs();
1634                 mMediaClock->updateAnchor(
1635                         mNextVideoTimeMediaUs, nowUs,
1636                         mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
1637             }
1638         } else {
1639             notifyComplete = mNotifyCompleteVideo;
1640             mNotifyCompleteVideo = false;
1641         }
1642 
1643         // If we're currently syncing the queues, i.e. dropping audio while
1644         // aligning the first audio/video buffer times and only one of the
1645         // two queues has data, we may starve that queue by not requesting
1646         // more buffers from the decoder. If the other source then encounters
1647         // a discontinuity that leads to flushing, we'll never find the
1648         // corresponding discontinuity on the other queue.
1649         // Therefore we'll stop syncing the queues if at least one of them
1650         // is flushed.
1651         syncQueuesDone_l();
1652     }
1653     clearAnchorTime();
1654 
1655     ALOGV("flushing %s", audio ? "audio" : "video");
1656     if (audio) {
1657         {
1658             Mutex::Autolock autoLock(mLock);
1659             flushQueue(&mAudioQueue);
1660 
1661             ++mAudioDrainGeneration;
1662             ++mAudioEOSGeneration;
1663             prepareForMediaRenderingStart_l();
1664 
1665             // the frame count will be reset after flush.
1666             clearAudioFirstAnchorTime_l();
1667         }
1668 
1669         mDrainAudioQueuePending = false;
1670 
1671         if (offloadingAudio()) {
1672             mAudioSink->pause();
1673             mAudioSink->flush();
1674             if (!mPaused) {
1675                 mAudioSink->start();
1676             }
1677         } else {
1678             mAudioSink->pause();
1679             mAudioSink->flush();
1680             // Call stop() to signal to the AudioSink to completely fill the
1681             // internal buffer before resuming playback.
1682             // FIXME: this is ignored after flush().
1683             mAudioSink->stop();
1684             if (!mPaused) {
1685                 mAudioSink->start();
1686             }
1687             mNumFramesWritten = 0;
1688         }
1689         mNextAudioClockUpdateTimeUs = -1;
1690     } else {
1691         flushQueue(&mVideoQueue);
1692 
1693         mDrainVideoQueuePending = false;
1694 
1695         if (mVideoScheduler != NULL) {
1696             mVideoScheduler->restart();
1697         }
1698 
1699         Mutex::Autolock autoLock(mLock);
1700         ++mVideoDrainGeneration;
1701         prepareForMediaRenderingStart_l();
1702     }
1703 
1704     mVideoSampleReceived = false;
1705 
1706     if (notifyComplete) {
1707         notifyFlushComplete(audio);
1708     }
1709 }
1710 
1711 void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1712     while (!queue->empty()) {
1713         QueueEntry *entry = &*queue->begin();
1714 
1715         if (entry->mBuffer != NULL) {
1716             entry->mNotifyConsumed->post();
1717         } else if (entry->mNotifyConsumed != nullptr) {
1718             // A format-change entry: does the audio sink need to be opened now?
1719             onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1720         }
1721 
1722         queue->erase(queue->begin());
1723         entry = NULL;
1724     }
1725 }
1726 
1727 void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1728     sp<AMessage> notify = mNotify->dup();
1729     notify->setInt32("what", kWhatFlushComplete);
1730     notify->setInt32("audio", static_cast<int32_t>(audio));
1731     notify->post();
1732 }
1733 
1734 bool NuPlayer::Renderer::dropBufferIfStale(
1735         bool audio, const sp<AMessage> &msg) {
1736     int32_t queueGeneration;
1737     CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1738 
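    // A buffer is stale if it was queued under an older queue generation (typically bumped
    // by a flush); consume it without rendering.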
1739     if (queueGeneration == getQueueGeneration(audio)) {
1740         return false;
1741     }
1742 
1743     sp<AMessage> notifyConsumed;
1744     if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1745         notifyConsumed->post();
1746     }
1747 
1748     return true;
1749 }
1750 
1751 void NuPlayer::Renderer::onAudioSinkChanged() {
1752     if (offloadingAudio()) {
1753         return;
1754     }
1755     CHECK(!mDrainAudioQueuePending);
1756     mNumFramesWritten = 0;
1757     mAnchorNumFramesWritten = -1;
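    // If the new sink already reports a frames-written count, adopt it so that position
    // calculations stay consistent.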
1758     uint32_t written;
1759     if (mAudioSink->getFramesWritten(&written) == OK) {
1760         mNumFramesWritten = written;
1761     }
1762 }
1763 
1764 void NuPlayer::Renderer::onDisableOffloadAudio() {
1765     Mutex::Autolock autoLock(mLock);
1766     mFlags &= ~FLAG_OFFLOAD_AUDIO;
1767     ++mAudioDrainGeneration;
1768     if (mAudioRenderingStartGeneration != -1) {
1769         prepareForMediaRenderingStart_l();
1770         // PauseTimeout is applied to offload mode only. Cancel pending timer.
1771         cancelAudioOffloadPauseTimeout();
1772     }
1773 }
1774 
1775 void NuPlayer::Renderer::onEnableOffloadAudio() {
1776     Mutex::Autolock autoLock(mLock);
1777     mFlags |= FLAG_OFFLOAD_AUDIO;
1778     ++mAudioDrainGeneration;
1779     if (mAudioRenderingStartGeneration != -1) {
1780         prepareForMediaRenderingStart_l();
1781     }
1782 }
1783 
1784 void NuPlayer::Renderer::onPause() {
1785     if (mPaused) {
1786         return;
1787     }
1788 
1789     {
1790         Mutex::Autolock autoLock(mLock);
1791         // We do not increment the audio drain generation so that the audio buffer keeps filling during pause.
1792         ++mVideoDrainGeneration;
1793         prepareForMediaRenderingStart_l();
1794         mPaused = true;
1795         mMediaClock->setPlaybackRate(0.0);
1796     }
1797 
1798     mDrainAudioQueuePending = false;
1799     mDrainVideoQueuePending = false;
1800 
1801     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1802     mAudioSink->pause();
1803     startAudioOffloadPauseTimeout();
1804 
1805     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1806           mAudioQueue.size(), mVideoQueue.size());
1807 }
1808 
1809 void NuPlayer::Renderer::onResume() {
1810     if (!mPaused) {
1811         return;
1812     }
1813 
1814     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1815     cancelAudioOffloadPauseTimeout();
1816     if (mAudioSink->ready()) {
1817         status_t err = mAudioSink->start();
1818         if (err != OK) {
1819             ALOGE("cannot start AudioSink err %d", err);
1820             notifyAudioTearDown(kDueToError);
1821         }
1822     }
1823 
1824     {
1825         Mutex::Autolock autoLock(mLock);
1826         mPaused = false;
1827         // rendering started message may have been delayed if we were paused.
1828         if (mRenderingDataDelivered) {
1829             notifyIfMediaRenderingStarted_l();
1830         }
1831         // configure audiosink as we did not do it when pausing
1832         if (mAudioSink != NULL && mAudioSink->ready()) {
1833             mAudioSink->setPlaybackRate(mPlaybackSettings);
1834         }
1835 
1836         mMediaClock->setPlaybackRate(mPlaybackRate);
1837 
1838         if (!mAudioQueue.empty()) {
1839             postDrainAudioQueue_l();
1840         }
1841     }
1842 
1843     if (!mVideoQueue.empty()) {
1844         postDrainVideoQueue();
1845     }
1846 }
1847 
1848 void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1849     if (mVideoScheduler == NULL) {
1850         mVideoScheduler = new VideoFrameScheduler();
1851     }
1852     mVideoScheduler->init(fps);
1853 }
1854 
1855 int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1856     Mutex::Autolock autoLock(mLock);
1857     return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1858 }
1859 
1860 int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1861     Mutex::Autolock autoLock(mLock);
1862     return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1863 }
1864 
1865 bool NuPlayer::Renderer::getSyncQueues() {
1866     Mutex::Autolock autoLock(mLock);
1867     return mSyncQueues;
1868 }
1869 
1870 void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1871     if (mAudioTornDown) {
1872         return;
1873     }
1874 
1875     // TimeoutWhenPaused is only for offload mode.
1876     if (reason == kDueToTimeout && !offloadingAudio()) {
1877         return;
1878     }
1879 
1880     mAudioTornDown = true;
1881 
1882     int64_t currentPositionUs;
1883     sp<AMessage> notify = mNotify->dup();
1884     if (getCurrentPosition(&currentPositionUs) == OK) {
1885         notify->setInt64("positionUs", currentPositionUs);
1886     }
1887 
1888     mAudioSink->stop();
1889     mAudioSink->flush();
1890 
1891     notify->setInt32("what", kWhatAudioTearDown);
1892     notify->setInt32("reason", reason);
1893     notify->post();
1894 }
1895 
1896 void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1897     if (offloadingAudio()) {
1898         mWakeLock->acquire();
1899         sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
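        // Tag the message with the current timeout generation; cancelAudioOffloadPauseTimeout()
        // bumps the generation so that an already-cancelled timeout can be ignored when it fires.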
1900         msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1901         msg->post(kOffloadPauseMaxUs);
1902     }
1903 }
1904 
1905 void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1906     // We may have called startAudioOffloadPauseTimeout() without
1907     // the AudioSink open and with offloadingAudio enabled.
1908     //
1909     // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
1910     // we always release the wakelock and increment the pause timeout generation.
1911     //
1912     // Note: The acquired wakelock prevents the device from suspending
1913     // immediately after offload pause (in case a resume happens shortly thereafter).
1914     mWakeLock->release(true);
1915     ++mAudioOffloadPauseTimeoutGeneration;
1916 }
1917 
1918 status_t NuPlayer::Renderer::onOpenAudioSink(
1919         const sp<AMessage> &format,
1920         bool offloadOnly,
1921         bool hasVideo,
1922         uint32_t flags,
1923         bool isStreaming) {
1924     ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1925             offloadOnly, offloadingAudio());
1926     bool audioSinkChanged = false;
1927 
1928     int32_t numChannels;
1929     CHECK(format->findInt32("channel-count", &numChannels));
1930 
1931     int32_t channelMask;
1932     if (!format->findInt32("channel-mask", &channelMask)) {
1933         // Signal to the AudioSink to derive the mask from the channel count.
1934         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1935     }
1936 
1937     int32_t sampleRate;
1938     CHECK(format->findInt32("sample-rate", &sampleRate));
1939 
1940     // read pcm encoding from MediaCodec output format, if available
1941     int32_t pcmEncoding;
1942     audio_format_t audioFormat =
1943             format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ?
1944                     audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT;
1945 
1946     if (offloadingAudio()) {
1947         AString mime;
1948         CHECK(format->findString("mime", &mime));
1949         status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1950 
1951         if (err != OK) {
1952             ALOGE("Couldn't map mime \"%s\" to a valid "
1953                     "audio_format", mime.c_str());
1954             onDisableOffloadAudio();
1955         } else {
1956             ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1957                     mime.c_str(), audioFormat);
1958 
1959             int avgBitRate = -1;
1960             format->findInt32("bitrate", &avgBitRate);
1961 
1962             int32_t aacProfile = -1;
1963             if (audioFormat == AUDIO_FORMAT_AAC
1964                     && format->findInt32("aac-profile", &aacProfile)) {
1965                 // Redefine AAC format as per aac profile
1966                 mapAACProfileToAudioFormat(
1967                         audioFormat,
1968                         aacProfile);
1969             }
1970 
1971             audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1972             offloadInfo.duration_us = -1;
1973             format->findInt64(
1974                     "durationUs", &offloadInfo.duration_us);
1975             offloadInfo.sample_rate = sampleRate;
1976             offloadInfo.channel_mask = channelMask;
1977             offloadInfo.format = audioFormat;
1978             offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1979             offloadInfo.bit_rate = avgBitRate;
1980             offloadInfo.has_video = hasVideo;
1981             offloadInfo.is_streaming = isStreaming;
1982 
1983             if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1984                 ALOGV("openAudioSink: no change in offload mode");
1985                 // no change from previous configuration, everything ok.
1986                 return OK;
1987             }
1988             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1989 
1990             ALOGV("openAudioSink: try to open AudioSink in offload mode");
1991             uint32_t offloadFlags = flags;
1992             offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1993             offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1994             audioSinkChanged = true;
1995             mAudioSink->close();
1996 
1997             err = mAudioSink->open(
1998                     sampleRate,
1999                     numChannels,
2000                     (audio_channel_mask_t)channelMask,
2001                     audioFormat,
2002                     0 /* bufferCount - unused */,
2003                     &NuPlayer::Renderer::AudioSinkCallback,
2004                     this,
2005                     (audio_output_flags_t)offloadFlags,
2006                     &offloadInfo);
2007 
2008             if (err == OK) {
2009                 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
2010             }
2011 
2012             if (err == OK) {
2013                 // If the playback is offloaded to h/w, we pass
2014                 // the HAL some metadata information.
2015                 // We don't want to do this for PCM because it
2016                 // will be going through the AudioFlinger mixer
2017                 // before reaching the hardware.
2018                 // TODO
2019                 mCurrentOffloadInfo = offloadInfo;
2020                 if (!mPaused) { // for preview mode, don't start if paused
2021                     err = mAudioSink->start();
2022                 }
2023                 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
2024             }
2025             if (err != OK) {
2026                 // Clean up, fall back to non offload mode.
2027                 mAudioSink->close();
2028                 onDisableOffloadAudio();
2029                 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2030                 ALOGV("openAudioSink: offload failed");
2031                 if (offloadOnly) {
2032                     notifyAudioTearDown(kForceNonOffload);
2033                 }
2034             } else {
2035                 mUseAudioCallback = true;  // offload mode transfers data through callback
2036                 ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
2037             }
2038         }
2039     }
2040     if (!offloadOnly && !offloadingAudio()) {
2041         ALOGV("openAudioSink: open AudioSink in NON-offload mode");
2042         uint32_t pcmFlags = flags;
2043         pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
2044 
2045         const PcmInfo info = {
2046                 (audio_channel_mask_t)channelMask,
2047                 (audio_output_flags_t)pcmFlags,
2048                 audioFormat,
2049                 numChannels,
2050                 sampleRate
2051         };
2052         if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
2053             ALOGV("openAudioSink: no change in pcm mode");
2054             // no change from previous configuration, everything ok.
2055             return OK;
2056         }
2057 
2058         audioSinkChanged = true;
2059         mAudioSink->close();
2060         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2061         // Note: It is possible to set up the callback, but not use it to send audio data.
2062         // This requires a fix in AudioSink to explicitly specify the transfer mode.
2063         mUseAudioCallback = getUseAudioCallbackSetting();
2064         if (mUseAudioCallback) {
2065             ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
2066         }
2067 
2068         // Compute the desired buffer size.
2069         // For callback mode, the amount of time before wakeup is about half the buffer size.
2070         const uint32_t frameCount =
2071                 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
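        // e.g. a 44100 Hz stream with a 300 ms sink setting yields 44100 * 300 / 1000 = 13230 frames.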
2072 
2073         // With doNotReconnect set, the AudioSink signals back on disconnect and lets NuPlayer
2074         // re-construct the AudioSink instead of reconnecting internally. We don't want this when
2075         // there's video, because it would cause a video seek to the previous I-frame. But we do
2076         // want it when there's only audio, because it gives NuPlayer a chance to switch from
2077         // non-offload mode to offload mode. So doNotReconnect is set only when there's no video.
2078         const bool doNotReconnect = !hasVideo;
2079 
2080         // We should always be able to set our playback settings if the sink is closed.
2081         LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
2082                 "onOpenAudioSink: can't set playback rate on closed sink");
2083         status_t err = mAudioSink->open(
2084                     sampleRate,
2085                     numChannels,
2086                     (audio_channel_mask_t)channelMask,
2087                     audioFormat,
2088                     0 /* bufferCount - unused */,
2089                     mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
2090                     mUseAudioCallback ? this : NULL,
2091                     (audio_output_flags_t)pcmFlags,
2092                     NULL,
2093                     doNotReconnect,
2094                     frameCount);
2095         if (err != OK) {
2096             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2097             mAudioSink->close();
2098             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2099             return err;
2100         }
2101         mCurrentPcmInfo = info;
2102         if (!mPaused) { // for preview mode, don't start if paused
2103             mAudioSink->start();
2104         }
2105     }
2106     if (audioSinkChanged) {
2107         onAudioSinkChanged();
2108     }
2109     mAudioTornDown = false;
2110     return OK;
2111 }
2112 
2113 void NuPlayer::Renderer::onCloseAudioSink() {
2114     mAudioSink->close();
2115     mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2116     mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2117 }
2118 
2119 void NuPlayer::Renderer::onChangeAudioFormat(
2120         const sp<AMessage> &meta, const sp<AMessage> &notify) {
2121     sp<AMessage> format;
2122     CHECK(meta->findMessage("format", &format));
2123 
2124     int32_t offloadOnly;
2125     CHECK(meta->findInt32("offload-only", &offloadOnly));
2126 
2127     int32_t hasVideo;
2128     CHECK(meta->findInt32("has-video", &hasVideo));
2129 
2130     uint32_t flags;
2131     CHECK(meta->findInt32("flags", (int32_t *)&flags));
2132 
2133     uint32_t isStreaming;
2134     CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2135 
2136     status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2137 
2138     if (err != OK) {
2139         notify->setInt32("err", err);
2140     }
2141     notify->post();
2142 }
2143 
2144 }  // namespace android
2145