1 /*
2 **
3 ** Copyright 2007, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 //#define LOG_NDEBUG 0
19 #define LOG_TAG "AudioTrack"
20 
21 #include <inttypes.h>
22 #include <math.h>
23 #include <sys/resource.h>
24 
25 #include <audio_utils/primitives.h>
26 #include <binder/IPCThreadState.h>
27 #include <media/AudioTrack.h>
28 #include <utils/Log.h>
29 #include <private/media/AudioTrackShared.h>
30 #include <media/IAudioFlinger.h>
31 #include <media/AudioPolicyHelper.h>
32 #include <media/AudioResamplerPublic.h>
33 
34 #define WAIT_PERIOD_MS                  10
35 #define WAIT_STREAM_END_TIMEOUT_SEC     120
36 
37 
38 namespace android {
39 // ---------------------------------------------------------------------------
40 
41 static int64_t convertTimespecToUs(const struct timespec &tv)
42 {
43     return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
44 }
45 
46 // current monotonic time in microseconds.
47 static int64_t getNowUs()
48 {
49     struct timespec tv;
50     (void) clock_gettime(CLOCK_MONOTONIC, &tv);
51     return convertTimespecToUs(tv);
52 }
53 
54 // static
55 status_t AudioTrack::getMinFrameCount(
56         size_t* frameCount,
57         audio_stream_type_t streamType,
58         uint32_t sampleRate)
59 {
60     if (frameCount == NULL) {
61         return BAD_VALUE;
62     }
63 
64     // FIXME merge with similar code in createTrack_l(), except we're missing
65     //       some information here that is available in createTrack_l():
66     //          audio_io_handle_t output
67     //          audio_format_t format
68     //          audio_channel_mask_t channelMask
69     //          audio_output_flags_t flags
70     uint32_t afSampleRate;
71     status_t status;
72     status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
73     if (status != NO_ERROR) {
74         ALOGE("Unable to query output sample rate for stream type %d; status %d",
75                 streamType, status);
76         return status;
77     }
78     size_t afFrameCount;
79     status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
80     if (status != NO_ERROR) {
81         ALOGE("Unable to query output frame count for stream type %d; status %d",
82                 streamType, status);
83         return status;
84     }
85     uint32_t afLatency;
86     status = AudioSystem::getOutputLatency(&afLatency, streamType);
87     if (status != NO_ERROR) {
88         ALOGE("Unable to query output latency for stream type %d; status %d",
89                 streamType, status);
90         return status;
91     }
92 
93     // Ensure that buffer depth covers at least audio hardware latency
94     uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
95     if (minBufCount < 2) {
96         minBufCount = 2;
97     }
98 
99     *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
100             afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
101     // The formula above should always produce a non-zero value, but return an error
102     // in the unlikely event that it does not, as that's part of the API contract.
103     if (*frameCount == 0) {
104         ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
105                 streamType, sampleRate);
106         return BAD_VALUE;
107     }
108     ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
109             *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
110     return NO_ERROR;
111 }
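
// Illustrative sketch (not part of the original sources): a client would
// typically call getMinFrameCount() before sizing its own buffer, e.g. for a
// 44.1 kHz music stream.  The helper name below is hypothetical.
static status_t exampleQueryMinFrameCount()
{
    size_t minFrameCount = 0;
    status_t status = AudioTrack::getMinFrameCount(&minFrameCount,
            AUDIO_STREAM_MUSIC, 44100 /*sampleRate*/);
    if (status != NO_ERROR) {
        return status;      // output parameters unavailable, or bad arguments
    }
    // Using at least minFrameCount frames (often a small multiple of it)
    // avoids the underrun-prone buffer sizes this function guards against.
    ALOGV("example: minFrameCount=%zu", minFrameCount);
    return NO_ERROR;
}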
112 
113 // ---------------------------------------------------------------------------
114 
115 AudioTrack::AudioTrack()
116     : mStatus(NO_INIT),
117       mIsTimed(false),
118       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
119       mPreviousSchedulingGroup(SP_DEFAULT),
120       mPausedPosition(0)
121 {
122     mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
123     mAttributes.usage = AUDIO_USAGE_UNKNOWN;
124     mAttributes.flags = 0x0;
125     strcpy(mAttributes.tags, "");
126 }
127 
128 AudioTrack::AudioTrack(
129         audio_stream_type_t streamType,
130         uint32_t sampleRate,
131         audio_format_t format,
132         audio_channel_mask_t channelMask,
133         size_t frameCount,
134         audio_output_flags_t flags,
135         callback_t cbf,
136         void* user,
137         uint32_t notificationFrames,
138         int sessionId,
139         transfer_type transferType,
140         const audio_offload_info_t *offloadInfo,
141         int uid,
142         pid_t pid,
143         const audio_attributes_t* pAttributes)
144     : mStatus(NO_INIT),
145       mIsTimed(false),
146       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
147       mPreviousSchedulingGroup(SP_DEFAULT),
148       mPausedPosition(0)
149 {
150     mStatus = set(streamType, sampleRate, format, channelMask,
151             frameCount, flags, cbf, user, notificationFrames,
152             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
153             offloadInfo, uid, pid, pAttributes);
154 }
155 
156 AudioTrack::AudioTrack(
157         audio_stream_type_t streamType,
158         uint32_t sampleRate,
159         audio_format_t format,
160         audio_channel_mask_t channelMask,
161         const sp<IMemory>& sharedBuffer,
162         audio_output_flags_t flags,
163         callback_t cbf,
164         void* user,
165         uint32_t notificationFrames,
166         int sessionId,
167         transfer_type transferType,
168         const audio_offload_info_t *offloadInfo,
169         int uid,
170         pid_t pid,
171         const audio_attributes_t* pAttributes)
172     : mStatus(NO_INIT),
173       mIsTimed(false),
174       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
175       mPreviousSchedulingGroup(SP_DEFAULT),
176       mPausedPosition(0)
177 {
178     mStatus = set(streamType, sampleRate, format, channelMask,
179             0 /*frameCount*/, flags, cbf, user, notificationFrames,
180             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
181             uid, pid, pAttributes);
182 }
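
// Illustrative sketch (hypothetical client code): the streaming constructor
// above simply forwards to set() with no shared buffer.  The trailing
// arguments are spelled out here instead of relying on the header's defaults;
// initCheck() is assumed to report the status stored by set().
static sp<AudioTrack> exampleCreateStreamingTrack()
{
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC,
            44100,
            AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO,
            0 /*frameCount: let createTrack_l() choose the minimum*/,
            AUDIO_OUTPUT_FLAG_NONE,
            NULL /*cbf*/, NULL /*user*/, 0 /*notificationFrames*/,
            AUDIO_SESSION_ALLOCATE,
            AudioTrack::TRANSFER_SYNC,
            NULL /*offloadInfo*/, -1 /*uid*/, -1 /*pid*/, NULL /*pAttributes*/);
    if (track->initCheck() != NO_ERROR) {   // set() failed, see mStatus
        return NULL;
    }
    return track;
}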
183 
184 AudioTrack::~AudioTrack()
185 {
186     if (mStatus == NO_ERROR) {
187         // Make sure that the callback function exits in the case where
188         // it is looping on a buffer-full condition in obtainBuffer().
189         // Otherwise the callback thread will never exit.
190         stop();
191         if (mAudioTrackThread != 0) {
192             mProxy->interrupt();
193             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
194             mAudioTrackThread->requestExitAndWait();
195             mAudioTrackThread.clear();
196         }
197         mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
198         mAudioTrack.clear();
199         mCblkMemory.clear();
200         mSharedBuffer.clear();
201         IPCThreadState::self()->flushCommands();
202         ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
203                 IPCThreadState::self()->getCallingPid(), mClientPid);
204         AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
205     }
206 }
207 
208 status_t AudioTrack::set(
209         audio_stream_type_t streamType,
210         uint32_t sampleRate,
211         audio_format_t format,
212         audio_channel_mask_t channelMask,
213         size_t frameCount,
214         audio_output_flags_t flags,
215         callback_t cbf,
216         void* user,
217         uint32_t notificationFrames,
218         const sp<IMemory>& sharedBuffer,
219         bool threadCanCallJava,
220         int sessionId,
221         transfer_type transferType,
222         const audio_offload_info_t *offloadInfo,
223         int uid,
224         pid_t pid,
225         const audio_attributes_t* pAttributes)
226 {
227     ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
228           "flags %#x, notificationFrames %u, sessionId %d, transferType %d",
229           streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
230           sessionId, transferType);
231 
232     switch (transferType) {
233     case TRANSFER_DEFAULT:
234         if (sharedBuffer != 0) {
235             transferType = TRANSFER_SHARED;
236         } else if (cbf == NULL || threadCanCallJava) {
237             transferType = TRANSFER_SYNC;
238         } else {
239             transferType = TRANSFER_CALLBACK;
240         }
241         break;
242     case TRANSFER_CALLBACK:
243         if (cbf == NULL || sharedBuffer != 0) {
244             ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
245             return BAD_VALUE;
246         }
247         break;
248     case TRANSFER_OBTAIN:
249     case TRANSFER_SYNC:
250         if (sharedBuffer != 0) {
251             ALOGE("Transfer type TRANSFER_OBTAIN/TRANSFER_SYNC but sharedBuffer != 0");
252             return BAD_VALUE;
253         }
254         break;
255     case TRANSFER_SHARED:
256         if (sharedBuffer == 0) {
257             ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
258             return BAD_VALUE;
259         }
260         break;
261     default:
262         ALOGE("Invalid transfer type %d", transferType);
263         return BAD_VALUE;
264     }
265     mSharedBuffer = sharedBuffer;
266     mTransfer = transferType;
267 
268     ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
269             sharedBuffer->size());
270 
271     ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
272 
273     AutoMutex lock(mLock);
274 
275     // invariant that mAudioTrack != 0 is true only after set() returns successfully
276     if (mAudioTrack != 0) {
277         ALOGE("Track already in use");
278         return INVALID_OPERATION;
279     }
280 
281     // handle default values first.
282     if (streamType == AUDIO_STREAM_DEFAULT) {
283         streamType = AUDIO_STREAM_MUSIC;
284     }
285     if (pAttributes == NULL) {
286         if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
287             ALOGE("Invalid stream type %d", streamType);
288             return BAD_VALUE;
289         }
290         mStreamType = streamType;
291 
292     } else {
293         // stream type shouldn't be looked at, this track has audio attributes
294         memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
295         ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
296                 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
297         mStreamType = AUDIO_STREAM_DEFAULT;
298     }
299 
300     // these below should probably come from the audioFlinger too...
301     if (format == AUDIO_FORMAT_DEFAULT) {
302         format = AUDIO_FORMAT_PCM_16_BIT;
303     }
304 
305     // validate parameters
306     if (!audio_is_valid_format(format)) {
307         ALOGE("Invalid format %#x", format);
308         return BAD_VALUE;
309     }
310     mFormat = format;
311 
312     if (!audio_is_output_channel(channelMask)) {
313         ALOGE("Invalid channel mask %#x", channelMask);
314         return BAD_VALUE;
315     }
316     mChannelMask = channelMask;
317     uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
318     mChannelCount = channelCount;
319 
320     // AudioFlinger does not currently support 8-bit data in shared memory
321     if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
322         ALOGE("8-bit data in shared memory is not supported");
323         return BAD_VALUE;
324     }
325 
326     // force direct flag if format is not linear PCM
327     // or offload was requested
328     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
329             || !audio_is_linear_pcm(format)) {
330         ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
331                     ? "Offload request, forcing to Direct Output"
332                     : "Not linear PCM, forcing to Direct Output");
333         flags = (audio_output_flags_t)
334                 // FIXME why can't we allow direct AND fast?
335                 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
336     }
337 
338     // force direct flag if HW A/V sync requested
339     if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
340         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
341     }
342 
343     if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
344         if (audio_is_linear_pcm(format)) {
345             mFrameSize = channelCount * audio_bytes_per_sample(format);
346         } else {
347             mFrameSize = sizeof(uint8_t);
348         }
349         mFrameSizeAF = mFrameSize;
350     } else {
351         ALOG_ASSERT(audio_is_linear_pcm(format));
352         mFrameSize = channelCount * audio_bytes_per_sample(format);
353         mFrameSizeAF = channelCount * audio_bytes_per_sample(
354                 format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
355         // createTrack will return an error if PCM format is not supported by server,
356         // so no need to check for specific PCM formats here
357     }
358 
359     // sampling rate must be specified for direct outputs
360     if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
361         return BAD_VALUE;
362     }
363     mSampleRate = sampleRate;
364 
365     // Make copy of input parameter offloadInfo so that in the future:
366     //  (a) createTrack_l doesn't need it as an input parameter
367     //  (b) we can support re-creation of offloaded tracks
368     if (offloadInfo != NULL) {
369         mOffloadInfoCopy = *offloadInfo;
370         mOffloadInfo = &mOffloadInfoCopy;
371     } else {
372         mOffloadInfo = NULL;
373     }
374 
375     mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
376     mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
377     mSendLevel = 0.0f;
378     // mFrameCount is initialized in createTrack_l
379     mReqFrameCount = frameCount;
380     mNotificationFramesReq = notificationFrames;
381     mNotificationFramesAct = 0;
382     if (sessionId == AUDIO_SESSION_ALLOCATE) {
383         mSessionId = AudioSystem::newAudioUniqueId();
384     } else {
385         mSessionId = sessionId;
386     }
387     int callingpid = IPCThreadState::self()->getCallingPid();
388     int mypid = getpid();
389     if (uid == -1 || (callingpid != mypid)) {
390         mClientUid = IPCThreadState::self()->getCallingUid();
391     } else {
392         mClientUid = uid;
393     }
394     if (pid == -1 || (callingpid != mypid)) {
395         mClientPid = callingpid;
396     } else {
397         mClientPid = pid;
398     }
399     mAuxEffectId = 0;
400     mFlags = flags;
401     mCbf = cbf;
402 
403     if (cbf != NULL) {
404         mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
405         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
406     }
407 
408     // create the IAudioTrack
409     status_t status = createTrack_l();
410 
411     if (status != NO_ERROR) {
412         if (mAudioTrackThread != 0) {
413             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
414             mAudioTrackThread->requestExitAndWait();
415             mAudioTrackThread.clear();
416         }
417         return status;
418     }
419 
420     mStatus = NO_ERROR;
421     mState = STATE_STOPPED;
422     mUserData = user;
423     mLoopPeriod = 0;
424     mMarkerPosition = 0;
425     mMarkerReached = false;
426     mNewPosition = 0;
427     mUpdatePeriod = 0;
428     mServer = 0;
429     mPosition = 0;
430     mReleased = 0;
431     mStartUs = 0;
432     AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
433     mSequence = 1;
434     mObservedSequence = mSequence;
435     mInUnderrun = false;
436 
437     return NO_ERROR;
438 }
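
// Illustrative sketch (hypothetical callback and helper): with TRANSFER_DEFAULT,
// supplying a callback and no shared buffer resolves to TRANSFER_CALLBACK in the
// switch at the top of set().  The callback signature and event constants are
// assumed to follow the callback_t typedef declared in <media/AudioTrack.h>.
static void exampleAudioTrackCallback(int event, void* user, void* info)
{
    (void) user;
    switch (event) {
    case AudioTrack::EVENT_MORE_DATA: {
        // info points to a Buffer describing how much data the track wants;
        // fill buffer->raw and leave buffer->size as the number of bytes provided.
        AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
        memset(buffer->raw, 0, buffer->size);   // silence, for illustration only
        break;
    }
    case AudioTrack::EVENT_UNDERRUN:
        ALOGW("example: underrun");
        break;
    default:
        break;
    }
}

static status_t exampleSetWithCallback(const sp<AudioTrack>& track)
{
    return track->set(AUDIO_STREAM_MUSIC, 48000, AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE,
            exampleAudioTrackCallback, NULL /*user*/, 0 /*notificationFrames*/,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, AUDIO_SESSION_ALLOCATE,
            AudioTrack::TRANSFER_DEFAULT, NULL /*offloadInfo*/, -1 /*uid*/,
            -1 /*pid*/, NULL /*pAttributes*/);
}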
439 
440 // -------------------------------------------------------------------------
441 
442 status_t AudioTrack::start()
443 {
444     AutoMutex lock(mLock);
445 
446     if (mState == STATE_ACTIVE) {
447         return INVALID_OPERATION;
448     }
449 
450     mInUnderrun = true;
451 
452     State previousState = mState;
453     if (previousState == STATE_PAUSED_STOPPING) {
454         mState = STATE_STOPPING;
455     } else {
456         mState = STATE_ACTIVE;
457     }
458     (void) updateAndGetPosition_l();
459     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
460         // reset current position as seen by client to 0
461         mPosition = 0;
462         // For offloaded tracks, we don't know if the hardware counters are really zero here,
463         // since the flush is asynchronous and stop may not fully drain.
464         // We save the time when the track is started to later verify whether
465         // the counters are realistic (i.e. start from zero after this time).
466         mStartUs = getNowUs();
467 
468         // force refresh of remaining frames by processAudioBuffer() as last
469         // write before stop could be partial.
470         mRefreshRemaining = true;
471     }
472     mNewPosition = mPosition + mUpdatePeriod;
473     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
474 
475     sp<AudioTrackThread> t = mAudioTrackThread;
476     if (t != 0) {
477         if (previousState == STATE_STOPPING) {
478             mProxy->interrupt();
479         } else {
480             t->resume();
481         }
482     } else {
483         mPreviousPriority = getpriority(PRIO_PROCESS, 0);
484         get_sched_policy(0, &mPreviousSchedulingGroup);
485         androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
486     }
487 
488     status_t status = NO_ERROR;
489     if (!(flags & CBLK_INVALID)) {
490         status = mAudioTrack->start();
491         if (status == DEAD_OBJECT) {
492             flags |= CBLK_INVALID;
493         }
494     }
495     if (flags & CBLK_INVALID) {
496         status = restoreTrack_l("start");
497     }
498 
499     if (status != NO_ERROR) {
500         ALOGE("start() status %d", status);
501         mState = previousState;
502         if (t != 0) {
503             if (previousState != STATE_STOPPING) {
504                 t->pause();
505             }
506         } else {
507             setpriority(PRIO_PROCESS, 0, mPreviousPriority);
508             set_sched_policy(0, mPreviousSchedulingGroup);
509         }
510     }
511 
512     return status;
513 }
514 
515 void AudioTrack::stop()
516 {
517     AutoMutex lock(mLock);
518     if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
519         return;
520     }
521 
522     if (isOffloaded_l()) {
523         mState = STATE_STOPPING;
524     } else {
525         mState = STATE_STOPPED;
526         mReleased = 0;
527     }
528 
529     mProxy->interrupt();
530     mAudioTrack->stop();
531     // the playback head position will reset to 0, so if a marker is set, we need
532     // to activate it again
533     mMarkerReached = false;
534 #if 0
535     // Force a flush if a shared buffer is used, otherwise audioflinger
536     // will not stop before the end of the buffer is reached.
537     // This may be needed to make sure that we stop playback, e.g. when looping is on.
538     if (mSharedBuffer != 0) {
539         flush_l();
540     }
541 #endif
542 
543     sp<AudioTrackThread> t = mAudioTrackThread;
544     if (t != 0) {
545         if (!isOffloaded_l()) {
546             t->pause();
547         }
548     } else {
549         setpriority(PRIO_PROCESS, 0, mPreviousPriority);
550         set_sched_policy(0, mPreviousSchedulingGroup);
551     }
552 }
553 
554 bool AudioTrack::stopped() const
555 {
556     AutoMutex lock(mLock);
557     return mState != STATE_ACTIVE;
558 }
559 
560 void AudioTrack::flush()
561 {
562     if (mSharedBuffer != 0) {
563         return;
564     }
565     AutoMutex lock(mLock);
566     if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
567         return;
568     }
569     flush_l();
570 }
571 
572 void AudioTrack::flush_l()
573 {
574     ALOG_ASSERT(mState != STATE_ACTIVE);
575 
576     // clear playback marker and periodic update counter
577     mMarkerPosition = 0;
578     mMarkerReached = false;
579     mUpdatePeriod = 0;
580     mRefreshRemaining = true;
581 
582     mState = STATE_FLUSHED;
583     mReleased = 0;
584     if (isOffloaded_l()) {
585         mProxy->interrupt();
586     }
587     mProxy->flush();
588     mAudioTrack->flush();
589 }
590 
591 void AudioTrack::pause()
592 {
593     AutoMutex lock(mLock);
594     if (mState == STATE_ACTIVE) {
595         mState = STATE_PAUSED;
596     } else if (mState == STATE_STOPPING) {
597         mState = STATE_PAUSED_STOPPING;
598     } else {
599         return;
600     }
601     mProxy->interrupt();
602     mAudioTrack->pause();
603 
604     if (isOffloaded_l()) {
605         if (mOutput != AUDIO_IO_HANDLE_NONE) {
606             // An offload output can be re-used between two audio tracks having
607             // the same configuration. A timestamp query for a paused track
608             // while the other is running would return an incorrect time.
609             // To fix this, cache the playback position on a pause() and return
610             // this time when requested until the track is resumed.
611 
612             // OffloadThread sends HAL pause in its threadLoop. Time saved
613             // here can be slightly off.
614 
615             // TODO: check return code for getRenderPosition.
616 
617             uint32_t halFrames;
618             AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
619             ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
620         }
621     }
622 }
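
// Illustrative sketch: the client-visible state machine above is driven by
// start(), pause() and stop().  A hypothetical caller:
static void examplePlaybackLifecycle(const sp<AudioTrack>& track)
{
    if (track->start() != NO_ERROR) {   // STATE_STOPPED/STATE_FLUSHED -> STATE_ACTIVE
        return;
    }
    // ... supply data via write(), obtainBuffer()/releaseBuffer(), or the callback ...
    track->pause();                     // STATE_ACTIVE -> STATE_PAUSED; offloaded tracks
                                        // cache the playback position here (see above)
    track->start();                     // resume playback
    track->stop();                      // STATE_STOPPED, or STATE_STOPPING when offloaded
}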
623 
624 status_t AudioTrack::setVolume(float left, float right)
625 {
626     // This duplicates a test by AudioTrack JNI, but that is not the only caller
627     if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
628             isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
629         return BAD_VALUE;
630     }
631 
632     AutoMutex lock(mLock);
633     mVolume[AUDIO_INTERLEAVE_LEFT] = left;
634     mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
635 
636     mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
637 
638     if (isOffloaded_l()) {
639         mAudioTrack->signal();
640     }
641     return NO_ERROR;
642 }
643 
644 status_t AudioTrack::setVolume(float volume)
645 {
646     return setVolume(volume, volume);
647 }
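
// Illustrative sketch: per-channel volume is a linear gain in [0.0, 1.0];
// NaN or out-of-range values are rejected with BAD_VALUE as checked above.
static void exampleSetVolume(const sp<AudioTrack>& track)
{
    track->setVolume(1.0f);             // unity gain on both channels
    track->setVolume(0.25f, 0.75f);     // simple pan: quieter left, louder right
    // track->setVolume(1.5f);          // would fail: greater than GAIN_FLOAT_UNITY
}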
648 
649 status_t AudioTrack::setAuxEffectSendLevel(float level)
650 {
651     // This duplicates a test by AudioTrack JNI, but that is not the only caller
652     if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
653         return BAD_VALUE;
654     }
655 
656     AutoMutex lock(mLock);
657     mSendLevel = level;
658     mProxy->setSendLevel(level);
659 
660     return NO_ERROR;
661 }
662 
663 void AudioTrack::getAuxEffectSendLevel(float* level) const
664 {
665     if (level != NULL) {
666         *level = mSendLevel;
667     }
668 }
669 
670 status_t AudioTrack::setSampleRate(uint32_t rate)
671 {
672     if (mIsTimed || isOffloadedOrDirect()) {
673         return INVALID_OPERATION;
674     }
675 
676     AutoMutex lock(mLock);
677     if (mOutput == AUDIO_IO_HANDLE_NONE) {
678         return NO_INIT;
679     }
680     uint32_t afSamplingRate;
681     if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
682         return NO_INIT;
683     }
684     if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
685         return BAD_VALUE;
686     }
687 
688     mSampleRate = rate;
689     mProxy->setSampleRate(rate);
690 
691     return NO_ERROR;
692 }
693 
694 uint32_t AudioTrack::getSampleRate() const
695 {
696     if (mIsTimed) {
697         return 0;
698     }
699 
700     AutoMutex lock(mLock);
701 
702     // sample rate can be updated during playback by the offloaded decoder so we need to
703     // query the HAL and update if needed.
704 // FIXME use Proxy return channel to update the rate from server and avoid polling here
705     if (isOffloadedOrDirect_l()) {
706         if (mOutput != AUDIO_IO_HANDLE_NONE) {
707             uint32_t sampleRate = 0;
708             status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
709             if (status == NO_ERROR) {
710                 mSampleRate = sampleRate;
711             }
712         }
713     }
714     return mSampleRate;
715 }
716 
717 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
718 {
719     if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
720         return INVALID_OPERATION;
721     }
722 
723     if (loopCount == 0) {
724         ;
725     } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
726             loopEnd - loopStart >= MIN_LOOP) {
727         ;
728     } else {
729         return BAD_VALUE;
730     }
731 
732     AutoMutex lock(mLock);
733     // See setPosition() regarding setting parameters such as loop points or position while active
734     if (mState == STATE_ACTIVE) {
735         return INVALID_OPERATION;
736     }
737     setLoop_l(loopStart, loopEnd, loopCount);
738     return NO_ERROR;
739 }
740 
741 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
742 {
743     // Setting the loop will reset next notification update period (like setPosition).
744     mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
745     mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
746     mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
747 }
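
// Illustrative sketch: loop points only apply to static (shared-buffer) tracks
// and are expressed in frames within that buffer; the track must be inactive
// when they are set (see the check in setLoop()).  Names are hypothetical.
static status_t exampleLoopStaticTrack(const sp<AudioTrack>& track, uint32_t bufferFrames)
{
    // Play the second half of the buffer three extra times; a loopCount of -1
    // would loop indefinitely, and 0 clears any existing loop.
    return track->setLoop(bufferFrames / 2, bufferFrames, 3);
}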
748 
749 status_t AudioTrack::setMarkerPosition(uint32_t marker)
750 {
751     // The only purpose of setting marker position is to get a callback
752     if (mCbf == NULL || isOffloadedOrDirect()) {
753         return INVALID_OPERATION;
754     }
755 
756     AutoMutex lock(mLock);
757     mMarkerPosition = marker;
758     mMarkerReached = false;
759 
760     return NO_ERROR;
761 }
762 
763 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
764 {
765     if (isOffloadedOrDirect()) {
766         return INVALID_OPERATION;
767     }
768     if (marker == NULL) {
769         return BAD_VALUE;
770     }
771 
772     AutoMutex lock(mLock);
773     *marker = mMarkerPosition;
774 
775     return NO_ERROR;
776 }
777 
778 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
779 {
780     // The only purpose of setting position update period is to get a callback
781     if (mCbf == NULL || isOffloadedOrDirect()) {
782         return INVALID_OPERATION;
783     }
784 
785     AutoMutex lock(mLock);
786     mNewPosition = updateAndGetPosition_l() + updatePeriod;
787     mUpdatePeriod = updatePeriod;
788 
789     return NO_ERROR;
790 }
791 
792 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
793 {
794     if (isOffloadedOrDirect()) {
795         return INVALID_OPERATION;
796     }
797     if (updatePeriod == NULL) {
798         return BAD_VALUE;
799     }
800 
801     AutoMutex lock(mLock);
802     *updatePeriod = mUpdatePeriod;
803 
804     return NO_ERROR;
805 }
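
// Illustrative sketch: markers and the periodic position update only make sense
// for a track created with a callback; the callback is then assumed to receive
// EVENT_MARKER and EVENT_NEW_POS notifications (constants from <media/AudioTrack.h>).
static void exampleProgressNotifications(const sp<AudioTrack>& track, uint32_t sampleRate)
{
    track->setMarkerPosition(sampleRate * 5);       // one-shot callback at the 5 second mark
    track->setPositionUpdatePeriod(sampleRate / 2); // periodic callback every 500 ms
}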
806 
807 status_t AudioTrack::setPosition(uint32_t position)
808 {
809     if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
810         return INVALID_OPERATION;
811     }
812     if (position > mFrameCount) {
813         return BAD_VALUE;
814     }
815 
816     AutoMutex lock(mLock);
817     // Currently we require that the player is inactive before setting parameters such as position
818     // or loop points.  Otherwise, there could be a race condition: the application could read the
819     // current position, compute a new position or loop parameters, and then set that position or
820     // loop parameters but it would do the "wrong" thing since the position has continued to advance
821     // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
822     // to specify how it wants to handle such scenarios.
823     if (mState == STATE_ACTIVE) {
824         return INVALID_OPERATION;
825     }
826     mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
827     mLoopPeriod = 0;
828     // FIXME Check whether loops and setting position are incompatible in old code.
829     // If we use setLoop for both purposes we lose the capability to set the position while looping.
830     mStaticProxy->setLoop(position, mFrameCount, 0);
831 
832     return NO_ERROR;
833 }
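
// Illustrative sketch of the "inactive before repositioning" rule described in
// the comment above: a static-buffer client stops first, repositions, then restarts.
static status_t exampleRewindStaticTrack(const sp<AudioTrack>& track, uint32_t newPosition)
{
    track->stop();                              // setPosition() on an active track
                                                // would return INVALID_OPERATION
    status_t status = track->setPosition(newPosition);
    if (status == NO_ERROR) {
        status = track->start();                // resume from newPosition
    }
    return status;
}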
834 
835 status_t AudioTrack::getPosition(uint32_t *position)
836 {
837     if (position == NULL) {
838         return BAD_VALUE;
839     }
840 
841     AutoMutex lock(mLock);
842     if (isOffloadedOrDirect_l()) {
843         uint32_t dspFrames = 0;
844 
845         if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
846             ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
847             *position = mPausedPosition;
848             return NO_ERROR;
849         }
850 
851         if (mOutput != AUDIO_IO_HANDLE_NONE) {
852             uint32_t halFrames;
853             AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
854         }
855         // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
856         // due to hardware latency. We leave this behavior for now.
857         *position = dspFrames;
858     } else {
859         if (mCblk->mFlags & CBLK_INVALID) {
860             restoreTrack_l("getPosition");
861         }
862 
863         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
864         *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
865                 0 : updateAndGetPosition_l();
866     }
867     return NO_ERROR;
868 }
869 
870 status_t AudioTrack::getBufferPosition(uint32_t *position)
871 {
872     if (mSharedBuffer == 0 || mIsTimed) {
873         return INVALID_OPERATION;
874     }
875     if (position == NULL) {
876         return BAD_VALUE;
877     }
878 
879     AutoMutex lock(mLock);
880     *position = mStaticProxy->getBufferPosition();
881     return NO_ERROR;
882 }
883 
884 status_t AudioTrack::reload()
885 {
886     if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
887         return INVALID_OPERATION;
888     }
889 
890     AutoMutex lock(mLock);
891     // See setPosition() regarding setting parameters such as loop points or position while active
892     if (mState == STATE_ACTIVE) {
893         return INVALID_OPERATION;
894     }
895     mNewPosition = mUpdatePeriod;
896     mLoopPeriod = 0;
897     // FIXME The new code cannot reload while keeping a loop specified.
898     // Need to check how the old code handled this, and whether it's a significant change.
899     mStaticProxy->setLoop(0, mFrameCount, 0);
900     return NO_ERROR;
901 }
902 
903 audio_io_handle_t AudioTrack::getOutput() const
904 {
905     AutoMutex lock(mLock);
906     return mOutput;
907 }
908 
909 status_t AudioTrack::attachAuxEffect(int effectId)
910 {
911     AutoMutex lock(mLock);
912     status_t status = mAudioTrack->attachAuxEffect(effectId);
913     if (status == NO_ERROR) {
914         mAuxEffectId = effectId;
915     }
916     return status;
917 }
918 
919 audio_stream_type_t AudioTrack::streamType() const
920 {
921     if (mStreamType == AUDIO_STREAM_DEFAULT) {
922         return audio_attributes_to_stream_type(&mAttributes);
923     }
924     return mStreamType;
925 }
926 
927 // -------------------------------------------------------------------------
928 
929 // must be called with mLock held
930 status_t AudioTrack::createTrack_l()
931 {
932     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
933     if (audioFlinger == 0) {
934         ALOGE("Could not get audioflinger");
935         return NO_INIT;
936     }
937 
938     audio_io_handle_t output;
939     audio_stream_type_t streamType = mStreamType;
940     audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
941     status_t status = AudioSystem::getOutputForAttr(attr, &output,
942                                                     (audio_session_t)mSessionId, &streamType,
943                                                     mSampleRate, mFormat, mChannelMask,
944                                                     mFlags, mOffloadInfo);
945 
946 
947     if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
948         ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
949               " channel mask %#x, flags %#x",
950               streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
951         return BAD_VALUE;
952     }
953     {
954     // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
955     // we must release it ourselves if anything goes wrong.
956 
957     // Not all of these values are needed under all conditions, but it is easier to get them all
958 
959     uint32_t afLatency;
960     status = AudioSystem::getLatency(output, &afLatency);
961     if (status != NO_ERROR) {
962         ALOGE("getLatency(%d) failed status %d", output, status);
963         goto release;
964     }
965 
966     size_t afFrameCount;
967     status = AudioSystem::getFrameCount(output, &afFrameCount);
968     if (status != NO_ERROR) {
969         ALOGE("getFrameCount(output=%d) status %d", output, status);
970         goto release;
971     }
972 
973     uint32_t afSampleRate;
974     status = AudioSystem::getSamplingRate(output, &afSampleRate);
975     if (status != NO_ERROR) {
976         ALOGE("getSamplingRate(output=%d) status %d", output, status);
977         goto release;
978     }
979     if (mSampleRate == 0) {
980         mSampleRate = afSampleRate;
981     }
982     // Client decides whether the track is TIMED (see below), but can only express a preference
983     // for FAST.  Server will perform additional tests.
984     if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
985             // either of these use cases:
986             // use case 1: shared buffer
987             (mSharedBuffer != 0) ||
988             // use case 2: callback transfer mode
989             (mTransfer == TRANSFER_CALLBACK)) &&
990             // matching sample rate
991             (mSampleRate == afSampleRate))) {
992         ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
993         // once denied, do not request again if IAudioTrack is re-created
994         mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
995     }
996     ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
997 
998     // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
999     //  n = 1   fast track with single buffering; nBuffering is ignored
1000     //  n = 2   fast track with double buffering
1001     //  n = 2   normal track, no sample rate conversion
1002     //  n = 3   normal track, with sample rate conversion
1003     //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
1004     //  n > 3   very high latency or very small notification interval; nBuffering is ignored
1005     const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
1006 
1007     mNotificationFramesAct = mNotificationFramesReq;
1008 
1009     size_t frameCount = mReqFrameCount;
1010     if (!audio_is_linear_pcm(mFormat)) {
1011 
1012         if (mSharedBuffer != 0) {
1013             // Same comment as below about ignoring frameCount parameter for set()
1014             frameCount = mSharedBuffer->size();
1015         } else if (frameCount == 0) {
1016             frameCount = afFrameCount;
1017         }
1018         if (mNotificationFramesAct != frameCount) {
1019             mNotificationFramesAct = frameCount;
1020         }
1021     } else if (mSharedBuffer != 0) {
1022 
1023         // Ensure that buffer alignment matches channel count
1024         // 8-bit data in shared memory is not currently supported by AudioFlinger
1025         size_t alignment = audio_bytes_per_sample(
1026                 mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
1027         if (alignment & 1) {
1028             alignment = 1;
1029         }
1030         if (mChannelCount > 1) {
1031             // More than 2 channels does not require stronger alignment than stereo
1032             alignment <<= 1;
1033         }
1034         if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1035             ALOGE("Invalid buffer alignment: address %p, channel count %u",
1036                     mSharedBuffer->pointer(), mChannelCount);
1037             status = BAD_VALUE;
1038             goto release;
1039         }
1040 
1041         // When initializing a shared buffer AudioTrack via constructors,
1042         // there's no frameCount parameter.
1043         // But when initializing a shared buffer AudioTrack via set(),
1044         // there _is_ a frameCount parameter.  We silently ignore it.
1045         frameCount = mSharedBuffer->size() / mFrameSizeAF;
1046 
1047     } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
1048 
1049         // FIXME move these calculations and associated checks to server
1050 
1051         // Ensure that buffer depth covers at least audio hardware latency
1052         uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
1053         ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
1054                 afFrameCount, minBufCount, afSampleRate, afLatency);
1055         if (minBufCount <= nBuffering) {
1056             minBufCount = nBuffering;
1057         }
1058 
1059         size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
1060         ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
1061                 ", afLatency=%d",
1062                 minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
1063 
1064         if (frameCount == 0) {
1065             frameCount = minFrameCount;
1066         } else if (frameCount < minFrameCount) {
1067             // not ALOGW because it happens all the time when playing key clicks over A2DP
1068             ALOGV("Minimum buffer size corrected from %zu to %zu",
1069                      frameCount, minFrameCount);
1070             frameCount = minFrameCount;
1071         }
1072         // Make sure that application is notified with sufficient margin before underrun
1073         if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1074             mNotificationFramesAct = frameCount/nBuffering;
1075         }
1076 
1077     } else {
1078         // For fast tracks, the frame count calculations and checks are done by server
1079     }
1080 
1081     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1082     if (mIsTimed) {
1083         trackFlags |= IAudioFlinger::TRACK_TIMED;
1084     }
1085 
1086     pid_t tid = -1;
1087     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1088         trackFlags |= IAudioFlinger::TRACK_FAST;
1089         if (mAudioTrackThread != 0) {
1090             tid = mAudioTrackThread->getTid();
1091         }
1092     }
1093 
1094     if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1095         trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1096     }
1097 
1098     if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1099         trackFlags |= IAudioFlinger::TRACK_DIRECT;
1100     }
1101 
1102     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1103                                 // but we will still need the original value also
1104     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1105                                                       mSampleRate,
1106                                                       // AudioFlinger only sees 16-bit PCM
1107                                                       mFormat == AUDIO_FORMAT_PCM_8_BIT &&
1108                                                           !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
1109                                                               AUDIO_FORMAT_PCM_16_BIT : mFormat,
1110                                                       mChannelMask,
1111                                                       &temp,
1112                                                       &trackFlags,
1113                                                       mSharedBuffer,
1114                                                       output,
1115                                                       tid,
1116                                                       &mSessionId,
1117                                                       mClientUid,
1118                                                       &status);
1119 
1120     if (status != NO_ERROR) {
1121         ALOGE("AudioFlinger could not create track, status: %d", status);
1122         goto release;
1123     }
1124     ALOG_ASSERT(track != 0);
1125 
1126     // AudioFlinger now owns the reference to the I/O handle,
1127     // so we are no longer responsible for releasing it.
1128 
1129     sp<IMemory> iMem = track->getCblk();
1130     if (iMem == 0) {
1131         ALOGE("Could not get control block");
1132         return NO_INIT;
1133     }
1134     void *iMemPointer = iMem->pointer();
1135     if (iMemPointer == NULL) {
1136         ALOGE("Could not get control block pointer");
1137         return NO_INIT;
1138     }
1139     // invariant that mAudioTrack != 0 is true only after set() returns successfully
1140     if (mAudioTrack != 0) {
1141         mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1142         mDeathNotifier.clear();
1143     }
1144     mAudioTrack = track;
1145     mCblkMemory = iMem;
1146     IPCThreadState::self()->flushCommands();
1147 
1148     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1149     mCblk = cblk;
1150     // note that temp is the (possibly revised) value of frameCount
1151     if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1152         // In the current design, the AudioTrack client checks and ensures frame count validity
1153         // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
1154         // except for a fast track, which uses a special method of assigning its frame count.
1155         ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1156     }
1157     frameCount = temp;
1158 
1159     mAwaitBoost = false;
1160     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1161         if (trackFlags & IAudioFlinger::TRACK_FAST) {
1162             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1163             mAwaitBoost = true;
1164             if (mSharedBuffer == 0) {
1165                 // Theoretically double-buffering is not required for fast tracks,
1166                 // due to tighter scheduling.  But in practice, to accommodate kernels with
1167                 // scheduling jitter, and apps with computation jitter, we use double-buffering.
1168                 if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1169                     mNotificationFramesAct = frameCount/nBuffering;
1170                 }
1171             }
1172         } else {
1173             ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1174             // once denied, do not request again if IAudioTrack is re-created
1175             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1176             if (mSharedBuffer == 0) {
1177                 if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1178                     mNotificationFramesAct = frameCount/nBuffering;
1179                 }
1180             }
1181         }
1182     }
1183     if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1184         if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1185             ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1186         } else {
1187             ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1188             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1189             // FIXME This is a warning, not an error, so don't return error status
1190             //return NO_INIT;
1191         }
1192     }
1193     if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1194         if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1195             ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1196         } else {
1197             ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1198             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1199             // FIXME This is a warning, not an error, so don't return error status
1200             //return NO_INIT;
1201         }
1202     }
1203 
1204     // We retain a copy of the I/O handle, but don't own the reference
1205     mOutput = output;
1206     mRefreshRemaining = true;
1207 
1208     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1209     // is the value of pointer() for the shared buffer, otherwise buffers points
1210     // immediately after the control block.  This address is for the mapping within client
1211     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1212     void* buffers;
1213     if (mSharedBuffer == 0) {
1214         buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1215     } else {
1216         buffers = mSharedBuffer->pointer();
1217     }
1218 
1219     mAudioTrack->attachAuxEffect(mAuxEffectId);
1220     // FIXME don't believe this lie
1221     mLatency = afLatency + (1000*frameCount) / mSampleRate;
1222 
1223     mFrameCount = frameCount;
1224     // If IAudioTrack is re-created, don't let the requested frameCount
1225     // decrease.  This can confuse clients that cache frameCount().
1226     if (frameCount > mReqFrameCount) {
1227         mReqFrameCount = frameCount;
1228     }
1229 
1230     // update proxy
1231     if (mSharedBuffer == 0) {
1232         mStaticProxy.clear();
1233         mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1234     } else {
1235         mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1236         mProxy = mStaticProxy;
1237     }
1238 
1239     mProxy->setVolumeLR(gain_minifloat_pack(
1240             gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1241             gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1242 
1243     mProxy->setSendLevel(mSendLevel);
1244     mProxy->setSampleRate(mSampleRate);
1245     mProxy->setMinimum(mNotificationFramesAct);
1246 
1247     mDeathNotifier = new DeathNotifier(this);
1248     mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1249 
1250     return NO_ERROR;
1251     }
1252 
1253 release:
1254     AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
1255     if (status == NO_ERROR) {
1256         status = NO_INIT;
1257     }
1258     return status;
1259 }
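
// Illustrative sketch of the normal-track buffer sizing arithmetic above, using
// hypothetical hardware values: afFrameCount = 960, afSampleRate = 48000 and
// afLatency = 60 ms give a 20 ms mixer period, so minBufCount = 3, and a
// 44100 Hz client needs at least 960 * 3 * 44100 / 48000 = 2646 frames.
static size_t exampleMinFrameCount(size_t afFrameCount, uint32_t afSampleRate,
        uint32_t afLatency, uint32_t sampleRate, uint32_t nBuffering)
{
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < nBuffering) {
        minBufCount = nBuffering;
    }
    return afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
}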
1260 
1261 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1262 {
1263     if (audioBuffer == NULL) {
1264         return BAD_VALUE;
1265     }
1266     if (mTransfer != TRANSFER_OBTAIN) {
1267         audioBuffer->frameCount = 0;
1268         audioBuffer->size = 0;
1269         audioBuffer->raw = NULL;
1270         return INVALID_OPERATION;
1271     }
1272 
1273     const struct timespec *requested;
1274     struct timespec timeout;
1275     if (waitCount == -1) {
1276         requested = &ClientProxy::kForever;
1277     } else if (waitCount == 0) {
1278         requested = &ClientProxy::kNonBlocking;
1279     } else if (waitCount > 0) {
1280         long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1281         timeout.tv_sec = ms / 1000;
1282         timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1283         requested = &timeout;
1284     } else {
1285         ALOGE("%s invalid waitCount %d", __func__, waitCount);
1286         requested = NULL;
1287     }
1288     return obtainBuffer(audioBuffer, requested);
1289 }
1290 
1291 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1292         struct timespec *elapsed, size_t *nonContig)
1293 {
1294     // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1295     uint32_t oldSequence = 0;
1296     uint32_t newSequence;
1297 
1298     Proxy::Buffer buffer;
1299     status_t status = NO_ERROR;
1300 
1301     static const int32_t kMaxTries = 5;
1302     int32_t tryCounter = kMaxTries;
1303 
1304     do {
1305         // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1306         // keep them from going away if another thread re-creates the track during obtainBuffer()
1307         sp<AudioTrackClientProxy> proxy;
1308         sp<IMemory> iMem;
1309 
1310         {   // start of lock scope
1311             AutoMutex lock(mLock);
1312 
1313             newSequence = mSequence;
1314             // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1315             if (status == DEAD_OBJECT) {
1316                 // re-create track, unless someone else has already done so
1317                 if (newSequence == oldSequence) {
1318                     status = restoreTrack_l("obtainBuffer");
1319                     if (status != NO_ERROR) {
1320                         buffer.mFrameCount = 0;
1321                         buffer.mRaw = NULL;
1322                         buffer.mNonContig = 0;
1323                         break;
1324                     }
1325                 }
1326             }
1327             oldSequence = newSequence;
1328 
1329             // Keep the extra references
1330             proxy = mProxy;
1331             iMem = mCblkMemory;
1332 
1333             if (mState == STATE_STOPPING) {
1334                 status = -EINTR;
1335                 buffer.mFrameCount = 0;
1336                 buffer.mRaw = NULL;
1337                 buffer.mNonContig = 0;
1338                 break;
1339             }
1340 
1341             // Non-blocking if track is stopped or paused
1342             if (mState != STATE_ACTIVE) {
1343                 requested = &ClientProxy::kNonBlocking;
1344             }
1345 
1346         }   // end of lock scope
1347 
1348         buffer.mFrameCount = audioBuffer->frameCount;
1349         // FIXME: this starts the requested timeout and elapsed time over from scratch
1350         status = proxy->obtainBuffer(&buffer, requested, elapsed);
1351 
1352     } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1353 
1354     audioBuffer->frameCount = buffer.mFrameCount;
1355     audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1356     audioBuffer->raw = buffer.mRaw;
1357     if (nonContig != NULL) {
1358         *nonContig = buffer.mNonContig;
1359     }
1360     return status;
1361 }
1362 
1363 void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1364 {
1365     if (mTransfer == TRANSFER_SHARED) {
1366         return;
1367     }
1368 
1369     size_t stepCount = audioBuffer->size / mFrameSizeAF;
1370     if (stepCount == 0) {
1371         return;
1372     }
1373 
1374     Proxy::Buffer buffer;
1375     buffer.mFrameCount = stepCount;
1376     buffer.mRaw = audioBuffer->raw;
1377 
1378     AutoMutex lock(mLock);
1379     mReleased += stepCount;
1380     mInUnderrun = false;
1381     mProxy->releaseBuffer(&buffer);
1382 
1383     // restart track if it was disabled by audioflinger due to previous underrun
1384     if (mState == STATE_ACTIVE) {
1385         audio_track_cblk_t* cblk = mCblk;
1386         if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1387             ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1388             // FIXME ignoring status
1389             mAudioTrack->start();
1390         }
1391     }
1392 }
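
// Illustrative sketch: a TRANSFER_OBTAIN client fills the track buffer directly
// with obtainBuffer()/releaseBuffer() pairs.  The 16-bit stereo assumption and
// all names below are hypothetical.
static void exampleObtainWriteLoop(const sp<AudioTrack>& track,
        const int16_t* pcm, size_t frames)
{
    while (frames > 0) {
        AudioTrack::Buffer buffer;
        buffer.frameCount = frames;
        // Wait up to 10 * WAIT_PERIOD_MS for space; -1 would block indefinitely
        // and 0 would return immediately.
        status_t status = track->obtainBuffer(&buffer, 10 /*waitCount*/);
        if (status != NO_ERROR) {
            break;                      // e.g. WOULD_BLOCK, DEAD_OBJECT
        }
        memcpy(buffer.raw, pcm, buffer.size);
        pcm += buffer.frameCount * 2;   // advance by frames * channels samples
        frames -= buffer.frameCount;
        track->releaseBuffer(&buffer);
    }
}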
1393 
1394 // -------------------------------------------------------------------------
1395 
1396 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1397 {
1398     if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1399         return INVALID_OPERATION;
1400     }
1401 
1402     if (isDirect()) {
1403         AutoMutex lock(mLock);
1404         int32_t flags = android_atomic_and(
1405                             ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1406                             &mCblk->mFlags);
1407         if (flags & CBLK_INVALID) {
1408             return DEAD_OBJECT;
1409         }
1410     }
1411 
1412     if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1413         // Sanity-check: user is most-likely passing an error code, and it would
1414         // make the return value ambiguous (actualSize vs error).
1415         ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1416         return BAD_VALUE;
1417     }
1418 
1419     size_t written = 0;
1420     Buffer audioBuffer;
1421 
1422     while (userSize >= mFrameSize) {
1423         audioBuffer.frameCount = userSize / mFrameSize;
1424 
1425         status_t err = obtainBuffer(&audioBuffer,
1426                 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1427         if (err < 0) {
1428             if (written > 0) {
1429                 break;
1430             }
1431             return ssize_t(err);
1432         }
1433 
1434         size_t toWrite;
1435         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1436             // Divide capacity by 2 to take expansion into account
1437             toWrite = audioBuffer.size >> 1;
1438             memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1439         } else {
1440             toWrite = audioBuffer.size;
1441             memcpy(audioBuffer.i8, buffer, toWrite);
1442         }
1443         buffer = ((const char *) buffer) + toWrite;
1444         userSize -= toWrite;
1445         written += toWrite;
1446 
1447         releaseBuffer(&audioBuffer);
1448     }
1449 
1450     return written;
1451 }
1452 
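// Illustrative usage sketch (not part of this file): a client streaming 16-bit PCM with
// blocking writes (TRANSFER_SYNC, i.e. no shared buffer and no callback).  The exact
// construction arguments are abbreviated/assumed here:
//
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 48000,
//             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/);
//     if (track->initCheck() == NO_ERROR && track->start() == NO_ERROR) {
//         ssize_t consumed = track->write(pcm, pcmBytes, true /*blocking*/);
//         // consumed < 0 is an error code, e.g. DEAD_OBJECT after a mediaserver restart
//     }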
1453 // -------------------------------------------------------------------------
1454 
1455 TimedAudioTrack::TimedAudioTrack() {
1456     mIsTimed = true;
1457 }
1458 
1459 status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1460 {
1461     AutoMutex lock(mLock);
1462     status_t result = UNKNOWN_ERROR;
1463 
1464 #if 1
1465     // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1466     // while we are accessing the cblk
1467     sp<IAudioTrack> audioTrack = mAudioTrack;
1468     sp<IMemory> iMem = mCblkMemory;
1469 #endif
1470 
1471     // If the track is not already invalid, try to allocate a buffer.  If the alloc
1472     // fails, indicating that the server is dead, flag the track as invalid so that
1473     // we can attempt to restore it in just a bit.
1474     audio_track_cblk_t* cblk = mCblk;
1475     if (!(cblk->mFlags & CBLK_INVALID)) {
1476         result = mAudioTrack->allocateTimedBuffer(size, buffer);
1477         if (result == DEAD_OBJECT) {
1478             android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1479         }
1480     }
1481 
1482     // If the track is invalid at this point, attempt to restore it and try the
1483     // allocation one more time.
1484     if (cblk->mFlags & CBLK_INVALID) {
1485         result = restoreTrack_l("allocateTimedBuffer");
1486 
1487         if (result == NO_ERROR) {
1488             result = mAudioTrack->allocateTimedBuffer(size, buffer);
1489         }
1490     }
1491 
1492     return result;
1493 }
1494 
1495 status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1496                                            int64_t pts)
1497 {
1498     status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1499     {
1500         AutoMutex lock(mLock);
1501         audio_track_cblk_t* cblk = mCblk;
1502         // restart track if it was disabled by audioflinger due to previous underrun
1503         if (buffer->size() != 0 && status == NO_ERROR &&
1504                 (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1505             android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1506             ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1507             // FIXME ignoring status
1508             mAudioTrack->start();
1509         }
1510     }
1511     return status;
1512 }
1513 
1514 status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1515                                                 TargetTimeline target)
1516 {
1517     return mAudioTrack->setMediaTimeTransform(xform, target);
1518 }
1519 
1520 // -------------------------------------------------------------------------
1521 
1522 nsecs_t AudioTrack::processAudioBuffer()
1523 {
1524     // Currently the AudioTrack thread is not created if there are no callbacks.
1525     // Would it ever make sense to run the thread, even without callbacks?
1526     // If so, then replace this by checks at each use for mCbf != NULL.
1527     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1528 
1529     mLock.lock();
1530     if (mAwaitBoost) {
1531         mAwaitBoost = false;
1532         mLock.unlock();
1533         static const int32_t kMaxTries = 5;
1534         int32_t tryCounter = kMaxTries;
1535         uint32_t pollUs = 10000;
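        // Poll with exponential backoff (10 ms, doubling on each retry) until the scheduler
        // reports that the requested SCHED_FIFO/SCHED_RR priority boost has taken effect.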
1536         do {
1537             int policy = sched_getscheduler(0);
1538             if (policy == SCHED_FIFO || policy == SCHED_RR) {
1539                 break;
1540             }
1541             usleep(pollUs);
1542             pollUs <<= 1;
1543         } while (tryCounter-- > 0);
1544         if (tryCounter < 0) {
1545             ALOGE("did not receive expected priority boost on time");
1546         }
1547         // Run again immediately
1548         return 0;
1549     }
1550 
1551     // Can only reference mCblk while locked
1552     int32_t flags = android_atomic_and(
1553         ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1554 
1555     // Check for track invalidation
1556     if (flags & CBLK_INVALID) {
1557         // For offloaded tracks, restoreTrack_l() will just update the sequence and clear the
1558         // AudioSystem cache.  We should not exit here, but only after calling the callback, so
1559         // that the upper layers can recreate the track.
1560         if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1561             status_t status = restoreTrack_l("processAudioBuffer");
1562             mLock.unlock();
1563             // Run again immediately, but with a new IAudioTrack
1564             return 0;
1565         }
1566     }
1567 
1568     bool waitStreamEnd = mState == STATE_STOPPING;
1569     bool active = mState == STATE_ACTIVE;
1570 
1571     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1572     bool newUnderrun = false;
1573     if (flags & CBLK_UNDERRUN) {
1574 #if 0
1575         // Currently in shared buffer mode, when the server reaches the end of buffer,
1576         // the track stays active in continuous underrun state.  It's up to the application
1577         // to pause or stop the track, or set the position to a new offset within buffer.
1578         // This was some experimental code to auto-pause on underrun.   Keeping it here
1579         // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1580         if (mTransfer == TRANSFER_SHARED) {
1581             mState = STATE_PAUSED;
1582             active = false;
1583         }
1584 #endif
1585         if (!mInUnderrun) {
1586             mInUnderrun = true;
1587             newUnderrun = true;
1588         }
1589     }
1590 
1591     // Get current position of server
1592     size_t position = updateAndGetPosition_l();
1593 
1594     // Manage marker callback
1595     bool markerReached = false;
1596     size_t markerPosition = mMarkerPosition;
1597     // FIXME fails for wraparound, need 64 bits
1598     if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1599         mMarkerReached = markerReached = true;
1600     }
1601 
1602     // Determine number of new position callback(s) that will be needed, while locked
1603     size_t newPosCount = 0;
1604     size_t newPosition = mNewPosition;
1605     size_t updatePeriod = mUpdatePeriod;
1606     // FIXME fails for wraparound, need 64 bits
1607     if (updatePeriod > 0 && position >= newPosition) {
1608         newPosCount = ((position - newPosition) / updatePeriod) + 1;
1609         mNewPosition += updatePeriod * newPosCount;
1610     }
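    // Example: with updatePeriod == 480 and position 1000 frames beyond newPosition,
    // newPosCount == (1000 / 480) + 1 == 3 and mNewPosition advances by 1440 frames,
    // i.e. to the first period boundary strictly beyond the current position.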
1611 
1612     // Cache other fields that will be needed soon
1613     uint32_t loopPeriod = mLoopPeriod;
1614     uint32_t sampleRate = mSampleRate;
1615     uint32_t notificationFrames = mNotificationFramesAct;
1616     if (mRefreshRemaining) {
1617         mRefreshRemaining = false;
1618         mRemainingFrames = notificationFrames;
1619         mRetryOnPartialBuffer = false;
1620     }
1621     size_t misalignment = mProxy->getMisalignment();
1622     uint32_t sequence = mSequence;
1623     sp<AudioTrackClientProxy> proxy = mProxy;
1624 
1625     // These fields don't need to be cached, because they are assigned only by set():
1626     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1627     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1628 
1629     mLock.unlock();
1630 
1631     if (waitStreamEnd) {
1632         struct timespec timeout;
1633         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1634         timeout.tv_nsec = 0;
1635 
1636         status_t status = proxy->waitStreamEndDone(&timeout);
1637         switch (status) {
1638         case NO_ERROR:
1639         case DEAD_OBJECT:
1640         case TIMED_OUT:
1641             mCbf(EVENT_STREAM_END, mUserData, NULL);
1642             {
1643                 AutoMutex lock(mLock);
1644                 // The previously assigned value of waitStreamEnd is no longer valid,
1645                 // since the mutex has been unlocked and either the callback handler
1646                 // or another thread could have re-started the AudioTrack during that time.
1647                 waitStreamEnd = mState == STATE_STOPPING;
1648                 if (waitStreamEnd) {
1649                     mState = STATE_STOPPED;
1650                     mReleased = 0;
1651                 }
1652             }
1653             if (waitStreamEnd && status != DEAD_OBJECT) {
1654                return NS_INACTIVE;
1655             }
1656             break;
1657         }
1658         return 0;
1659     }
1660 
1661     // perform callbacks while unlocked
1662     if (newUnderrun) {
1663         mCbf(EVENT_UNDERRUN, mUserData, NULL);
1664     }
1665     // FIXME we will miss loops if loop cycle was signaled several times since last call
1666     //       to processAudioBuffer()
1667     if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1668         mCbf(EVENT_LOOP_END, mUserData, NULL);
1669     }
1670     if (flags & CBLK_BUFFER_END) {
1671         mCbf(EVENT_BUFFER_END, mUserData, NULL);
1672     }
1673     if (markerReached) {
1674         mCbf(EVENT_MARKER, mUserData, &markerPosition);
1675     }
1676     while (newPosCount > 0) {
1677         size_t temp = newPosition;
1678         mCbf(EVENT_NEW_POS, mUserData, &temp);
1679         newPosition += updatePeriod;
1680         newPosCount--;
1681     }
1682 
1683     if (mObservedSequence != sequence) {
1684         mObservedSequence = sequence;
1685         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1686         // for offloaded tracks, just wait for the upper layers to recreate the track
1687         if (isOffloadedOrDirect()) {
1688             return NS_INACTIVE;
1689         }
1690     }
1691 
1692     // if inactive, then don't run me again until re-started
1693     if (!active) {
1694         return NS_INACTIVE;
1695     }
1696 
1697     // Compute the estimated time until the next timed event (position, markers, loops)
1698     // FIXME only for non-compressed audio
1699     uint32_t minFrames = ~0;
1700     if (!markerReached && position < markerPosition) {
1701         minFrames = markerPosition - position;
1702     }
1703     if (loopPeriod > 0 && loopPeriod < minFrames) {
1704         minFrames = loopPeriod;
1705     }
1706     if (updatePeriod > 0 && updatePeriod < minFrames) {
1707         minFrames = updatePeriod;
1708     }
1709 
1710     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1711     static const uint32_t kPoll = 0;
1712     if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1713         minFrames = kPoll * notificationFrames;
1714     }
1715 
1716     // Convert frame units to time units
1717     nsecs_t ns = NS_WHENEVER;
1718     if (minFrames != (uint32_t) ~0) {
1719         // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1720         static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1721         ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1722     }
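    // Example: minFrames == 960 at 48000 Hz is 20 ms of audio, so ns == 30000000
    // (20 ms plus the 10 ms fudge) and the thread sleeps about 30 ms.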
1723 
1724     // If not supplying data by EVENT_MORE_DATA, then we're done
1725     if (mTransfer != TRANSFER_CALLBACK) {
1726         return ns;
1727     }
1728 
1729     struct timespec timeout;
1730     const struct timespec *requested = &ClientProxy::kForever;
1731     if (ns != NS_WHENEVER) {
1732         timeout.tv_sec = ns / 1000000000LL;
1733         timeout.tv_nsec = ns % 1000000000LL;
1734         ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1735         requested = &timeout;
1736     }
1737 
1738     while (mRemainingFrames > 0) {
1739 
1740         Buffer audioBuffer;
1741         audioBuffer.frameCount = mRemainingFrames;
1742         size_t nonContig;
1743         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1744         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1745                 "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1746         requested = &ClientProxy::kNonBlocking;
1747         size_t avail = audioBuffer.frameCount + nonContig;
1748         ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1749                 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1750         if (err != NO_ERROR) {
1751             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1752                     (isOffloaded() && (err == DEAD_OBJECT))) {
1753                 return 0;
1754             }
1755             ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1756             return NS_NEVER;
1757         }
1758 
1759         if (mRetryOnPartialBuffer && !isOffloaded()) {
1760             mRetryOnPartialBuffer = false;
1761             if (avail < mRemainingFrames) {
1762                 int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1763                 if (ns < 0 || myns < ns) {
1764                     ns = myns;
1765                 }
1766                 return ns;
1767             }
1768         }
1769 
1770         // Divide buffer size by 2 to take into account the expansion
1771         // due to 8 to 16 bit conversion: the callback must fill only half
1772         // of the destination buffer
1773         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1774             audioBuffer.size >>= 1;
1775         }
1776 
1777         size_t reqSize = audioBuffer.size;
1778         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1779         size_t writtenSize = audioBuffer.size;
1780 
1781         // Sanity check on returned size
1782         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1783             ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1784                     reqSize, ssize_t(writtenSize));
1785             return NS_NEVER;
1786         }
1787 
1788         if (writtenSize == 0) {
1789             // The callback is done filling buffers
1790             // Keep this thread going to handle timed events and
1791             // still try to get more data in intervals of WAIT_PERIOD_MS
1792             // but don't just loop and block the CPU, so wait
1793             return WAIT_PERIOD_MS * 1000000LL;
1794         }
1795 
1796         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1797             // 8 to 16 bit conversion, note that source and destination are the same address
1798             memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1799             audioBuffer.size <<= 1;
1800         }
1801 
1802         size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1803         audioBuffer.frameCount = releasedFrames;
1804         mRemainingFrames -= releasedFrames;
1805         if (misalignment >= releasedFrames) {
1806             misalignment -= releasedFrames;
1807         } else {
1808             misalignment = 0;
1809         }
1810 
1811         releaseBuffer(&audioBuffer);
1812 
1813         // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1814         //       if the callback does not accept the full chunk
1815         if (writtenSize < reqSize) {
1816             continue;
1817         }
1818 
1819         // There could be enough non-contiguous frames available to satisfy the remaining request
1820         if (mRemainingFrames <= nonContig) {
1821             continue;
1822         }
1823 
1824 #if 0
1825         // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1826         // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1827         // that total to a sum == notificationFrames.
1828         if (0 < misalignment && misalignment <= mRemainingFrames) {
1829             mRemainingFrames = misalignment;
1830             return (mRemainingFrames * 1100000000LL) / sampleRate;
1831         }
1832 #endif
1833 
1834     }
1835     mRemainingFrames = notificationFrames;
1836     mRetryOnPartialBuffer = true;
1837 
1838     // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1839     return 0;
1840 }
1841 
1842 status_t AudioTrack::restoreTrack_l(const char *from)
1843 {
1844     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1845           isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1846     ++mSequence;
1847     status_t result;
1848 
1849     // refresh the audio configuration cache in this process to make sure we get new
1850     // output parameters and new IAudioFlinger in createTrack_l()
1851     AudioSystem::clearAudioConfigCache();
1852 
1853     if (isOffloadedOrDirect_l()) {
1854         // FIXME re-creation of offloaded tracks is not yet implemented
1855         return DEAD_OBJECT;
1856     }
1857 
1858     // save the old static buffer position
1859     size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1860 
1861     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
1862     // following member variables: mAudioTrack, mCblkMemory and mCblk.
1863     // It will also delete the strong references on previous IAudioTrack and IMemory.
1864     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
1865     result = createTrack_l();
1866 
1867     // take the frames that will be lost by track recreation into account in saved position
1868     (void) updateAndGetPosition_l();
1869     mPosition = mReleased;
1870 
1871     if (result == NO_ERROR) {
1872         // continue playback from last known position, but
1873         // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1874         if (mStaticProxy != NULL) {
1875             mLoopPeriod = 0;
1876             mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1877         }
1878         // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1879         //       track destruction have been played? This is critical for SoundPool implementation
1880         //       This must be broken, and needs to be tested/debugged.
1881 #if 0
1882         // restore write index and set other indexes to reflect empty buffer status
1883         if (!strcmp(from, "start")) {
1884             // Make sure that a client relying on callback events indicating underrun or
1885             // the actual amount of audio frames played (e.g SoundPool) receives them.
1886             if (mSharedBuffer == 0) {
1887                 // restart playback even if buffer is not completely filled.
1888                 android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1889             }
1890         }
1891 #endif
1892         if (mState == STATE_ACTIVE) {
1893             result = mAudioTrack->start();
1894         }
1895     }
1896     if (result != NO_ERROR) {
1897         ALOGW("restoreTrack_l() failed status %d", result);
1898         mState = STATE_STOPPED;
1899         mReleased = 0;
1900     }
1901 
1902     return result;
1903 }
1904 
1905 uint32_t AudioTrack::updateAndGetPosition_l()
1906 {
1907     // This is the sole place to read server consumed frames
1908     uint32_t newServer = mProxy->getPosition();
1909     int32_t delta = newServer - mServer;
1910     mServer = newServer;
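    // The unsigned subtraction above, interpreted as int32_t, in practice yields the signed
    // advance even across 32-bit wraparound: e.g. mServer == 0xFFFFFF00 and
    // newServer == 0x00000100 give delta == 0x200 == 512 frames.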
1911     // TODO There is controversy about whether there can be "negative jitter" in server position.
1912     //      This should be investigated further, and if possible, it should be addressed.
1913     //      A more definite failure mode is infrequent polling by client.
1914     //      One could call (void)getPosition_l() in releaseBuffer(),
1915     //      so mReleased and mPosition are always lock-step as best possible.
1916     //      That should ensure delta never goes negative for infrequent polling
1917     //      unless the server has more than 2^31 frames in its buffer,
1918     //      in which case the use of uint32_t for these counters has bigger issues.
1919     if (delta < 0) {
1920         ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
1921         delta = 0;
1922     }
1923     return mPosition += (uint32_t) delta;
1924 }
1925 
1926 status_t AudioTrack::setParameters(const String8& keyValuePairs)
1927 {
1928     AutoMutex lock(mLock);
1929     return mAudioTrack->setParameters(keyValuePairs);
1930 }
1931 
1932 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1933 {
1934     AutoMutex lock(mLock);
1935     // FIXME not implemented for fast tracks; should use proxy and SSQ
1936     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1937         return INVALID_OPERATION;
1938     }
1939 
1940     switch (mState) {
1941     case STATE_ACTIVE:
1942     case STATE_PAUSED:
1943         break; // handle below
1944     case STATE_FLUSHED:
1945     case STATE_STOPPED:
1946         return WOULD_BLOCK;
1947     case STATE_STOPPING:
1948     case STATE_PAUSED_STOPPING:
1949         if (!isOffloaded_l()) {
1950             return INVALID_OPERATION;
1951         }
1952         break; // offloaded tracks handled below
1953     default:
1954         LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
1955         break;
1956     }
1957 
1958     if (mCblk->mFlags & CBLK_INVALID) {
1959         restoreTrack_l("getTimestamp");
1960     }
1961 
1962     // The presented frame count must always lag behind the consumed frame count.
1963     // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
1964     status_t status = mAudioTrack->getTimestamp(timestamp);
1965     if (status != NO_ERROR) {
1966         ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
1967         return status;
1968     }
1969     if (isOffloadedOrDirect_l()) {
1970         if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
1971             // use cached paused position in case another offloaded track is running.
1972             timestamp.mPosition = mPausedPosition;
1973             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
1974             return NO_ERROR;
1975         }
1976 
1977         // Check whether a pending flush or stop has completed, as those commands may
1978         // be asynchronous or may return just before they have fully taken effect.
1979         if (mStartUs != 0 && mSampleRate != 0) {
1980             static const int kTimeJitterUs = 100000; // 100 ms
1981             static const int k1SecUs = 1000000;
1982 
1983             const int64_t timeNow = getNowUs();
1984 
1985             if (timeNow < mStartUs + k1SecUs) { // within first second of starting
1986                 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
1987                 if (timestampTimeUs < mStartUs) {
1988                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
1989                 }
1990                 const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
1991                 const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
1992 
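                // Example: a reported position of 24000 frames at 48000 Hz implies 500 ms of
                // playback; if only 300 ms have elapsed since start(), 500 ms > 300 ms + 100 ms
                // jitter, so the position is attributed to an incompletely flushed/stopped
                // previous track and WOULD_BLOCK is returned.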
1993                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
1994                     // Verify that the counter can't count faster than the sample rate
1995                     // since the start time.  If greater, then that means we have failed
1996                     // to completely flush or stop the previous playing track.
1997                     ALOGW("incomplete flush or stop:"
1998                             " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
1999                             (long long)deltaTimeUs, (long long)deltaPositionByUs,
2000                             timestamp.mPosition);
2001                     return WOULD_BLOCK;
2002                 }
2003             }
2004             mStartUs = 0; // no need to check again; the start timestamp has either expired or is no longer needed.
2005         }
2006     } else {
2007         // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2008         (void) updateAndGetPosition_l();
2009         // Server consumed (mServer) and presented both use the same server time base,
2010         // and server consumed is always >= presented.
2011         // The delta between them represents the number of frames in the buffer pipeline.
2012         // If this delta is greater than the client position, it means that the presented
2013         // position is actually still stuck at the starting line (figuratively speaking),
2014         // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2015         if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
2016             return INVALID_OPERATION;
2017         }
2018         // Convert timestamp position from server time base to client time base.
2019         // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2020         // But if we change it to 64-bit then this could fail.
2021         // If (mPosition - mServer) can be negative then should use:
2022         //   (int32_t)(mPosition - mServer)
2023         timestamp.mPosition += mPosition - mServer;
2024         // Immediately after a call to getPosition_l(), mPosition and
2025         // mServer both represent the same frame position.  mPosition is
2026         // in client's point of view, and mServer is in server's point of
2027         // view.  So the difference between them is the "fudge factor"
2028         // between client and server views due to stop() and/or new
2029         // IAudioTrack.  And timestamp.mPosition is initially in server's
2030         // point of view, so we need to apply the same fudge factor to it.
2031     }
2032     return status;
2033 }
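// Illustrative sketch (not part of this file): a caller can combine the timestamp with its
// own 32-bit count of frames passed to write() (framesWritten below is the caller's
// bookkeeping, not an AudioTrack field) to estimate frames still queued:
//
//     AudioTimestamp ts;
//     if (track->getTimestamp(ts) == NO_ERROR) {
//         uint32_t framesPending = framesWritten - ts.mPosition;  // written but not yet presented
//     }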
2034 
2035 String8 AudioTrack::getParameters(const String8& keys)
2036 {
2037     audio_io_handle_t output = getOutput();
2038     if (output != AUDIO_IO_HANDLE_NONE) {
2039         return AudioSystem::getParameters(output, keys);
2040     } else {
2041         return String8::empty();
2042     }
2043 }
2044 
2045 bool AudioTrack::isOffloaded() const
2046 {
2047     AutoMutex lock(mLock);
2048     return isOffloaded_l();
2049 }
2050 
2051 bool AudioTrack::isDirect() const
2052 {
2053     AutoMutex lock(mLock);
2054     return isDirect_l();
2055 }
2056 
2057 bool AudioTrack::isOffloadedOrDirect() const
2058 {
2059     AutoMutex lock(mLock);
2060     return isOffloadedOrDirect_l();
2061 }
2062 
2063 
2064 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2065 {
2066 
2067     const size_t SIZE = 256;
2068     char buffer[SIZE];
2069     String8 result;
2070 
2071     result.append(" AudioTrack::dump\n");
2072     snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2073             mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2074     result.append(buffer);
2075     snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2076             mChannelCount, mFrameCount);
2077     result.append(buffer);
2078     snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
2079     result.append(buffer);
2080     snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2081     result.append(buffer);
2082     ::write(fd, result.string(), result.size());
2083     return NO_ERROR;
2084 }
2085 
2086 uint32_t AudioTrack::getUnderrunFrames() const
2087 {
2088     AutoMutex lock(mLock);
2089     return mProxy->getUnderrunFrames();
2090 }
2091 
2092 // =========================================================================
2093 
2094 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2095 {
2096     sp<AudioTrack> audioTrack = mAudioTrack.promote();
2097     if (audioTrack != 0) {
2098         AutoMutex lock(audioTrack->mLock);
2099         audioTrack->mProxy->binderDied();
2100     }
2101 }
2102 
2103 // =========================================================================
2104 
2105 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2106     : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2107       mIgnoreNextPausedInt(false)
2108 {
2109 }
2110 
2111 AudioTrack::AudioTrackThread::~AudioTrackThread()
2112 {
2113 }
2114 
2115 bool AudioTrack::AudioTrackThread::threadLoop()
2116 {
2117     {
2118         AutoMutex _l(mMyLock);
2119         if (mPaused) {
2120             mMyCond.wait(mMyLock);
2121             // caller will check for exitPending()
2122             return true;
2123         }
2124         if (mIgnoreNextPausedInt) {
2125             mIgnoreNextPausedInt = false;
2126             mPausedInt = false;
2127         }
2128         if (mPausedInt) {
2129             if (mPausedNs > 0) {
2130                 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2131             } else {
2132                 mMyCond.wait(mMyLock);
2133             }
2134             mPausedInt = false;
2135             return true;
2136         }
2137     }
2138     if (exitPending()) {
2139         return false;
2140     }
2141     nsecs_t ns = mReceiver.processAudioBuffer();
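    // Interpret processAudioBuffer()'s return value: 0 means run again immediately,
    // NS_INACTIVE pauses the thread until resume(), NS_NEVER exits the thread loop,
    // NS_WHENEVER is treated as a 1 s poll, and any other (positive) value is a sleep
    // time in nanoseconds before the next run.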
2142     switch (ns) {
2143     case 0:
2144         return true;
2145     case NS_INACTIVE:
2146         pauseInternal();
2147         return true;
2148     case NS_NEVER:
2149         return false;
2150     case NS_WHENEVER:
2151         // FIXME increase poll interval, or make event-driven
2152         ns = 1000000000LL;
2153         // fall through
2154     default:
2155         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2156         pauseInternal(ns);
2157         return true;
2158     }
2159 }
2160 
2161 void AudioTrack::AudioTrackThread::requestExit()
2162 {
2163     // must be in this order to avoid a race condition
2164     Thread::requestExit();
2165     resume();
2166 }
2167 
2168 void AudioTrack::AudioTrackThread::pause()
2169 {
2170     AutoMutex _l(mMyLock);
2171     mPaused = true;
2172 }
2173 
2174 void AudioTrack::AudioTrackThread::resume()
2175 {
2176     AutoMutex _l(mMyLock);
2177     mIgnoreNextPausedInt = true;
2178     if (mPaused || mPausedInt) {
2179         mPaused = false;
2180         mPausedInt = false;
2181         mMyCond.signal();
2182     }
2183 }
2184 
2185 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2186 {
2187     AutoMutex _l(mMyLock);
2188     mPausedInt = true;
2189     mPausedNs = ns;
2190 }
2191 
2192 }; // namespace android
2193