1 /*
2 **
3 ** Copyright 2007, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 //#define LOG_NDEBUG 0
19 #define LOG_TAG "AudioTrack"
20 
21 #include <inttypes.h>
22 #include <math.h>
23 #include <sys/resource.h>
24 
25 #include <android-base/macros.h>
26 #include <audio_utils/clock.h>
27 #include <audio_utils/primitives.h>
28 #include <binder/IPCThreadState.h>
29 #include <media/AudioTrack.h>
30 #include <utils/Log.h>
31 #include <private/media/AudioTrackShared.h>
32 #include <processgroup/sched_policy.h>
33 #include <media/IAudioFlinger.h>
34 #include <media/IAudioPolicyService.h>
35 #include <media/AudioParameter.h>
36 #include <media/AudioResamplerPublic.h>
37 #include <media/AudioSystem.h>
38 #include <media/MediaAnalyticsItem.h>
39 #include <media/TypeConverter.h>
40 
41 #define WAIT_PERIOD_MS                  10
42 #define WAIT_STREAM_END_TIMEOUT_SEC     120
43 static const int kMaxLoopCountNotifications = 32;
44 
45 namespace android {
46 // ---------------------------------------------------------------------------
47 
48 using media::VolumeShaper;
49 
50 // TODO: Move to a separate .h
51 
52 template <typename T>
53 static inline const T &min(const T &x, const T &y) {
54     return x < y ? x : y;
55 }
56 
57 template <typename T>
58 static inline const T &max(const T &x, const T &y) {
59     return x > y ? x : y;
60 }
61 
62 static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
63 {
64     return ((double)frames * 1000000000) / ((double)sampleRate * speed);
65 }
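// Worked example of the conversion above (illustrative values, not from this file):
// 480 frames at 48000 Hz and speed 1.0f -> 480 * 1e9 / 48000 = 10,000,000 ns (10 ms);
// at speed 2.0f the same 480 frames take only 5,000,000 ns of wall-clock time.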
66 
67 static int64_t convertTimespecToUs(const struct timespec &tv)
68 {
69     return tv.tv_sec * 1000000LL + tv.tv_nsec / 1000;
70 }
71 
72 // TODO move to audio_utils.
73 static inline struct timespec convertNsToTimespec(int64_t ns) {
74     struct timespec tv;
75     tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
76     tv.tv_nsec = static_cast<long>(ns % NANOS_PER_SECOND);
77     return tv;
78 }
79 
80 // current monotonic time in microseconds.
81 static int64_t getNowUs()
82 {
83     struct timespec tv;
84     (void) clock_gettime(CLOCK_MONOTONIC, &tv);
85     return convertTimespecToUs(tv);
86 }
87 
88 // FIXME: we don't use the pitch setting in the time stretcher (not working);
89 // instead we emulate it using our sample rate converter.
90 static const bool kFixPitch = true; // enable pitch fix
91 static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
92 {
93     return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
94 }
95 
96 static inline float adjustSpeed(float speed, float pitch)
97 {
98     return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
99 }
100 
101 static inline float adjustPitch(float pitch)
102 {
103     return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
104 }
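// Illustrative example of the pitch emulation above (values are hypothetical):
// for a 48000 Hz track with requested speed 1.0f and pitch 2.0f,
//   adjustSampleRate(48000, 2.0f) -> 96000 (the resampler provides the pitch shift),
//   adjustSpeed(1.0f, 2.0f)       -> 0.5f  (the time stretcher restores the duration),
//   adjustPitch(2.0f)             -> AUDIO_TIMESTRETCH_PITCH_NORMAL (1.0f).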
105 
106 // static
107 status_t AudioTrack::getMinFrameCount(
108         size_t* frameCount,
109         audio_stream_type_t streamType,
110         uint32_t sampleRate)
111 {
112     if (frameCount == NULL) {
113         return BAD_VALUE;
114     }
115 
116     // FIXME handle in server, like createTrack_l(), possible missing info:
117     //          audio_io_handle_t output
118     //          audio_format_t format
119     //          audio_channel_mask_t channelMask
120     //          audio_output_flags_t flags (FAST)
121     uint32_t afSampleRate;
122     status_t status;
123     status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
124     if (status != NO_ERROR) {
125         ALOGE("%s(): Unable to query output sample rate for stream type %d; status %d",
126                 __func__, streamType, status);
127         return status;
128     }
129     size_t afFrameCount;
130     status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
131     if (status != NO_ERROR) {
132         ALOGE("%s(): Unable to query output frame count for stream type %d; status %d",
133                 __func__, streamType, status);
134         return status;
135     }
136     uint32_t afLatency;
137     status = AudioSystem::getOutputLatency(&afLatency, streamType);
138     if (status != NO_ERROR) {
139         ALOGE("%s(): Unable to query output latency for stream type %d; status %d",
140                 __func__, streamType, status);
141         return status;
142     }
143 
144     // When called from createTrack, speed is 1.0f (normal speed).
145     // This is checked again when setting the playback rate (TODO: and when setting the sample rate, too).
146     *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
147                                               sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);
148 
149     // The formula above should always produce a non-zero value under normal circumstances:
150     // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
151     // Return error in the unlikely event that it does not, as that's part of the API contract.
152     if (*frameCount == 0) {
153         ALOGE("%s(): failed for streamType %d, sampleRate %u",
154                 __func__, streamType, sampleRate);
155         return BAD_VALUE;
156     }
157     ALOGV("%s(): getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
158             __func__, *frameCount, afFrameCount, afSampleRate, afLatency);
159     return NO_ERROR;
160 }
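// Typical caller usage (sketch only; the chosen stream type, rate, and error handling are
// the caller's responsibility and are not part of this implementation):
//     size_t minFrames = 0;
//     if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 48000) == NO_ERROR) {
//         // size the client-side buffer as a multiple of minFrames
//     }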
161 
162 // static
163 bool AudioTrack::isDirectOutputSupported(const audio_config_base_t& config,
164                                          const audio_attributes_t& attributes) {
165     ALOGV("%s()", __FUNCTION__);
166     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
167     if (aps == 0) return false;
168     return aps->isDirectOutputSupported(config, attributes);
169 }
170 
171 // ---------------------------------------------------------------------------
172 
173 void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
174 {
175     // only if we're in a good state...
176     // XXX: shall we gather alternative info if failing?
177     const status_t lstatus = track->initCheck();
178     if (lstatus != NO_ERROR) {
179         ALOGD("%s(): no metrics gathered, track status=%d", __func__, (int) lstatus);
180         return;
181     }
182 
183 #define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
184 
185     // Java API 28 entries, do not change.
186     mAnalyticsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
187     mAnalyticsItem->setCString(MM_PREFIX "type",
188             toString(track->mAttributes.content_type).c_str());
189     mAnalyticsItem->setCString(MM_PREFIX "usage", toString(track->mAttributes.usage).c_str());
190 
191     // Non-API entries, these can change due to a Java string mistake.
192     mAnalyticsItem->setInt32(MM_PREFIX "sampleRate", (int32_t)track->mSampleRate);
193     mAnalyticsItem->setInt64(MM_PREFIX "channelMask", (int64_t)track->mChannelMask);
194     // Non-API entries, these can change.
195     mAnalyticsItem->setInt32(MM_PREFIX "portId", (int32_t)track->mPortId);
196     mAnalyticsItem->setCString(MM_PREFIX "encoding", toString(track->mFormat).c_str());
197     mAnalyticsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
198     mAnalyticsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
199 }
200 
201 // hand the user a snapshot of the metrics.
202 status_t AudioTrack::getMetrics(MediaAnalyticsItem * &item)
203 {
204     mMediaMetrics.gather(this);
205     MediaAnalyticsItem *tmp = mMediaMetrics.dup();
206     if (tmp == nullptr) {
207         return BAD_VALUE;
208     }
209     item = tmp;
210     return NO_ERROR;
211 }
212 
213 AudioTrack::AudioTrack()
214     : mStatus(NO_INIT),
215       mState(STATE_STOPPED),
216       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
217       mPreviousSchedulingGroup(SP_DEFAULT),
218       mPausedPosition(0),
219       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
220       mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
221 {
222     mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
223     mAttributes.usage = AUDIO_USAGE_UNKNOWN;
224     mAttributes.flags = 0x0;
225     strcpy(mAttributes.tags, "");
226 }
227 
228 AudioTrack::AudioTrack(
229         audio_stream_type_t streamType,
230         uint32_t sampleRate,
231         audio_format_t format,
232         audio_channel_mask_t channelMask,
233         size_t frameCount,
234         audio_output_flags_t flags,
235         callback_t cbf,
236         void* user,
237         int32_t notificationFrames,
238         audio_session_t sessionId,
239         transfer_type transferType,
240         const audio_offload_info_t *offloadInfo,
241         uid_t uid,
242         pid_t pid,
243         const audio_attributes_t* pAttributes,
244         bool doNotReconnect,
245         float maxRequiredSpeed,
246         audio_port_handle_t selectedDeviceId)
247     : mStatus(NO_INIT),
248       mState(STATE_STOPPED),
249       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
250       mPreviousSchedulingGroup(SP_DEFAULT),
251       mPausedPosition(0)
252 {
253     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
254 
255     (void)set(streamType, sampleRate, format, channelMask,
256             frameCount, flags, cbf, user, notificationFrames,
257             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
258             offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
259 }
260 
261 AudioTrack::AudioTrack(
262         audio_stream_type_t streamType,
263         uint32_t sampleRate,
264         audio_format_t format,
265         audio_channel_mask_t channelMask,
266         const sp<IMemory>& sharedBuffer,
267         audio_output_flags_t flags,
268         callback_t cbf,
269         void* user,
270         int32_t notificationFrames,
271         audio_session_t sessionId,
272         transfer_type transferType,
273         const audio_offload_info_t *offloadInfo,
274         uid_t uid,
275         pid_t pid,
276         const audio_attributes_t* pAttributes,
277         bool doNotReconnect,
278         float maxRequiredSpeed)
279     : mStatus(NO_INIT),
280       mState(STATE_STOPPED),
281       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
282       mPreviousSchedulingGroup(SP_DEFAULT),
283       mPausedPosition(0),
284       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
285 {
286     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
287 
288     (void)set(streamType, sampleRate, format, channelMask,
289             0 /*frameCount*/, flags, cbf, user, notificationFrames,
290             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
291             uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
292 }
293 
294 AudioTrack::~AudioTrack()
295 {
296     // pull together the numbers, before we clean up our structures
297     mMediaMetrics.gather(this);
298 
299     if (mStatus == NO_ERROR) {
300         // Make sure that callback function exits in the case where
301         // it is looping on buffer full condition in obtainBuffer().
302         // Otherwise the callback thread will never exit.
303         stop();
304         if (mAudioTrackThread != 0) {
305             mProxy->interrupt();
306             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
307             mAudioTrackThread->requestExitAndWait();
308             mAudioTrackThread.clear();
309         }
310         // No lock here: worst case we remove a NULL callback which will be a nop
311         if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
312             AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
313         }
314         IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
315         mAudioTrack.clear();
316         mCblkMemory.clear();
317         mSharedBuffer.clear();
318         IPCThreadState::self()->flushCommands();
319         ALOGV("%s(%d), releasing session id %d from %d on behalf of %d",
320                 __func__, mPortId,
321                 mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
322         AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
323     }
324 }
325 
326 status_t AudioTrack::set(
327         audio_stream_type_t streamType,
328         uint32_t sampleRate,
329         audio_format_t format,
330         audio_channel_mask_t channelMask,
331         size_t frameCount,
332         audio_output_flags_t flags,
333         callback_t cbf,
334         void* user,
335         int32_t notificationFrames,
336         const sp<IMemory>& sharedBuffer,
337         bool threadCanCallJava,
338         audio_session_t sessionId,
339         transfer_type transferType,
340         const audio_offload_info_t *offloadInfo,
341         uid_t uid,
342         pid_t pid,
343         const audio_attributes_t* pAttributes,
344         bool doNotReconnect,
345         float maxRequiredSpeed,
346         audio_port_handle_t selectedDeviceId)
347 {
348     status_t status;
349     uint32_t channelCount;
350     pid_t callingPid;
351     pid_t myPid;
352 
353     // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
354     ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
355           "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
356           __func__,
357           streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
358           sessionId, transferType, uid, pid);
359 
360     mThreadCanCallJava = threadCanCallJava;
361     mSelectedDeviceId = selectedDeviceId;
362     mSessionId = sessionId;
363 
364     switch (transferType) {
365     case TRANSFER_DEFAULT:
366         if (sharedBuffer != 0) {
367             transferType = TRANSFER_SHARED;
368         } else if (cbf == NULL || threadCanCallJava) {
369             transferType = TRANSFER_SYNC;
370         } else {
371             transferType = TRANSFER_CALLBACK;
372         }
373         break;
374     case TRANSFER_CALLBACK:
375     case TRANSFER_SYNC_NOTIF_CALLBACK:
376         if (cbf == NULL || sharedBuffer != 0) {
377             ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
378                     __func__, convertTransferToText(transferType));
379             status = BAD_VALUE;
380             goto exit;
381         }
382         break;
383     case TRANSFER_OBTAIN:
384     case TRANSFER_SYNC:
385         if (sharedBuffer != 0) {
386             ALOGE("%s(): Transfer type %s but sharedBuffer != 0", __func__, convertTransferToText(transferType));
387             status = BAD_VALUE;
388             goto exit;
389         }
390         break;
391     case TRANSFER_SHARED:
392         if (sharedBuffer == 0) {
393             ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
394             status = BAD_VALUE;
395             goto exit;
396         }
397         break;
398     default:
399         ALOGE("%s(): Invalid transfer type %d",
400                 __func__, transferType);
401         status = BAD_VALUE;
402         goto exit;
403     }
404     mSharedBuffer = sharedBuffer;
405     mTransfer = transferType;
406     mDoNotReconnect = doNotReconnect;
407 
408     ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
409             __func__, sharedBuffer->pointer(), sharedBuffer->size());
410 
411     ALOGV("%s(): streamType %d frameCount %zu flags %04x",
412             __func__, streamType, frameCount, flags);
413 
414     // invariant that mAudioTrack != 0 is true only after set() returns successfully
415     if (mAudioTrack != 0) {
416         ALOGE("%s(): Track already in use", __func__);
417         status = INVALID_OPERATION;
418         goto exit;
419     }
420 
421     // handle default values first.
422     if (streamType == AUDIO_STREAM_DEFAULT) {
423         streamType = AUDIO_STREAM_MUSIC;
424     }
425     if (pAttributes == NULL) {
426         if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
427             ALOGE("%s(): Invalid stream type %d", __func__, streamType);
428             status = BAD_VALUE;
429             goto exit;
430         }
431         mStreamType = streamType;
432 
433     } else {
434         // stream type shouldn't be looked at, this track has audio attributes
435         memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
436         ALOGV("%s(): Building AudioTrack with attributes:"
437                 " usage=%d content=%d flags=0x%x tags=[%s]",
438                 __func__,
439                  mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
440         mStreamType = AUDIO_STREAM_DEFAULT;
441         audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
442     }
443 
444     // these below should probably come from the audioFlinger too...
445     if (format == AUDIO_FORMAT_DEFAULT) {
446         format = AUDIO_FORMAT_PCM_16_BIT;
447     } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
448         mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
449     }
450 
451     // validate parameters
452     if (!audio_is_valid_format(format)) {
453         ALOGE("%s(): Invalid format %#x", __func__, format);
454         status = BAD_VALUE;
455         goto exit;
456     }
457     mFormat = format;
458 
459     if (!audio_is_output_channel(channelMask)) {
460         ALOGE("%s(): Invalid channel mask %#x",  __func__, channelMask);
461         status = BAD_VALUE;
462         goto exit;
463     }
464     mChannelMask = channelMask;
465     channelCount = audio_channel_count_from_out_mask(channelMask);
466     mChannelCount = channelCount;
467 
468     // force direct flag if format is not linear PCM
469     // or offload was requested
470     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
471             || !audio_is_linear_pcm(format)) {
472         ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
473                     ? "%s(): Offload request, forcing to Direct Output"
474                     : "%s(): Not linear PCM, forcing to Direct Output",
475                     __func__);
476         flags = (audio_output_flags_t)
477                 // FIXME why can't we allow direct AND fast?
478                 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
479     }
480 
481     // force direct flag if HW A/V sync requested
482     if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
483         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
484     }
485 
486     if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
487         if (audio_has_proportional_frames(format)) {
488             mFrameSize = channelCount * audio_bytes_per_sample(format);
489         } else {
490             mFrameSize = sizeof(uint8_t);
491         }
492     } else {
493         ALOG_ASSERT(audio_has_proportional_frames(format));
494         mFrameSize = channelCount * audio_bytes_per_sample(format);
495         // createTrack will return an error if PCM format is not supported by server,
496         // so no need to check for specific PCM formats here
497     }
498 
499     // sampling rate must be specified for direct outputs
500     if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
501         status = BAD_VALUE;
502         goto exit;
503     }
504     mSampleRate = sampleRate;
505     mOriginalSampleRate = sampleRate;
506     mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
507     // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
508     mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
509 
510     // Make copy of input parameter offloadInfo so that in the future:
511     //  (a) createTrack_l doesn't need it as an input parameter
512     //  (b) we can support re-creation of offloaded tracks
513     if (offloadInfo != NULL) {
514         mOffloadInfoCopy = *offloadInfo;
515         mOffloadInfo = &mOffloadInfoCopy;
516     } else {
517         mOffloadInfo = NULL;
518         memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
519     }
520 
521     mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
522     mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
523     mSendLevel = 0.0f;
524     // mFrameCount is initialized in createTrack_l
525     mReqFrameCount = frameCount;
526     if (notificationFrames >= 0) {
527         mNotificationFramesReq = notificationFrames;
528         mNotificationsPerBufferReq = 0;
529     } else {
530         if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
531             ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
532                     __func__, notificationFrames);
533             status = BAD_VALUE;
534             goto exit;
535         }
536         if (frameCount > 0) {
537             ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
538                     __func__, notificationFrames, frameCount);
539             status = BAD_VALUE;
540             goto exit;
541         }
542         mNotificationFramesReq = 0;
543         const uint32_t minNotificationsPerBuffer = 1;
544         const uint32_t maxNotificationsPerBuffer = 8;
545         mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
546                 max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
547         ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
548                 "%s(): notificationFrames=%d clamped to the range -%u to -%u",
549                 __func__,
550                 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
551     }
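    // Illustrative values for the clamping above: notificationFrames == -4 requests
    // 4 notifications per buffer; -100 is clamped to maxNotificationsPerBuffer (8) and a
    // warning is logged; -1 yields the minimum of 1 notification per buffer.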
552     mNotificationFramesAct = 0;
553     callingPid = IPCThreadState::self()->getCallingPid();
554     myPid = getpid();
555     if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
556         mClientUid = IPCThreadState::self()->getCallingUid();
557     } else {
558         mClientUid = uid;
559     }
560     if (pid == -1 || (callingPid != myPid)) {
561         mClientPid = callingPid;
562     } else {
563         mClientPid = pid;
564     }
565     mAuxEffectId = 0;
566     mOrigFlags = mFlags = flags;
567     mCbf = cbf;
568 
569     if (cbf != NULL) {
570         mAudioTrackThread = new AudioTrackThread(*this);
571         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
572         // thread begins in paused state, and will not reference us until start()
573     }
574 
575     // create the IAudioTrack
576     {
577         AutoMutex lock(mLock);
578         status = createTrack_l();
579     }
580     if (status != NO_ERROR) {
581         if (mAudioTrackThread != 0) {
582             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
583             mAudioTrackThread->requestExitAndWait();
584             mAudioTrackThread.clear();
585         }
586         goto exit;
587     }
588 
589     mUserData = user;
590     mLoopCount = 0;
591     mLoopStart = 0;
592     mLoopEnd = 0;
593     mLoopCountNotified = 0;
594     mMarkerPosition = 0;
595     mMarkerReached = false;
596     mNewPosition = 0;
597     mUpdatePeriod = 0;
598     mPosition = 0;
599     mReleased = 0;
600     mStartNs = 0;
601     mStartFromZeroUs = 0;
602     AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
603     mSequence = 1;
604     mObservedSequence = mSequence;
605     mInUnderrun = false;
606     mPreviousTimestampValid = false;
607     mTimestampStartupGlitchReported = false;
608     mTimestampRetrogradePositionReported = false;
609     mTimestampRetrogradeTimeReported = false;
610     mTimestampStallReported = false;
611     mTimestampStaleTimeReported = false;
612     mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
613     mStartTs.mPosition = 0;
614     mUnderrunCountOffset = 0;
615     mFramesWritten = 0;
616     mFramesWrittenServerOffset = 0;
617     mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
618     mVolumeHandler = new media::VolumeHandler();
619 
620 exit:
621     mStatus = status;
622     return status;
623 }
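// Minimal client-side lifecycle sketch (illustrative only; assumes the default arguments
// declared in AudioTrack.h and omits error handling; write() is not defined in this section):
//     sp<AudioTrack> track = new AudioTrack();
//     if (track->set(AUDIO_STREAM_MUSIC, 48000, AUDIO_FORMAT_PCM_16_BIT,
//             AUDIO_CHANNEL_OUT_STEREO) == NO_ERROR) {   // resolves to TRANSFER_SYNC
//         track->start();
//         // track->write(buffer, sizeInBytes);          // blocking write path
//         track->stop();
//     }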
624 
625 // -------------------------------------------------------------------------
626 
627 status_t AudioTrack::start()
628 {
629     AutoMutex lock(mLock);
630     ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
631 
632     if (mState == STATE_ACTIVE) {
633         return INVALID_OPERATION;
634     }
635 
636     mInUnderrun = true;
637 
638     State previousState = mState;
639     if (previousState == STATE_PAUSED_STOPPING) {
640         mState = STATE_STOPPING;
641     } else {
642         mState = STATE_ACTIVE;
643     }
644     (void) updateAndGetPosition_l();
645 
646     // save start timestamp
647     if (isOffloadedOrDirect_l()) {
648         if (getTimestamp_l(mStartTs) != OK) {
649             mStartTs.mPosition = 0;
650         }
651     } else {
652         if (getTimestamp_l(&mStartEts) != OK) {
653             mStartEts.clear();
654         }
655     }
656     mStartNs = systemTime(); // save this for timestamp adjustment after starting.
657     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
658         // reset current position as seen by client to 0
659         mPosition = 0;
660         mPreviousTimestampValid = false;
661         mTimestampStartupGlitchReported = false;
662         mTimestampRetrogradePositionReported = false;
663         mTimestampRetrogradeTimeReported = false;
664         mTimestampStallReported = false;
665         mTimestampStaleTimeReported = false;
666         mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
667 
668         if (!isOffloadedOrDirect_l()
669                 && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
670             // Server side has consumed something, but is it finished consuming?
671             // It is possible since flush and stop are asynchronous that the server
672             // is still active at this point.
673             ALOGV("%s(%d): server read:%lld  cumulative flushed:%lld  client written:%lld",
674                     __func__, mPortId,
675                     (long long)(mFramesWrittenServerOffset
676                             + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
677                     (long long)mStartEts.mFlushed,
678                     (long long)mFramesWritten);
679             // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
680             mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
681         }
682         mFramesWritten = 0;
683         mProxy->clearTimestamp(); // need new server push for valid timestamp
684         mMarkerReached = false;
685 
686         // For offloaded tracks, we don't know if the hardware counters are really zero here,
687         // since the flush is asynchronous and stop may not fully drain.
688         // We save the time when the track is started to later verify whether
689         // the counters are realistic (i.e. start from zero after this time).
690         mStartFromZeroUs = mStartNs / 1000;
691 
692         // force refresh of remaining frames by processAudioBuffer() as last
693         // write before stop could be partial.
694         mRefreshRemaining = true;
695 
696         // for static track, clear the old flags when starting from stopped state
697         if (mSharedBuffer != 0) {
698             android_atomic_and(
699             ~(CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
700             &mCblk->mFlags);
701         }
702     }
703     mNewPosition = mPosition + mUpdatePeriod;
704     int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);
705 
706     status_t status = NO_ERROR;
707     if (!(flags & CBLK_INVALID)) {
708         status = mAudioTrack->start();
709         if (status == DEAD_OBJECT) {
710             flags |= CBLK_INVALID;
711         }
712     }
713     if (flags & CBLK_INVALID) {
714         status = restoreTrack_l("start");
715     }
716 
717     // resume or pause the callback thread as needed.
718     sp<AudioTrackThread> t = mAudioTrackThread;
719     if (status == NO_ERROR) {
720         if (t != 0) {
721             if (previousState == STATE_STOPPING) {
722                 mProxy->interrupt();
723             } else {
724                 t->resume();
725             }
726         } else {
727             mPreviousPriority = getpriority(PRIO_PROCESS, 0);
728             get_sched_policy(0, &mPreviousSchedulingGroup);
729             androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
730         }
731 
732         // Start our local VolumeHandler for restoration purposes.
733         mVolumeHandler->setStarted();
734     } else {
735         ALOGE("%s(%d): status %d", __func__, mPortId, status);
736         mState = previousState;
737         if (t != 0) {
738             if (previousState != STATE_STOPPING) {
739                 t->pause();
740             }
741         } else {
742             setpriority(PRIO_PROCESS, 0, mPreviousPriority);
743             set_sched_policy(0, mPreviousSchedulingGroup);
744         }
745     }
746 
747     return status;
748 }
749 
750 void AudioTrack::stop()
751 {
752     AutoMutex lock(mLock);
753     ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
754 
755     if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
756         return;
757     }
758 
759     if (isOffloaded_l()) {
760         mState = STATE_STOPPING;
761     } else {
762         mState = STATE_STOPPED;
763         ALOGD_IF(mSharedBuffer == nullptr,
764                 "%s(%d): called with %u frames delivered", __func__, mPortId, mReleased.value());
765         mReleased = 0;
766     }
767 
768     mProxy->stop(); // notify server not to read beyond current client position until start().
769     mProxy->interrupt();
770     mAudioTrack->stop();
771 
772     // Note: legacy handling - stop does not clear playback marker
773     // and periodic update counter, but flush does for streaming tracks.
774 
775     if (mSharedBuffer != 0) {
776         // clear buffer position and loop count.
777         mStaticProxy->setBufferPositionAndLoop(0 /* position */,
778                 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
779     }
780 
781     sp<AudioTrackThread> t = mAudioTrackThread;
782     if (t != 0) {
783         if (!isOffloaded_l()) {
784             t->pause();
785         } else if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
786             // causes wake up of the playback thread, that will callback the client for
787             // EVENT_STREAM_END in processAudioBuffer()
788             t->wake();
789         }
790     } else {
791         setpriority(PRIO_PROCESS, 0, mPreviousPriority);
792         set_sched_policy(0, mPreviousSchedulingGroup);
793     }
794 }
795 
796 bool AudioTrack::stopped() const
797 {
798     AutoMutex lock(mLock);
799     return mState != STATE_ACTIVE;
800 }
801 
802 void AudioTrack::flush()
803 {
804     AutoMutex lock(mLock);
805     ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
806 
807     if (mSharedBuffer != 0) {
808         return;
809     }
810     if (mState == STATE_ACTIVE) {
811         return;
812     }
813     flush_l();
814 }
815 
816 void AudioTrack::flush_l()
817 {
818     ALOG_ASSERT(mState != STATE_ACTIVE);
819 
820     // clear playback marker and periodic update counter
821     mMarkerPosition = 0;
822     mMarkerReached = false;
823     mUpdatePeriod = 0;
824     mRefreshRemaining = true;
825 
826     mState = STATE_FLUSHED;
827     mReleased = 0;
828     if (isOffloaded_l()) {
829         mProxy->interrupt();
830     }
831     mProxy->flush();
832     mAudioTrack->flush();
833 }
834 
835 void AudioTrack::pause()
836 {
837     AutoMutex lock(mLock);
838     ALOGV("%s(%d): prior state:%s", __func__, mPortId, stateToString(mState));
839 
840     if (mState == STATE_ACTIVE) {
841         mState = STATE_PAUSED;
842     } else if (mState == STATE_STOPPING) {
843         mState = STATE_PAUSED_STOPPING;
844     } else {
845         return;
846     }
847     mProxy->interrupt();
848     mAudioTrack->pause();
849 
850     if (isOffloaded_l()) {
851         if (mOutput != AUDIO_IO_HANDLE_NONE) {
852             // An offload output can be re-used between two audio tracks having
853             // the same configuration. A timestamp query for a paused track
854             // while the other is running would return an incorrect time.
855             // To fix this, cache the playback position on a pause() and return
856             // this time when requested until the track is resumed.
857 
858             // OffloadThread sends HAL pause in its threadLoop. Time saved
859             // here can be slightly off.
860 
861             // TODO: check return code for getRenderPosition.
862 
863             uint32_t halFrames;
864             AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
865             ALOGV("%s(%d): for offload, cache current position %u",
866                     __func__, mPortId, mPausedPosition);
867         }
868     }
869 }
870 
871 status_t AudioTrack::setVolume(float left, float right)
872 {
873     // This duplicates a test by AudioTrack JNI, but that is not the only caller
874     if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
875             isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
876         return BAD_VALUE;
877     }
878 
879     AutoMutex lock(mLock);
880     mVolume[AUDIO_INTERLEAVE_LEFT] = left;
881     mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
882 
883     mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
884 
885     if (isOffloaded_l()) {
886         mAudioTrack->signal();
887     }
888     return NO_ERROR;
889 }
890 
891 status_t AudioTrack::setVolume(float volume)
892 {
893     return setVolume(volume, volume);
894 }
895 
896 status_t AudioTrack::setAuxEffectSendLevel(float level)
897 {
898     // This duplicates a test by AudioTrack JNI, but that is not the only caller
899     if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
900         return BAD_VALUE;
901     }
902 
903     AutoMutex lock(mLock);
904     mSendLevel = level;
905     mProxy->setSendLevel(level);
906 
907     return NO_ERROR;
908 }
909 
910 void AudioTrack::getAuxEffectSendLevel(float* level) const
911 {
912     if (level != NULL) {
913         *level = mSendLevel;
914     }
915 }
916 
917 status_t AudioTrack::setSampleRate(uint32_t rate)
918 {
919     AutoMutex lock(mLock);
920     ALOGV("%s(%d): prior state:%s rate:%u", __func__, mPortId, stateToString(mState), rate);
921 
922     if (rate == mSampleRate) {
923         return NO_ERROR;
924     }
925     if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)
926             || (mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL)) {
927         return INVALID_OPERATION;
928     }
929     if (mOutput == AUDIO_IO_HANDLE_NONE) {
930         return NO_INIT;
931     }
932     // NOTE: it is theoretically possible, but highly unlikely, that a device change
933     // could mean a previously allowed sampling rate is no longer allowed.
934     uint32_t afSamplingRate;
935     if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
936         return NO_INIT;
937     }
938     // pitch is emulated by adjusting speed and sampleRate
939     const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
940     if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
941         return BAD_VALUE;
942     }
943     // TODO: Should we also check if the buffer size is compatible?
944 
945     mSampleRate = rate;
946     mProxy->setSampleRate(effectiveSampleRate);
947 
948     return NO_ERROR;
949 }
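// Illustrative example of the check above: setSampleRate(44100) while mPlaybackRate.mPitch
// is 1.5f yields an effective rate of 44100 * 1.5 + 0.5 = 66150, which must not exceed
// afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX or the call returns BAD_VALUE.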
950 
951 uint32_t AudioTrack::getSampleRate() const
952 {
953     AutoMutex lock(mLock);
954 
955     // sample rate can be updated during playback by the offloaded decoder so we need to
956     // query the HAL and update if needed.
957 // FIXME use Proxy return channel to update the rate from server and avoid polling here
958     if (isOffloadedOrDirect_l()) {
959         if (mOutput != AUDIO_IO_HANDLE_NONE) {
960             uint32_t sampleRate = 0;
961             status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
962             if (status == NO_ERROR) {
963                 mSampleRate = sampleRate;
964             }
965         }
966     }
967     return mSampleRate;
968 }
969 
970 uint32_t AudioTrack::getOriginalSampleRate() const
971 {
972     return mOriginalSampleRate;
973 }
974 
975 status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
976 {
977     AutoMutex lock(mLock);
978     if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
979         return NO_ERROR;
980     }
981     if (isOffloadedOrDirect_l()) {
982         return INVALID_OPERATION;
983     }
984     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
985         return INVALID_OPERATION;
986     }
987 
988     ALOGV("%s(%d): mSampleRate:%u  mSpeed:%f  mPitch:%f",
989             __func__, mPortId, mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
990     // pitch is emulated by adjusting speed and sampleRate
991     const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
992     const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
993     const float effectivePitch = adjustPitch(playbackRate.mPitch);
994     AudioPlaybackRate playbackRateTemp = playbackRate;
995     playbackRateTemp.mSpeed = effectiveSpeed;
996     playbackRateTemp.mPitch = effectivePitch;
997 
998     ALOGV("%s(%d) (effective) mSampleRate:%u  mSpeed:%f  mPitch:%f",
999             __func__, mPortId, effectiveRate, effectiveSpeed, effectivePitch);
1000 
1001     if (!isAudioPlaybackRateValid(playbackRateTemp)) {
1002         ALOGW("%s(%d) (%f, %f) failed (effective rate out of bounds)",
1003                 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1004         return BAD_VALUE;
1005     }
1006     // Check if the buffer size is compatible.
1007     if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
1008         ALOGW("%s(%d) (%f, %f) failed (buffer size)",
1009                 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1010         return BAD_VALUE;
1011     }
1012 
1013     // Check resampler ratios are within bounds
1014     if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
1015             (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
1016         ALOGW("%s(%d) (%f, %f) failed. Resample rate exceeds max accepted value",
1017                 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1018         return BAD_VALUE;
1019     }
1020 
1021     if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
1022         ALOGW("%s(%d) (%f, %f) failed. Resample rate below min accepted value",
1023                 __func__, mPortId, playbackRate.mSpeed, playbackRate.mPitch);
1024         return BAD_VALUE;
1025     }
1026     mPlaybackRate = playbackRate;
1027     // set effective rates
1028     mProxy->setPlaybackRate(playbackRateTemp);
1029     mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
1030     return NO_ERROR;
1031 }
1032 
1033 const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
1034 {
1035     AutoMutex lock(mLock);
1036     return mPlaybackRate;
1037 }
1038 
1039 ssize_t AudioTrack::getBufferSizeInFrames()
1040 {
1041     AutoMutex lock(mLock);
1042     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1043         return NO_INIT;
1044     }
1045     return (ssize_t) mProxy->getBufferSizeInFrames();
1046 }
1047 
1048 status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
1049 {
1050     if (duration == nullptr) {
1051         return BAD_VALUE;
1052     }
1053     AutoMutex lock(mLock);
1054     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1055         return NO_INIT;
1056     }
1057     ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
1058     if (bufferSizeInFrames < 0) {
1059         return (status_t)bufferSizeInFrames;
1060     }
1061     *duration = (int64_t)((double)bufferSizeInFrames * 1000000
1062             / ((double)mSampleRate * mPlaybackRate.mSpeed));
1063     return NO_ERROR;
1064 }
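// Worked example of the duration math above (illustrative): 4800 buffered frames at
// 48000 Hz and speed 1.0f -> 4800 * 1e6 / 48000 = 100000 us (100 ms); at speed 2.0f
// the same buffer covers only 50000 us.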
1065 
1066 ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
1067 {
1068     AutoMutex lock(mLock);
1069     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1070         return NO_INIT;
1071     }
1072     // Reject if timed track or compressed audio.
1073     if (!audio_is_linear_pcm(mFormat)) {
1074         return INVALID_OPERATION;
1075     }
1076     return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
1077 }
1078 
1079 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1080 {
1081     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1082         return INVALID_OPERATION;
1083     }
1084 
1085     if (loopCount == 0) {
1086         ;
1087     } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
1088             loopEnd - loopStart >= MIN_LOOP) {
1089         ;
1090     } else {
1091         return BAD_VALUE;
1092     }
1093 
1094     AutoMutex lock(mLock);
1095     // See setPosition() regarding setting parameters such as loop points or position while active
1096     if (mState == STATE_ACTIVE) {
1097         return INVALID_OPERATION;
1098     }
1099     setLoop_l(loopStart, loopEnd, loopCount);
1100     return NO_ERROR;
1101 }
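// Usage note (illustrative, based on the validation above): setLoop(0, frameCount, -1)
// loops the whole static buffer (-1 conventionally requests indefinite looping), while
// setLoop(0, 0, 0) clears any active loop. Both are rejected while the track is STATE_ACTIVE.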
1102 
1103 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1104 {
1105     // We do not update the periodic notification point.
1106     // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1107     mLoopCount = loopCount;
1108     mLoopEnd = loopEnd;
1109     mLoopStart = loopStart;
1110     mLoopCountNotified = loopCount;
1111     mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
1112 
1113     // Waking the AudioTrackThread is not needed as this cannot be called when active.
1114 }
1115 
1116 status_t AudioTrack::setMarkerPosition(uint32_t marker)
1117 {
1118     // The only purpose of setting marker position is to get a callback
1119     if (mCbf == NULL || isOffloadedOrDirect()) {
1120         return INVALID_OPERATION;
1121     }
1122 
1123     AutoMutex lock(mLock);
1124     mMarkerPosition = marker;
1125     mMarkerReached = false;
1126 
1127     sp<AudioTrackThread> t = mAudioTrackThread;
1128     if (t != 0) {
1129         t->wake();
1130     }
1131     return NO_ERROR;
1132 }
1133 
1134 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1135 {
1136     if (isOffloadedOrDirect()) {
1137         return INVALID_OPERATION;
1138     }
1139     if (marker == NULL) {
1140         return BAD_VALUE;
1141     }
1142 
1143     AutoMutex lock(mLock);
1144     mMarkerPosition.getValue(marker);
1145 
1146     return NO_ERROR;
1147 }
1148 
1149 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1150 {
1151     // The only purpose of setting position update period is to get a callback
1152     if (mCbf == NULL || isOffloadedOrDirect()) {
1153         return INVALID_OPERATION;
1154     }
1155 
1156     AutoMutex lock(mLock);
1157     mNewPosition = updateAndGetPosition_l() + updatePeriod;
1158     mUpdatePeriod = updatePeriod;
1159 
1160     sp<AudioTrackThread> t = mAudioTrackThread;
1161     if (t != 0) {
1162         t->wake();
1163     }
1164     return NO_ERROR;
1165 }
1166 
1167 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1168 {
1169     if (isOffloadedOrDirect()) {
1170         return INVALID_OPERATION;
1171     }
1172     if (updatePeriod == NULL) {
1173         return BAD_VALUE;
1174     }
1175 
1176     AutoMutex lock(mLock);
1177     *updatePeriod = mUpdatePeriod;
1178 
1179     return NO_ERROR;
1180 }
1181 
1182 status_t AudioTrack::setPosition(uint32_t position)
1183 {
1184     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1185         return INVALID_OPERATION;
1186     }
1187     if (position > mFrameCount) {
1188         return BAD_VALUE;
1189     }
1190 
1191     AutoMutex lock(mLock);
1192     // Currently we require that the player is inactive before setting parameters such as position
1193     // or loop points.  Otherwise, there could be a race condition: the application could read the
1194     // current position, compute a new position or loop parameters, and then set that position or
1195     // loop parameters but it would do the "wrong" thing since the position has continued to advance
1196     // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
1197     // to specify how it wants to handle such scenarios.
1198     if (mState == STATE_ACTIVE) {
1199         return INVALID_OPERATION;
1200     }
1201     // After setting the position, use full update period before notification.
1202     mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1203     mStaticProxy->setBufferPosition(position);
1204 
1205     // Waking the AudioTrackThread is not needed as this cannot be called when active.
1206     return NO_ERROR;
1207 }
1208 
1209 status_t AudioTrack::getPosition(uint32_t *position)
1210 {
1211     if (position == NULL) {
1212         return BAD_VALUE;
1213     }
1214 
1215     AutoMutex lock(mLock);
1216     // FIXME: offloaded and direct tracks call into the HAL for render positions
1217     // for compressed/synced data; however, we use proxy position for pure linear pcm data
1218     // as we do not know the capability of the HAL for pcm position support and standby.
1219     // There may be some latency differences between the HAL position and the proxy position.
1220     if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
1221         uint32_t dspFrames = 0;
1222 
1223         if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1224             ALOGV("%s(%d): called in paused state, return cached position %u",
1225                 __func__, mPortId, mPausedPosition);
1226             *position = mPausedPosition;
1227             return NO_ERROR;
1228         }
1229 
1230         if (mOutput != AUDIO_IO_HANDLE_NONE) {
1231             uint32_t halFrames; // actually unused
1232             (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
1233             // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1234         }
1235         // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1236         // due to hardware latency. We leave this behavior for now.
1237         *position = dspFrames;
1238     } else {
1239         if (mCblk->mFlags & CBLK_INVALID) {
1240             (void) restoreTrack_l("getPosition");
1241             // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1242             // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1243         }
1244 
1245         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1246         *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1247                 0 : updateAndGetPosition_l().value();
1248     }
1249     return NO_ERROR;
1250 }
1251 
1252 status_t AudioTrack::getBufferPosition(uint32_t *position)
1253 {
1254     if (mSharedBuffer == 0) {
1255         return INVALID_OPERATION;
1256     }
1257     if (position == NULL) {
1258         return BAD_VALUE;
1259     }
1260 
1261     AutoMutex lock(mLock);
1262     *position = mStaticProxy->getBufferPosition();
1263     return NO_ERROR;
1264 }
1265 
1266 status_t AudioTrack::reload()
1267 {
1268     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1269         return INVALID_OPERATION;
1270     }
1271 
1272     AutoMutex lock(mLock);
1273     // See setPosition() regarding setting parameters such as loop points or position while active
1274     if (mState == STATE_ACTIVE) {
1275         return INVALID_OPERATION;
1276     }
1277     mNewPosition = mUpdatePeriod;
1278     (void) updateAndGetPosition_l();
1279     mPosition = 0;
1280     mPreviousTimestampValid = false;
1281 #if 0
1282     // The documentation is not clear on the behavior of reload() and the restoration
1283     // of loop count. Historically we have not restored loop count, start, end,
1284     // but it makes sense if one desires to repeat playing a particular sound.
1285     if (mLoopCount != 0) {
1286         mLoopCountNotified = mLoopCount;
1287         mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1288     }
1289 #endif
1290     mStaticProxy->setBufferPosition(0);
1291     return NO_ERROR;
1292 }
1293 
1294 audio_io_handle_t AudioTrack::getOutput() const
1295 {
1296     AutoMutex lock(mLock);
1297     return mOutput;
1298 }
1299 
1300 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1301     AutoMutex lock(mLock);
1302     if (mSelectedDeviceId != deviceId) {
1303         mSelectedDeviceId = deviceId;
1304         if (mStatus == NO_ERROR) {
1305             android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1306             mProxy->interrupt();
1307         }
1308     }
1309     return NO_ERROR;
1310 }
1311 
1312 audio_port_handle_t AudioTrack::getOutputDevice() {
1313     AutoMutex lock(mLock);
1314     return mSelectedDeviceId;
1315 }
1316 
1317 // must be called with mLock held
1318 void AudioTrack::updateRoutedDeviceId_l()
1319 {
1320     // if the track is inactive, do not update the actual device as the output stream may be routed
1321     // to a device not relevant to this client because of other active use cases.
1322     if (mState != STATE_ACTIVE) {
1323         return;
1324     }
1325     if (mOutput != AUDIO_IO_HANDLE_NONE) {
1326         audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
1327         if (deviceId != AUDIO_PORT_HANDLE_NONE) {
1328             mRoutedDeviceId = deviceId;
1329         }
1330     }
1331 }
1332 
1333 audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1334     AutoMutex lock(mLock);
1335     updateRoutedDeviceId_l();
1336     return mRoutedDeviceId;
1337 }
1338 
1339 status_t AudioTrack::attachAuxEffect(int effectId)
1340 {
1341     AutoMutex lock(mLock);
1342     status_t status = mAudioTrack->attachAuxEffect(effectId);
1343     if (status == NO_ERROR) {
1344         mAuxEffectId = effectId;
1345     }
1346     return status;
1347 }
1348 
1349 audio_stream_type_t AudioTrack::streamType() const
1350 {
1351     if (mStreamType == AUDIO_STREAM_DEFAULT) {
1352         return AudioSystem::attributesToStreamType(mAttributes);
1353     }
1354     return mStreamType;
1355 }
1356 
1357 uint32_t AudioTrack::latency()
1358 {
1359     AutoMutex lock(mLock);
1360     updateLatency_l();
1361     return mLatency;
1362 }
1363 
1364 // -------------------------------------------------------------------------
1365 
1366 // must be called with mLock held
1367 void AudioTrack::updateLatency_l()
1368 {
1369     status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
1370     if (status != NO_ERROR) {
1371         ALOGW("%s(%d): getLatency(%d) failed status %d", __func__, mPortId, mOutput, status);
1372     } else {
1373         // FIXME don't believe this lie
1374         mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1375     }
1376 }
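// Illustrative example of the estimate above: with mAfLatency = 20 ms, mFrameCount = 960
// and mSampleRate = 48000, mLatency = 20 + (1000 * 960) / 48000 = 40 ms.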
1377 
1378 // TODO Move this macro to a common header file for enum to string conversion in audio framework.
1379 #define MEDIA_CASE_ENUM(name) case name: return #name
1380 const char * AudioTrack::convertTransferToText(transfer_type transferType) {
1381     switch (transferType) {
1382         MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
1383         MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
1384         MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
1385         MEDIA_CASE_ENUM(TRANSFER_SYNC);
1386         MEDIA_CASE_ENUM(TRANSFER_SHARED);
1387         MEDIA_CASE_ENUM(TRANSFER_SYNC_NOTIF_CALLBACK);
1388         default:
1389             return "UNRECOGNIZED";
1390     }
1391 }
1392 
1393 status_t AudioTrack::createTrack_l()
1394 {
1395     status_t status;
1396     bool callbackAdded = false;
1397 
1398     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1399     if (audioFlinger == 0) {
1400         ALOGE("%s(%d): Could not get audioflinger",
1401                 __func__, mPortId);
1402         status = NO_INIT;
1403         goto exit;
1404     }
1405 
1406     {
1407     // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1408     // After fast request is denied, we will request again if IAudioTrack is re-created.
1409     // Client can only express a preference for FAST.  Server will perform additional tests.
1410     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1411         // either of these use cases:
1412         // use case 1: shared buffer
1413         bool sharedBuffer = mSharedBuffer != 0;
1414         bool transferAllowed =
1415             // use case 2: callback transfer mode
1416             (mTransfer == TRANSFER_CALLBACK) ||
1417             // use case 3: obtain/release mode
1418             (mTransfer == TRANSFER_OBTAIN) ||
1419             // use case 4: synchronous write
1420             ((mTransfer == TRANSFER_SYNC || mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK)
1421                     && mThreadCanCallJava);
1422 
1423         bool fastAllowed = sharedBuffer || transferAllowed;
1424         if (!fastAllowed) {
1425             ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
1426                   " not shared buffer and transfer = %s",
1427                   __func__, mPortId,
1428                   convertTransferToText(mTransfer));
1429             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1430         }
1431     }
1432 
1433     IAudioFlinger::CreateTrackInput input;
1434     if (mStreamType != AUDIO_STREAM_DEFAULT) {
1435         input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
1436     } else {
1437         input.attr = mAttributes;
1438     }
1439     input.config = AUDIO_CONFIG_INITIALIZER;
1440     input.config.sample_rate = mSampleRate;
1441     input.config.channel_mask = mChannelMask;
1442     input.config.format = mFormat;
1443     input.config.offload_info = mOffloadInfoCopy;
1444     input.clientInfo.clientUid = mClientUid;
1445     input.clientInfo.clientPid = mClientPid;
1446     input.clientInfo.clientTid = -1;
1447     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1448         // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
1449         // application-level code follows all non-blocking design rules, the language runtime
1450         // doesn't also follow those rules, so the thread will not benefit overall.
1451         if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1452             input.clientInfo.clientTid = mAudioTrackThread->getTid();
1453         }
1454     }
1455     input.sharedBuffer = mSharedBuffer;
1456     input.notificationsPerBuffer = mNotificationsPerBufferReq;
1457     input.speed = 1.0;
1458     if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
1459             (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
1460         input.speed  = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1461                         max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1462     }
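    // Example (illustrative values): if mPlaybackRate.mSpeed is currently 1.0f but the client
    // declared mMaxRequiredSpeed = 2.0f, the server is asked to size the buffer for speed 2.0f,
    // so a later setPlaybackRate() up to that speed does not require a larger buffer.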
1463     input.flags = mFlags;
1464     input.frameCount = mReqFrameCount;
1465     input.notificationFrameCount = mNotificationFramesReq;
1466     input.selectedDeviceId = mSelectedDeviceId;
1467     input.sessionId = mSessionId;
1468 
1469     IAudioFlinger::CreateTrackOutput output;
1470 
1471     sp<IAudioTrack> track = audioFlinger->createTrack(input,
1472                                                       output,
1473                                                       &status);
1474 
1475     if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
1476         ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
1477                 __func__, mPortId, status, output.outputId);
1478         if (status == NO_ERROR) {
1479             status = NO_INIT;
1480         }
1481         goto exit;
1482     }
1483     ALOG_ASSERT(track != 0);
1484 
1485     mFrameCount = output.frameCount;
1486     mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
1487     mRoutedDeviceId = output.selectedDeviceId;
1488     mSessionId = output.sessionId;
1489 
1490     mSampleRate = output.sampleRate;
1491     if (mOriginalSampleRate == 0) {
1492         mOriginalSampleRate = mSampleRate;
1493     }
1494 
1495     mAfFrameCount = output.afFrameCount;
1496     mAfSampleRate = output.afSampleRate;
1497     mAfLatency = output.afLatencyMs;
1498 
1499     mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1500 
1501     // AudioFlinger now owns the reference to the I/O handle,
1502     // so we are no longer responsible for releasing it.
1503 
1504     // FIXME compare to AudioRecord
1505     sp<IMemory> iMem = track->getCblk();
1506     if (iMem == 0) {
1507         ALOGE("%s(%d): Could not get control block", __func__, mPortId);
1508         status = NO_INIT;
1509         goto exit;
1510     }
1511     void *iMemPointer = iMem->pointer();
1512     if (iMemPointer == NULL) {
1513         ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
1514         status = NO_INIT;
1515         goto exit;
1516     }
1517     // invariant that mAudioTrack != 0 is true only after set() returns successfully
1518     if (mAudioTrack != 0) {
1519         IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1520         mDeathNotifier.clear();
1521     }
1522     mAudioTrack = track;
1523     mCblkMemory = iMem;
1524     IPCThreadState::self()->flushCommands();
1525 
1526     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1527     mCblk = cblk;
1528 
1529     mAwaitBoost = false;
1530     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1531         if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
1532             ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
1533                   __func__, mPortId, mReqFrameCount, mFrameCount);
1534             if (!mThreadCanCallJava) {
1535                 mAwaitBoost = true;
1536             }
1537         } else {
1538             ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
1539                   __func__, mPortId, mReqFrameCount, mFrameCount);
1540         }
1541     }
1542     mFlags = output.flags;
1543 
1544     //mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
1545     if (mDeviceCallback != 0) {
1546         if (mOutput != AUDIO_IO_HANDLE_NONE) {
1547             AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
1548         }
1549         AudioSystem::addAudioDeviceCallback(this, output.outputId, output.portId);
1550         callbackAdded = true;
1551     }
1552 
1553     mPortId = output.portId;
1554     // We retain a copy of the I/O handle, but don't own the reference
1555     mOutput = output.outputId;
1556     mRefreshRemaining = true;
1557 
1558     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1559     // is the value of pointer() for the shared buffer, otherwise buffers points
1560     // immediately after the control block.  This address is for the mapping within client
1561     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1562     void* buffers;
1563     if (mSharedBuffer == 0) {
1564         buffers = cblk + 1;
1565     } else {
1566         buffers = mSharedBuffer->pointer();
1567         if (buffers == NULL) {
1568             ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
1569             status = NO_INIT;
1570             goto exit;
1571         }
1572     }
1573 
1574     mAudioTrack->attachAuxEffect(mAuxEffectId);
1575 
1576     // If IAudioTrack is re-created, don't let the requested frameCount
1577     // decrease.  This can confuse clients that cache frameCount().
1578     if (mFrameCount > mReqFrameCount) {
1579         mReqFrameCount = mFrameCount;
1580     }
1581 
1582     // reset server position to 0 as we have new cblk.
1583     mServer = 0;
1584 
1585     // update proxy
1586     if (mSharedBuffer == 0) {
1587         mStaticProxy.clear();
1588         mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1589     } else {
1590         mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1591         mProxy = mStaticProxy;
1592     }
1593 
1594     mProxy->setVolumeLR(gain_minifloat_pack(
1595             gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1596             gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1597 
1598     mProxy->setSendLevel(mSendLevel);
1599     const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1600     const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1601     const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1602     mProxy->setSampleRate(effectiveSampleRate);
1603 
1604     AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1605     playbackRateTemp.mSpeed = effectiveSpeed;
1606     playbackRateTemp.mPitch = effectivePitch;
1607     mProxy->setPlaybackRate(playbackRateTemp);
1608     mProxy->setMinimum(mNotificationFramesAct);
1609 
1610     mDeathNotifier = new DeathNotifier(this);
1611     IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1612 
1613     }
1614 
1615 exit:
1616     if (status != NO_ERROR && callbackAdded) {
1617     // note: mOutput is always valid if callbackAdded is true
1618         AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
1619     }
1620 
1621     mStatus = status;
1622 
1623     // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
1624     return status;
1625 }
1626 
1627 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1628 {
1629     if (audioBuffer == NULL) {
1630         if (nonContig != NULL) {
1631             *nonContig = 0;
1632         }
1633         return BAD_VALUE;
1634     }
1635     if (mTransfer != TRANSFER_OBTAIN) {
1636         audioBuffer->frameCount = 0;
1637         audioBuffer->size = 0;
1638         audioBuffer->raw = NULL;
1639         if (nonContig != NULL) {
1640             *nonContig = 0;
1641         }
1642         return INVALID_OPERATION;
1643     }
1644 
1645     const struct timespec *requested;
1646     struct timespec timeout;
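    // Illustrative mapping (not part of the original comments): waitCount is expressed in
    // units of WAIT_PERIOD_MS (10 ms), so e.g. waitCount == 3 yields a 30 ms timeout,
    // i.e. timeout.tv_sec = 0 and timeout.tv_nsec = 30000000.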
1647     if (waitCount == -1) {
1648         requested = &ClientProxy::kForever;
1649     } else if (waitCount == 0) {
1650         requested = &ClientProxy::kNonBlocking;
1651     } else if (waitCount > 0) {
1652         time_t ms = WAIT_PERIOD_MS * (time_t) waitCount;
1653         timeout.tv_sec = ms / 1000;
1654         timeout.tv_nsec = (long) (ms % 1000) * 1000000;
1655         requested = &timeout;
1656     } else {
1657         ALOGE("%s(%d): invalid waitCount %d", __func__, mPortId, waitCount);
1658         requested = NULL;
1659     }
1660     return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1661 }
1662 
1663 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1664         struct timespec *elapsed, size_t *nonContig)
1665 {
1666     // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1667     uint32_t oldSequence = 0;
1668     uint32_t newSequence;
1669 
1670     Proxy::Buffer buffer;
1671     status_t status = NO_ERROR;
1672 
1673     static const int32_t kMaxTries = 5;
1674     int32_t tryCounter = kMaxTries;
1675 
1676     do {
1677         // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1678         // keep them from going away if another thread re-creates the track during obtainBuffer()
1679         sp<AudioTrackClientProxy> proxy;
1680         sp<IMemory> iMem;
1681 
1682         {   // start of lock scope
1683             AutoMutex lock(mLock);
1684 
1685             newSequence = mSequence;
1686             // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1687             if (status == DEAD_OBJECT) {
1688                 // re-create track, unless someone else has already done so
1689                 if (newSequence == oldSequence) {
1690                     status = restoreTrack_l("obtainBuffer");
1691                     if (status != NO_ERROR) {
1692                         buffer.mFrameCount = 0;
1693                         buffer.mRaw = NULL;
1694                         buffer.mNonContig = 0;
1695                         break;
1696                     }
1697                 }
1698             }
1699             oldSequence = newSequence;
1700 
1701             if (status == NOT_ENOUGH_DATA) {
1702                 restartIfDisabled();
1703             }
1704 
1705             // Keep the extra references
1706             proxy = mProxy;
1707             iMem = mCblkMemory;
1708 
1709             if (mState == STATE_STOPPING) {
1710                 status = -EINTR;
1711                 buffer.mFrameCount = 0;
1712                 buffer.mRaw = NULL;
1713                 buffer.mNonContig = 0;
1714                 break;
1715             }
1716 
1717             // Non-blocking if track is stopped or paused
1718             if (mState != STATE_ACTIVE) {
1719                 requested = &ClientProxy::kNonBlocking;
1720             }
1721 
1722         }   // end of lock scope
1723 
1724         buffer.mFrameCount = audioBuffer->frameCount;
1725         // FIXME starts the requested timeout and elapsed over from scratch
1726         status = proxy->obtainBuffer(&buffer, requested, elapsed);
1727     } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1728 
1729     audioBuffer->frameCount = buffer.mFrameCount;
1730     audioBuffer->size = buffer.mFrameCount * mFrameSize;
1731     audioBuffer->raw = buffer.mRaw;
1732     if (nonContig != NULL) {
1733         *nonContig = buffer.mNonContig;
1734     }
1735     return status;
1736 }
1737 
1738 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1739 {
1740     // FIXME add error checking on mode, by adding an internal version
1741     if (mTransfer == TRANSFER_SHARED) {
1742         return;
1743     }
1744 
1745     size_t stepCount = audioBuffer->size / mFrameSize;
1746     if (stepCount == 0) {
1747         return;
1748     }
1749 
1750     Proxy::Buffer buffer;
1751     buffer.mFrameCount = stepCount;
1752     buffer.mRaw = audioBuffer->raw;
1753 
1754     AutoMutex lock(mLock);
1755     mReleased += stepCount;
1756     mInUnderrun = false;
1757     mProxy->releaseBuffer(&buffer);
1758 
1759     // restart track if it was disabled by audioflinger due to previous underrun
1760     restartIfDisabled();
1761 }
1762 
1763 void AudioTrack::restartIfDisabled()
1764 {
1765     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1766     if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1767         ALOGW("%s(%d): releaseBuffer() track %p disabled due to previous underrun, restarting",
1768                 __func__, mPortId, this);
1769         // FIXME ignoring status
1770         mAudioTrack->start();
1771     }
1772 }
1773 
1774 // -------------------------------------------------------------------------
1775 
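// A minimal client-side usage sketch (illustrative only, assuming a track configured with
// TRANSFER_SYNC and linear PCM):
//
//     sp<AudioTrack> track = ...;          // already set() successfully
//     track->start();
//     ssize_t written = track->write(pcmData, pcmBytes, true /*blocking*/);
//     if (written < 0) { /* handle error, e.g. WOULD_BLOCK or DEAD_OBJECT */ }
//
// write() copies the data into the shared ring buffer in chunks of whole frames, blocking
// (if requested) until space is available or an error occurs.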
1776 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1777 {
1778     if (mTransfer != TRANSFER_SYNC && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
1779         return INVALID_OPERATION;
1780     }
1781 
1782     if (isDirect()) {
1783         AutoMutex lock(mLock);
1784         int32_t flags = android_atomic_and(
1785                             ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1786                             &mCblk->mFlags);
1787         if (flags & CBLK_INVALID) {
1788             return DEAD_OBJECT;
1789         }
1790     }
1791 
1792     if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1793         // Sanity-check: user is most likely passing an error code, and it would
1794         // make the return value ambiguous (actualSize vs error).
1795         ALOGE("%s(%d): AudioTrack::write(buffer=%p, size=%zu (%zd)",
1796                 __func__, mPortId, buffer, userSize, userSize);
1797         return BAD_VALUE;
1798     }
1799 
1800     size_t written = 0;
1801     Buffer audioBuffer;
1802 
1803     while (userSize >= mFrameSize) {
1804         audioBuffer.frameCount = userSize / mFrameSize;
1805 
1806         status_t err = obtainBuffer(&audioBuffer,
1807                 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1808         if (err < 0) {
1809             if (written > 0) {
1810                 break;
1811             }
1812             if (err == TIMED_OUT || err == -EINTR) {
1813                 err = WOULD_BLOCK;
1814             }
1815             return ssize_t(err);
1816         }
1817 
1818         size_t toWrite = audioBuffer.size;
1819         memcpy(audioBuffer.i8, buffer, toWrite);
1820         buffer = ((const char *) buffer) + toWrite;
1821         userSize -= toWrite;
1822         written += toWrite;
1823 
1824         releaseBuffer(&audioBuffer);
1825     }
1826 
1827     if (written > 0) {
1828         mFramesWritten += written / mFrameSize;
1829 
1830         if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
1831             const sp<AudioTrackThread> t = mAudioTrackThread;
1832             if (t != 0) {
1833                 // wakes up the playback thread, which will call back the client for
1834                 // more data (with EVENT_CAN_WRITE_MORE_DATA) in processAudioBuffer()
1835                 t->wake();
1836             }
1837         }
1838     }
1839 
1840     return written;
1841 }
1842 
1843 // -------------------------------------------------------------------------
1844 
1845 nsecs_t AudioTrack::processAudioBuffer()
1846 {
1847     // Currently the AudioTrack thread is not created if there are no callbacks.
1848     // Would it ever make sense to run the thread, even without callbacks?
1849     // If so, then replace this by checks at each use for mCbf != NULL.
1850     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1851 
1852     mLock.lock();
1853     if (mAwaitBoost) {
1854         mAwaitBoost = false;
1855         mLock.unlock();
1856         static const int32_t kMaxTries = 5;
1857         int32_t tryCounter = kMaxTries;
1858         uint32_t pollUs = 10000;
1859         do {
1860             int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
1861             if (policy == SCHED_FIFO || policy == SCHED_RR) {
1862                 break;
1863             }
1864             usleep(pollUs);
1865             pollUs <<= 1;
1866         } while (tryCounter-- > 0);
1867         if (tryCounter < 0) {
1868             ALOGE("%s(%d): did not receive expected priority boost on time",
1869                     __func__, mPortId);
1870         }
1871         // Run again immediately
1872         return 0;
1873     }
1874 
1875     // Can only reference mCblk while locked
1876     int32_t flags = android_atomic_and(
1877         ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1878 
1879     // Check for track invalidation
1880     if (flags & CBLK_INVALID) {
1881         // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1882         // AudioSystem cache. We should not exit here but after calling the callback so
1883         // that the upper layers can recreate the track
1884         if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1885             status_t status __unused = restoreTrack_l("processAudioBuffer");
1886             // FIXME unused status
1887             // after restoration, continue below to make sure that the loop and buffer events
1888             // are notified because they have been cleared from mCblk->mFlags above.
1889         }
1890     }
1891 
1892     bool waitStreamEnd = mState == STATE_STOPPING;
1893     bool active = mState == STATE_ACTIVE;
1894 
1895     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1896     bool newUnderrun = false;
1897     if (flags & CBLK_UNDERRUN) {
1898 #if 0
1899         // Currently in shared buffer mode, when the server reaches the end of buffer,
1900         // the track stays active in continuous underrun state.  It's up to the application
1901         // to pause or stop the track, or set the position to a new offset within buffer.
1902         // This was some experimental code to auto-pause on underrun.   Keeping it here
1903         // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1904         if (mTransfer == TRANSFER_SHARED) {
1905             mState = STATE_PAUSED;
1906             active = false;
1907         }
1908 #endif
1909         if (!mInUnderrun) {
1910             mInUnderrun = true;
1911             newUnderrun = true;
1912         }
1913     }
1914 
1915     // Get current position of server
1916     Modulo<uint32_t> position(updateAndGetPosition_l());
1917 
1918     // Manage marker callback
1919     bool markerReached = false;
1920     Modulo<uint32_t> markerPosition(mMarkerPosition);
1921     // uses 32 bit wraparound for comparison with position.
1922     if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1923         mMarkerReached = markerReached = true;
1924     }
1925 
1926     // Determine number of new position callback(s) that will be needed, while locked
1927     size_t newPosCount = 0;
1928     Modulo<uint32_t> newPosition(mNewPosition);
1929     uint32_t updatePeriod = mUpdatePeriod;
1930     // FIXME fails for wraparound, need 64 bits
1931     if (updatePeriod > 0 && position >= newPosition) {
1932         newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1933         mNewPosition += updatePeriod * newPosCount;
1934     }
1935 
1936     // Cache other fields that will be needed soon
1937     uint32_t sampleRate = mSampleRate;
1938     float speed = mPlaybackRate.mSpeed;
1939     const uint32_t notificationFrames = mNotificationFramesAct;
1940     if (mRefreshRemaining) {
1941         mRefreshRemaining = false;
1942         mRemainingFrames = notificationFrames;
1943         mRetryOnPartialBuffer = false;
1944     }
1945     size_t misalignment = mProxy->getMisalignment();
1946     uint32_t sequence = mSequence;
1947     sp<AudioTrackClientProxy> proxy = mProxy;
1948 
1949     // Determine the number of new loop callback(s) that will be needed, while locked.
1950     int loopCountNotifications = 0;
1951     uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1952 
1953     if (mLoopCount > 0) {
1954         int loopCount;
1955         size_t bufferPosition;
1956         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1957         loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1958         loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1959         mLoopCountNotified = loopCount; // discard any excess notifications
1960     } else if (mLoopCount < 0) {
1961         // FIXME: We're not accurate with notification count and position with infinite looping
1962         // since loopCount from server side will always return -1 (we could decrement it).
1963         size_t bufferPosition = mStaticProxy->getBufferPosition();
1964         loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1965         loopPeriod = mLoopEnd - bufferPosition;
1966     } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1967         size_t bufferPosition = mStaticProxy->getBufferPosition();
1968         loopPeriod = mFrameCount - bufferPosition;
1969     }
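    // Worked example (illustrative numbers): for a static track with mLoopEnd == 48000 frames,
    // an active loop (loopCount > 0) and bufferPosition == 12000, loopPeriod is
    // 48000 - 12000 = 36000 frames until the next EVENT_LOOP_END notification.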
1970 
1971     // These fields don't need to be cached, because they are assigned only by set():
1972     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1973     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1974 
1975     mLock.unlock();
1976 
1977     // get anchor time to account for callbacks.
1978     const nsecs_t timeBeforeCallbacks = systemTime();
1979 
1980     if (waitStreamEnd) {
1981         // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1982         // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1983         // (and make sure we don't callback for more data while we're stopping).
1984         // This helps with position, marker notifications, and track invalidation.
1985         struct timespec timeout;
1986         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1987         timeout.tv_nsec = 0;
1988 
1989         status_t status = proxy->waitStreamEndDone(&timeout);
1990         switch (status) {
1991         case NO_ERROR:
1992         case DEAD_OBJECT:
1993         case TIMED_OUT:
1994             if (status != DEAD_OBJECT) {
1995                 // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
1996                 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
1997                 mCbf(EVENT_STREAM_END, mUserData, NULL);
1998             }
1999             {
2000                 AutoMutex lock(mLock);
2001                 // The previously assigned value of waitStreamEnd is no longer valid,
2002                 // since the mutex has been unlocked and either the callback handler
2003                 // or another thread could have re-started the AudioTrack during that time.
2004                 waitStreamEnd = mState == STATE_STOPPING;
2005                 if (waitStreamEnd) {
2006                     mState = STATE_STOPPED;
2007                     mReleased = 0;
2008                 }
2009             }
2010             if (waitStreamEnd && status != DEAD_OBJECT) {
2011                return NS_INACTIVE;
2012             }
2013             break;
2014         }
2015         return 0;
2016     }
2017 
2018     // perform callbacks while unlocked
2019     if (newUnderrun) {
2020         mCbf(EVENT_UNDERRUN, mUserData, NULL);
2021     }
2022     while (loopCountNotifications > 0) {
2023         mCbf(EVENT_LOOP_END, mUserData, NULL);
2024         --loopCountNotifications;
2025     }
2026     if (flags & CBLK_BUFFER_END) {
2027         mCbf(EVENT_BUFFER_END, mUserData, NULL);
2028     }
2029     if (markerReached) {
2030         mCbf(EVENT_MARKER, mUserData, &markerPosition);
2031     }
2032     while (newPosCount > 0) {
2033         size_t temp = newPosition.value(); // FIXME size_t != uint32_t
2034         mCbf(EVENT_NEW_POS, mUserData, &temp);
2035         newPosition += updatePeriod;
2036         newPosCount--;
2037     }
2038 
2039     if (mObservedSequence != sequence) {
2040         mObservedSequence = sequence;
2041         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
2042         // for offloaded tracks, just wait for the upper layers to recreate the track
2043         if (isOffloadedOrDirect()) {
2044             return NS_INACTIVE;
2045         }
2046     }
2047 
2048     // if inactive, then don't run me again until re-started
2049     if (!active) {
2050         return NS_INACTIVE;
2051     }
2052 
2053     // Compute the estimated time until the next timed event (position, markers, loops)
2054     // FIXME only for non-compressed audio
2055     uint32_t minFrames = ~0;
2056     if (!markerReached && position < markerPosition) {
2057         minFrames = (markerPosition - position).value();
2058     }
2059     if (loopPeriod > 0 && loopPeriod < minFrames) {
2060         // loopPeriod is already adjusted for actual position.
2061         minFrames = loopPeriod;
2062     }
2063     if (updatePeriod > 0) {
2064         minFrames = min(minFrames, (newPosition - position).value());
2065     }
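    // Worked example (illustrative numbers): with position == 1000, markerPosition == 1500 and
    // newPosition == 1200 (updatePeriod > 0), minFrames = min(500, 200) = 200, i.e. the next
    // timed event is the position update callback.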
2066 
2067     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
2068     static const uint32_t kPoll = 0;
2069     if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2070         minFrames = kPoll * notificationFrames;
2071     }
2072 
2073     // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2074     static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2075     const nsecs_t timeAfterCallbacks = systemTime();
2076 
2077     // Convert frame units to time units
2078     nsecs_t ns = NS_WHENEVER;
2079     if (minFrames != (uint32_t) ~0) {
2080         // AudioFlinger consumption of client data may be irregular when coming out of device
2081         // standby since the kernel buffers require filling. This is throttled to no more than 2x
2082         // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
2083         // half (but no more than half a second) to improve callback accuracy during these temporary
2084         // data surges.
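        // Illustrative figures: minFrames == 4800 at 48000 Hz and speed 1.0 estimates 100 ms;
        // halving that (well under the 500 ms cap) and adding kWaitPeriodNs (10 ms) yields
        // roughly a 60 ms wait before accounting for the time spent in callbacks.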
2085         const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
2086         constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
2087         ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
2088         ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
2089         // TODO: Should we warn if the callback time is too long?
2090         if (ns < 0) ns = 0;
2091     }
2092 
2093     // If not supplying data by EVENT_MORE_DATA or EVENT_CAN_WRITE_MORE_DATA, then we're done
2094     if (mTransfer != TRANSFER_CALLBACK && mTransfer != TRANSFER_SYNC_NOTIF_CALLBACK) {
2095         return ns;
2096     }
2097 
2098     // EVENT_MORE_DATA callback handling.
2099     // Timing for linear pcm audio data formats can be derived directly from the
2100     // buffer fill level.
2101     // Timing for compressed data is not directly available from the buffer fill level,
2102     // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2103     // to return a certain fill level.
2104 
2105     struct timespec timeout;
2106     const struct timespec *requested = &ClientProxy::kForever;
2107     if (ns != NS_WHENEVER) {
2108         timeout.tv_sec = ns / 1000000000LL;
2109         timeout.tv_nsec = ns % 1000000000LL;
2110         ALOGV("%s(%d): timeout %ld.%03d",
2111                 __func__, mPortId, timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2112         requested = &timeout;
2113     }
2114 
2115     size_t writtenFrames = 0;
2116     while (mRemainingFrames > 0) {
2117 
2118         Buffer audioBuffer;
2119         audioBuffer.frameCount = mRemainingFrames;
2120         size_t nonContig;
2121         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2122         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2123                 "%s(%d): obtainBuffer() err=%d frameCount=%zu",
2124                  __func__, mPortId, err, audioBuffer.frameCount);
2125         requested = &ClientProxy::kNonBlocking;
2126         size_t avail = audioBuffer.frameCount + nonContig;
2127         ALOGV("%s(%d): obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2128                 __func__, mPortId, mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2129         if (err != NO_ERROR) {
2130             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2131                     (isOffloaded() && (err == DEAD_OBJECT))) {
2132                 // FIXME bug 25195759
2133                 return 1000000;
2134             }
2135             ALOGE("%s(%d): Error %d obtaining an audio buffer, giving up.",
2136                     __func__, mPortId, err);
2137             return NS_NEVER;
2138         }
2139 
2140         if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2141             mRetryOnPartialBuffer = false;
2142             if (avail < mRemainingFrames) {
2143                 if (ns > 0) { // account for obtain time
2144                     const nsecs_t timeNow = systemTime();
2145                     ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2146                 }
2147 
2148                 // delayNs is first computed by the additional frames required in the buffer.
2149                 nsecs_t delayNs = framesToNanoseconds(
2150                         mRemainingFrames - avail, sampleRate, speed);
2151 
2152                 // afNs is the AudioFlinger mixer period in ns.
2153                 const nsecs_t afNs = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2154 
2155                 // If the AudioTrack is double buffered based on the AudioFlinger mixer period,
2156                 // we may have a race if we wait based on the number of frames desired.
2157                 // This is a possible issue with resampling and AAudio.
2158                 //
2159                 // The granularity of audioflinger processing is one mixer period; if
2160                 // our wait time is less than one mixer period, wait at most half the period.
2161                 if (delayNs < afNs) {
2162                     delayNs = std::min(delayNs, afNs / 2);
2163                 }
2164 
2165                 // adjust our ns wait by delayNs.
2166                 if (ns < 0 /* NS_WHENEVER */ || delayNs < ns) {
2167                     ns = delayNs;
2168                 }
2169                 return ns;
2170             }
2171         }
2172 
2173         size_t reqSize = audioBuffer.size;
2174         if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2175             // when notifying client it can write more data, pass the total size that can be
2176             // written in the next write() call, since it's not passed through the callback
2177             audioBuffer.size += nonContig;
2178         }
2179         mCbf(mTransfer == TRANSFER_CALLBACK ? EVENT_MORE_DATA : EVENT_CAN_WRITE_MORE_DATA,
2180                 mUserData, &audioBuffer);
2181         size_t writtenSize = audioBuffer.size;
2182 
2183         // Sanity check on returned size
2184         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2185             ALOGE("%s(%d): EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2186                     __func__, mPortId, reqSize, ssize_t(writtenSize));
2187             return NS_NEVER;
2188         }
2189 
2190         if (writtenSize == 0) {
2191             if (mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK) {
2192                 // The callback EVENT_CAN_WRITE_MORE_DATA was processed in the JNI of
2193                 // android.media.AudioTrack. The JNI is not using the callback to provide data,
2194                 // it only signals to the Java client that it can provide more data, which
2195                 // this track is ready to accept now.
2196                 // The playback thread will be woken up at the next ::write().
2197                 return NS_WHENEVER;
2198             }
2199             // The callback is done filling buffers
2200             // Keep this thread going to handle timed events and
2201             // still try to get more data in intervals of WAIT_PERIOD_MS
2202             // but don't just loop and block the CPU, so wait
2203 
2204             // mCbf(EVENT_MORE_DATA, ...) might either
2205             // (1) Block until it can fill the buffer, returning 0 size on EOS.
2206             // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2207             // (3) Return 0 size when no data is available, does not wait for more data.
2208             //
2209             // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2210             // We try to compute the wait time to avoid a tight sleep-wait cycle,
2211             // especially for case (3).
2212             //
2213             // The decision to support (1) and (2) affect the sizing of mRemainingFrames
2214             // and this loop; whereas for case (3) we could simply check once with the full
2215             // buffer size and skip the loop entirely.
2216 
2217             nsecs_t myns;
2218             if (audio_has_proportional_frames(mFormat)) {
2219                 // time to wait based on buffer occupancy
2220                 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2221                         framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2222                 // audio flinger thread buffer size (TODO: adjust for fast tracks)
2223                 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2224                 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2225                 // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2226                 myns = datans + (afns / 2);
2227             } else {
2228                 // FIXME: This could ping quite a bit if the buffer isn't full.
2229                 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2230                 myns = kWaitPeriodNs;
2231             }
2232             if (ns > 0) { // account for obtain and callback time
2233                 const nsecs_t timeNow = systemTime();
2234                 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2235             }
2236             if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2237                 ns = myns;
2238             }
2239             return ns;
2240         }
2241 
2242         size_t releasedFrames = writtenSize / mFrameSize;
2243         audioBuffer.frameCount = releasedFrames;
2244         mRemainingFrames -= releasedFrames;
2245         if (misalignment >= releasedFrames) {
2246             misalignment -= releasedFrames;
2247         } else {
2248             misalignment = 0;
2249         }
2250 
2251         releaseBuffer(&audioBuffer);
2252         writtenFrames += releasedFrames;
2253 
2254         // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2255         // if callback doesn't like to accept the full chunk
2256         if (writtenSize < reqSize) {
2257             continue;
2258         }
2259 
2260         // There could be enough non-contiguous frames available to satisfy the remaining request
2261         if (mRemainingFrames <= nonContig) {
2262             continue;
2263         }
2264 
2265 #if 0
2266         // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2267         // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2268         // that total to a sum == notificationFrames.
2269         if (0 < misalignment && misalignment <= mRemainingFrames) {
2270             mRemainingFrames = misalignment;
2271             return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2272         }
2273 #endif
2274 
2275     }
2276     if (writtenFrames > 0) {
2277         AutoMutex lock(mLock);
2278         mFramesWritten += writtenFrames;
2279     }
2280     mRemainingFrames = notificationFrames;
2281     mRetryOnPartialBuffer = true;
2282 
2283     // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2284     return 0;
2285 }
2286 
2287 status_t AudioTrack::restoreTrack_l(const char *from)
2288 {
2289     ALOGW("%s(%d): dead IAudioTrack, %s, creating a new one from %s()",
2290             __func__, mPortId, isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2291     ++mSequence;
2292 
2293     // refresh the audio configuration cache in this process to make sure we get new
2294     // output parameters and new IAudioFlinger in createTrack_l()
2295     AudioSystem::clearAudioConfigCache();
2296 
2297     if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2298         // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2299         // reconsider enabling for linear PCM encodings when position can be preserved.
2300         return DEAD_OBJECT;
2301     }
2302 
2303     // Save so we can return count since creation.
2304     mUnderrunCountOffset = getUnderrunCount_l();
2305 
2306     // save the old static buffer position
2307     uint32_t staticPosition = 0;
2308     size_t bufferPosition = 0;
2309     int loopCount = 0;
2310     if (mStaticProxy != 0) {
2311         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2312         staticPosition = mStaticProxy->getPosition().unsignedValue();
2313     }
2314 
2315     // See b/74409267. Connecting to a BT A2DP device supporting multiple codecs
2316     // causes a lot of churn on the service side, and it can reject starting
2317     // playback of a previously created track. May also apply to other cases.
2318     const int INITIAL_RETRIES = 3;
2319     int retries = INITIAL_RETRIES;
2320 retry:
2321     if (retries < INITIAL_RETRIES) {
2322         // See the comment for clearAudioConfigCache at the start of the function.
2323         AudioSystem::clearAudioConfigCache();
2324     }
2325     mFlags = mOrigFlags;
2326 
2327     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2328     // following member variables: mAudioTrack, mCblkMemory and mCblk.
2329     // It will also delete the strong references on previous IAudioTrack and IMemory.
2330     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2331     status_t result = createTrack_l();
2332 
2333     if (result == NO_ERROR) {
2334         // take the frames that will be lost by track recreation into account in saved position
2335         // For streaming tracks, this is the amount we obtained from the user/client
2336         // (not the number actually consumed at the server - those are already lost).
2337         if (mStaticProxy == 0) {
2338             mPosition = mReleased;
2339         }
2340         // Continue playback from last known position and restore loop.
2341         if (mStaticProxy != 0) {
2342             if (loopCount != 0) {
2343                 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2344                         mLoopStart, mLoopEnd, loopCount);
2345             } else {
2346                 mStaticProxy->setBufferPosition(bufferPosition);
2347                 if (bufferPosition == mFrameCount) {
2348                     ALOGD("%s(%d): restoring track at end of static buffer", __func__, mPortId);
2349                 }
2350             }
2351         }
2352         // restore volume handler
2353         mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2354             sp<VolumeShaper::Operation> operationToEnd =
2355                     new VolumeShaper::Operation(shaper.mOperation);
2356             // TODO: Ideally we would restore to the exact xOffset position
2357             // as returned by getVolumeShaperState(), but we don't have that
2358             // information when restoring at the client unless we periodically poll
2359             // the server or create shared memory state.
2360             //
2361             // For now, we simply advance to the end of the VolumeShaper effect
2362             // if it has been started.
2363             if (shaper.isStarted()) {
2364                 operationToEnd->setNormalizedTime(1.f);
2365             }
2366             return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
2367         });
2368 
2369         if (mState == STATE_ACTIVE) {
2370             result = mAudioTrack->start();
2371         }
2372         // server resets to zero so we offset
2373         mFramesWrittenServerOffset =
2374                 mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2375         mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2376     }
2377     if (result != NO_ERROR) {
2378         ALOGW("%s(%d): failed status %d, retries %d", __func__, mPortId, result, retries);
2379         if (--retries > 0) {
2380             // leave time for a possible race condition to clear before retrying
2381             usleep(500000);
2382             goto retry;
2383         }
2384         // if no retries left, set invalid bit to force restoring at next occasion
2385         // and avoid inconsistent active state on client and server sides
2386         if (mCblk != nullptr) {
2387             android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
2388         }
2389     }
2390     return result;
2391 }
2392 
2393 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2394 {
2395     // This is the sole place to read server consumed frames
2396     Modulo<uint32_t> newServer(mProxy->getPosition());
2397     const int32_t delta = (newServer - mServer).signedValue();
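    // Wraparound note (illustrative): because positions are Modulo<uint32_t>, the signed delta
    // stays correct across 32-bit rollover, e.g. mServer == 0xFFFFFF00 advancing to
    // newServer == 0x00000100 yields delta == 0x200 (512 frames), not a huge negative value.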
2398     // TODO There is controversy about whether there can be "negative jitter" in server position.
2399     //      This should be investigated further, and if possible, it should be addressed.
2400     //      A more definite failure mode is infrequent polling by client.
2401     //      One could call (void)getPosition_l() in releaseBuffer(),
2402     //      so mReleased and mPosition are always lock-step as best possible.
2403     //      That should ensure delta never goes negative for infrequent polling
2404     //      unless the server has more than 2^31 frames in its buffer,
2405     //      in which case the use of uint32_t for these counters has bigger issues.
2406     ALOGE_IF(delta < 0,
2407             "%s(%d): detected illegal retrograde motion by the server: mServer advanced by %d",
2408             __func__, mPortId, delta);
2409     mServer = newServer;
2410     if (delta > 0) { // avoid retrograde
2411         mPosition += delta;
2412     }
2413     return mPosition;
2414 }
2415 
2416 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2417 {
2418     updateLatency_l();
2419     // applicable for mixing tracks only (not offloaded or direct)
2420     if (mStaticProxy != 0) {
2421         return true; // static tracks do not have issues with buffer sizing.
2422     }
2423     const size_t minFrameCount =
2424             AudioSystem::calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate,
2425                                             sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2426     const bool allowed = mFrameCount >= minFrameCount;
2427     ALOGD_IF(!allowed,
2428             "%s(%d): denied "
2429             "mAfLatency:%u  mAfFrameCount:%zu  mAfSampleRate:%u  sampleRate:%u  speed:%f "
2430             "mFrameCount:%zu < minFrameCount:%zu",
2431             __func__, mPortId,
2432             mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2433             mFrameCount, minFrameCount);
2434     return allowed;
2435 }
2436 
2437 status_t AudioTrack::setParameters(const String8& keyValuePairs)
2438 {
2439     AutoMutex lock(mLock);
2440     return mAudioTrack->setParameters(keyValuePairs);
2441 }
2442 
2443 status_t AudioTrack::selectPresentation(int presentationId, int programId)
2444 {
2445     AutoMutex lock(mLock);
2446     AudioParameter param = AudioParameter();
2447     param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
2448     param.addInt(String8(AudioParameter::keyProgramId), programId);
2449     ALOGV("%s(%d): PresentationId/ProgramId[%s]",
2450             __func__, mPortId, param.toString().string());
2451 
2452     return mAudioTrack->setParameters(param.toString());
2453 }
2454 
2455 VolumeShaper::Status AudioTrack::applyVolumeShaper(
2456         const sp<VolumeShaper::Configuration>& configuration,
2457         const sp<VolumeShaper::Operation>& operation)
2458 {
2459     AutoMutex lock(mLock);
2460     mVolumeHandler->setIdIfNecessary(configuration);
2461     VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2462 
2463     if (status == DEAD_OBJECT) {
2464         if (restoreTrack_l("applyVolumeShaper") == OK) {
2465             status = mAudioTrack->applyVolumeShaper(configuration, operation);
2466         }
2467     }
2468     if (status >= 0) {
2469         // save VolumeShaper for restore
2470         mVolumeHandler->applyVolumeShaper(configuration, operation);
2471         if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2472             mVolumeHandler->setStarted();
2473         }
2474     } else {
2475         // warn only if not an expected restore failure.
2476         ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2477                 "%s(%d): applyVolumeShaper failed: %d", __func__, mPortId, status);
2478     }
2479     return status;
2480 }
2481 
2482 sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2483 {
2484     AutoMutex lock(mLock);
2485     sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2486     if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2487         if (restoreTrack_l("getVolumeShaperState") == OK) {
2488             state = mAudioTrack->getVolumeShaperState(id);
2489         }
2490     }
2491     return state;
2492 }
2493 
2494 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2495 {
2496     if (timestamp == nullptr) {
2497         return BAD_VALUE;
2498     }
2499     AutoMutex lock(mLock);
2500     return getTimestamp_l(timestamp);
2501 }
2502 
2503 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2504 {
2505     if (mCblk->mFlags & CBLK_INVALID) {
2506         const status_t status = restoreTrack_l("getTimestampExtended");
2507         if (status != OK) {
2508             // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2509             // recommending that the track be recreated.
2510             return DEAD_OBJECT;
2511         }
2512     }
2513     // check for offloaded/direct here in case restoring somehow changed those flags.
2514     if (isOffloadedOrDirect_l()) {
2515         return INVALID_OPERATION; // not supported
2516     }
2517     status_t status = mProxy->getTimestamp(timestamp);
2518     LOG_ALWAYS_FATAL_IF(status != OK, "%s(%d): status %d not allowed from proxy getTimestamp",
2519             __func__, mPortId, status);
2520     bool found = false;
2521     timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2522     timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2523     // server side frame offset in case AudioTrack has been restored.
2524     for (int i = ExtendedTimestamp::LOCATION_SERVER;
2525             i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2526         if (timestamp->mTimeNs[i] >= 0) {
2527             // apply server offset (frames flushed is ignored
2528             // so we don't report the jump when the flush occurs).
2529             timestamp->mPosition[i] += mFramesWrittenServerOffset;
2530             found = true;
2531         }
2532     }
2533     return found ? OK : WOULD_BLOCK;
2534 }
2535 
2536 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2537 {
2538     AutoMutex lock(mLock);
2539     return getTimestamp_l(timestamp);
2540 }
2541 
2542 status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
2543 {
2544     bool previousTimestampValid = mPreviousTimestampValid;
2545     // Set false here to cover all the error return cases.
2546     mPreviousTimestampValid = false;
2547 
2548     switch (mState) {
2549     case STATE_ACTIVE:
2550     case STATE_PAUSED:
2551         break; // handle below
2552     case STATE_FLUSHED:
2553     case STATE_STOPPED:
2554         return WOULD_BLOCK;
2555     case STATE_STOPPING:
2556     case STATE_PAUSED_STOPPING:
2557         if (!isOffloaded_l()) {
2558             return INVALID_OPERATION;
2559         }
2560         break; // offloaded tracks handled below
2561     default:
2562         LOG_ALWAYS_FATAL("%s(%d): Invalid mState in getTimestamp(): %d",
2563                __func__, mPortId, mState);
2564         break;
2565     }
2566 
2567     if (mCblk->mFlags & CBLK_INVALID) {
2568         const status_t status = restoreTrack_l("getTimestamp");
2569         if (status != OK) {
2570             // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2571             // recommending that the track be recreated.
2572             return DEAD_OBJECT;
2573         }
2574     }
2575 
2576     // The presented frame count must always lag behind the consumed frame count.
2577     // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2578 
2579     status_t status;
2580     if (isOffloadedOrDirect_l()) {
2581         // use Binder to get timestamp
2582         status = mAudioTrack->getTimestamp(timestamp);
2583     } else {
2584         // read timestamp from shared memory
2585         ExtendedTimestamp ets;
2586         status = mProxy->getTimestamp(&ets);
2587         if (status == OK) {
2588             ExtendedTimestamp::Location location;
2589             status = ets.getBestTimestamp(&timestamp, &location);
2590 
2591             if (status == OK) {
2592                 updateLatency_l();
2593                 // It is possible that the best location has moved from the kernel to the server.
2594                 // In this case we adjust the position from the previous computed latency.
2595                 if (location == ExtendedTimestamp::LOCATION_SERVER) {
2596                     ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2597                             "%s(%d): location moved from kernel to server",
2598                             __func__, mPortId);
2599                     // check that the last kernel OK time info exists and the positions
2600                     // are valid (if they predate the current track, the positions may
2601                     // be zero or negative).
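                    // Illustrative fallback: with mAfLatency == 20 ms, mSampleRate == 48000 Hz
                    // and unity speed, the estimated in-flight frames are
                    // 20 * 48000 * 1.0 / 1000 = 960.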
2602                     const int64_t frames =
2603                             (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2604                             ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2605                             ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2606                             ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2607                             ?
2608                             int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2609                                     / 1000)
2610                             :
2611                             (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2612                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2613                     ALOGV("%s(%d): frame adjustment:%lld  timestamp:%s",
2614                             __func__, mPortId, (long long)frames, ets.toString().c_str());
2615                     if (frames >= ets.mPosition[location]) {
2616                         timestamp.mPosition = 0;
2617                     } else {
2618                         timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2619                     }
2620                 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2621                     ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2622                             "%s(%d): location moved from server to kernel",
2623                             __func__, mPortId);
2624 
2625                     if (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER] ==
2626                             ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL]) {
2627                         // In Q, we don't return errors as an invalid time
2628                         // but instead leave the last good kernel timestamp alone.
2629                         //
2630                         // If server is identical to kernel, the device data pipeline is idle.
2631                         // A better start time is now.  The retrograde check ensures
2632                         // timestamp monotonicity.
2633                         const int64_t nowNs = systemTime();
2634                         if (!mTimestampStallReported) {
2635                             ALOGD("%s(%d): device stall time corrected using current time %lld",
2636                                     __func__, mPortId, (long long)nowNs);
2637                             mTimestampStallReported = true;
2638                         }
2639                         timestamp.mTime = convertNsToTimespec(nowNs);
2640                     } else {
2641                         mTimestampStallReported = false;
2642                     }
2643                 }
2644 
2645                 // We update the timestamp time even when paused.
2646                 if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
2647                     const int64_t now = systemTime();
2648                     const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
2649                     const int64_t lag =
2650                             (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2651                                 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
2652                             ? int64_t(mAfLatency * 1000000LL)
2653                             : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2654                              - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
2655                              * NANOS_PER_SECOND / mSampleRate;
2656                     const int64_t limit = now - lag; // no earlier than this limit
2657                     if (at < limit) {
2658                         ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
2659                                 (long long)lag, (long long)at, (long long)limit);
2660                         timestamp.mTime = convertNsToTimespec(limit);
2661                     }
2662                 }
2663                 mPreviousLocation = location;
2664             } else {
2665                 // right after AudioTrack is started, one may not find a timestamp
2666                 ALOGV("%s(%d): getBestTimestamp did not find timestamp", __func__, mPortId);
2667             }
2668         }
2669         if (status == INVALID_OPERATION) {
2670             // INVALID_OPERATION occurs when no timestamp has been issued by the server;
2671             // other failures are signaled by a negative time.
2672             // If we come out of FLUSHED or STOPPED where the position is known
2673             // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
2674             // "zero" for NuPlayer).  We don't convert for track restoration as position
2675             // does not reset.
2676             ALOGV("%s(%d): timestamp server offset:%lld restore frames:%lld",
2677                     __func__, mPortId,
2678                     (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
2679             if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
2680                 status = WOULD_BLOCK;
2681             }
2682         }
2683     }
2684     if (status != NO_ERROR) {
2685         ALOGV_IF(status != WOULD_BLOCK, "%s(%d): getTimestamp error:%#x", __func__, mPortId, status);
2686         return status;
2687     }
2688     if (isOffloadedOrDirect_l()) {
2689         if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2690             // use cached paused position in case another offloaded track is running.
2691             timestamp.mPosition = mPausedPosition;
2692             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2693             // TODO: adjust for delay
2694             return NO_ERROR;
2695         }
2696 
2697         // Check whether a pending flush or stop has completed, as those commands may
2698         // complete asynchronously, return just before finishing, or exhibit glitchy behavior.
2699         //
2700         // Originally this showed up as the first timestamp being a continuation of
2701         // the previous song under gapless playback.
2702         // However, we sometimes see zero timestamps, then a glitch of
2703         // the previous song's position, and then correct timestamps afterwards.
2704         if (mStartFromZeroUs != 0 && mSampleRate != 0) {
2705             static const int kTimeJitterUs = 100000; // 100 ms
2706             static const int k1SecUs = 1000000;
2707 
2708             const int64_t timeNow = getNowUs();
2709 
2710             if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
2711                 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2712                 if (timestampTimeUs < mStartFromZeroUs) {
2713                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2714                 }
2715                 const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
2716                 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2717                         / ((double)mSampleRate * mPlaybackRate.mSpeed);
2718 
2719                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2720                     // Verify that the counter can't count faster than the sample rate
2721                     // since the start time.  If greater, then that means we may have failed
2722                     // to completely flush or stop the previous playing track.
2723                     ALOGW_IF(!mTimestampStartupGlitchReported,
2724                             "%s(%d): startup glitch detected"
2725                             " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2726                             __func__, mPortId,
2727                             (long long)deltaTimeUs, (long long)deltaPositionByUs,
2728                             timestamp.mPosition);
2729                     mTimestampStartupGlitchReported = true;
2730                     if (previousTimestampValid
2731                             && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2732                         timestamp = mPreviousTimestamp;
2733                         mPreviousTimestampValid = true;
2734                         return NO_ERROR;
2735                     }
2736                     return WOULD_BLOCK;
2737                 }
2738                 if (deltaPositionByUs != 0) {
2739                     mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
2740                 }
2741             } else {
2742                 mStartFromZeroUs = 0; // don't check again, start time expired.
2743             }
2744             mTimestampStartupGlitchReported = false;
2745         }
2746     } else {
2747         // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2748         (void) updateAndGetPosition_l();
2749         // Server consumed (mServer) and presented both use the same server time base,
2750         // and server consumed is always >= presented.
2751         // The delta between these represents the number of frames in the buffer pipeline.
2752         // If this delta is greater than the client position, it means that
2753         // actually presented is still stuck at the starting line (figuratively speaking),
2754         // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2755         // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2756         // mPosition exceeds 32 bits.
2757         // TODO Remove when timestamp is updated to contain pipeline status info.
2758         const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2759         if (pipelineDepthInFrames > 0 /* should be true, but we check anyway */
2760                 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2761             return INVALID_OPERATION;
2762         }
2763         // Convert timestamp position from server time base to client time base.
2764         // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2765         // But if we change it to 64-bit then this could fail.
2766         // Use Modulo computation here.
2767         timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2768         // Immediately after a call to getPosition_l(), mPosition and
2769         // mServer both represent the same frame position.  mPosition is
2770         // in client's point of view, and mServer is in server's point of
2771         // view.  So the difference between them is the "fudge factor"
2772         // between client and server views due to stop() and/or new
2773         // IAudioTrack.  And timestamp.mPosition is initially in server's
2774         // point of view, so we need to apply the same fudge factor to it.
2775     }
2776 
2777     // Prevent retrograde motion in timestamp.
2778     // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2779     if (status == NO_ERROR) {
2780         // Fix stale time when checking timestamp right after start().
2781         // The position is at the last reported location but the time can be stale
2782         // due to pause or standby or cold start latency.
2783         //
2784         // We keep advancing the time (but not the position) to ensure that the
2785         // stale value does not confuse the application.
2786         //
2787         // For offload compatibility, use a default lag value here.
2788         // Any time discrepancy between this update and the pause timestamp is handled
2789         // by the retrograde check afterwards.
2790         int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);
2791         const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
2792         const int64_t limitNs = mStartNs - lagNs;
2793         if (currentTimeNanos < limitNs) {
2794             if (!mTimestampStaleTimeReported) {
2795                 ALOGD("%s(%d): stale timestamp time corrected, "
2796                         "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
2797                         __func__, mPortId,
2798                         (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
2799                 mTimestampStaleTimeReported = true;
2800             }
2801             timestamp.mTime = convertNsToTimespec(limitNs);
2802             currentTimeNanos = limitNs;
2803         } else {
2804             mTimestampStaleTimeReported = false;
2805         }
2806 
2807         // previousTimestampValid is set to false when starting after a stop or flush.
2808         if (previousTimestampValid) {
2809             const int64_t previousTimeNanos =
2810                     audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
2811 
2812             // retrograde check
2813             if (currentTimeNanos < previousTimeNanos) {
2814                 if (!mTimestampRetrogradeTimeReported) {
2815                     ALOGW("%s(%d): retrograde timestamp time corrected, %lld < %lld",
2816                             __func__, mPortId,
2817                             (long long)currentTimeNanos, (long long)previousTimeNanos);
2818                     mTimestampRetrogradeTimeReported = true;
2819                 }
2820                 timestamp.mTime = mPreviousTimestamp.mTime;
2821             } else {
2822                 mTimestampRetrogradeTimeReported = false;
2823             }
2824 
2825             // Looking at signed delta will work even when the timestamps
2826             // are wrapping around.
2827             int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2828                     - mPreviousTimestamp.mPosition).signedValue();
2829             if (deltaPosition < 0) {
2830                 // Only report once per position instead of spamming the log.
2831                 if (!mTimestampRetrogradePositionReported) {
2832                     ALOGW("%s(%d): retrograde timestamp position corrected, %d = %u - %u",
2833                             __func__, mPortId,
2834                             deltaPosition,
2835                             timestamp.mPosition,
2836                             mPreviousTimestamp.mPosition);
2837                     mTimestampRetrogradePositionReported = true;
2838                 }
2839             } else {
2840                 mTimestampRetrogradePositionReported = false;
2841             }
2842             if (deltaPosition < 0) {
2843                 timestamp.mPosition = mPreviousTimestamp.mPosition;
2844                 deltaPosition = 0;
2845             }
2846 #if 0
2847             // Uncomment this to verify audio timestamp rate.
2848             const int64_t deltaTime =
2849                     audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
2850             if (deltaTime != 0) {
2851                 const int64_t computedSampleRate =
2852                         deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
2853                 ALOGD("%s(%d): computedSampleRate:%u  sampleRate:%u",
2854                         __func__, mPortId,
2855                         (unsigned)computedSampleRate, mSampleRate);
2856             }
2857 #endif
2858         }
2859         mPreviousTimestamp = timestamp;
2860         mPreviousTimestampValid = true;
2861     }
2862 
2863     return status;
2864 }
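
// A minimal usage sketch (hypothetical client code, not part of this file): callers typically
// poll getTimestamp() and interpolate the presented position up to "now", assuming playback
// continues at the track sample rate.
//
//     AudioTimestamp ts;
//     if (track->getTimestamp(ts) == NO_ERROR) {
//         const int64_t tsNs  = audio_utils_ns_from_timespec(&ts.mTime);
//         const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
//         const int64_t framesPresentedNow = (int64_t)ts.mPosition
//                 + (nowNs - tsNs) * track->getSampleRate() / NANOS_PER_SECOND;
//     }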
2865 
2866 String8 AudioTrack::getParameters(const String8& keys)
2867 {
2868     audio_io_handle_t output = getOutput();
2869     if (output != AUDIO_IO_HANDLE_NONE) {
2870         return AudioSystem::getParameters(output, keys);
2871     } else {
2872         return String8::empty();
2873     }
2874 }
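
// Hedged usage sketch (hypothetical): keys is a semicolon-separated key list that is forwarded
// to the output stream HAL, so which keys are understood is device-specific, e.g.
//
//     String8 reply = track->getParameters(String8(AudioParameter::keyRouting));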
2875 
2876 bool AudioTrack::isOffloaded() const
2877 {
2878     AutoMutex lock(mLock);
2879     return isOffloaded_l();
2880 }
2881 
2882 bool AudioTrack::isDirect() const
2883 {
2884     AutoMutex lock(mLock);
2885     return isDirect_l();
2886 }
2887 
2888 bool AudioTrack::isOffloadedOrDirect() const
2889 {
2890     AutoMutex lock(mLock);
2891     return isOffloadedOrDirect_l();
2892 }
2893 
2894 
2895 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2896 {
2897     String8 result;
2898 
2899     result.append(" AudioTrack::dump\n");
2900     result.appendFormat("  id(%d), status(%d), state(%d), session Id(%d), flags(%#x)\n",
2901                         mPortId, mStatus, mState, mSessionId, mFlags);
2902     result.appendFormat("  stream type(%d), left - right volume(%f, %f)\n",
2903                         (mStreamType == AUDIO_STREAM_DEFAULT) ?
2904                             AudioSystem::attributesToStreamType(mAttributes) :
2905                             mStreamType,
2906                         mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2907     result.appendFormat("  format(%#x), channel mask(%#x), channel count(%u)\n",
2908                   mFormat, mChannelMask, mChannelCount);
2909     result.appendFormat("  sample rate(%u), original sample rate(%u), speed(%f)\n",
2910                   mSampleRate, mOriginalSampleRate, mPlaybackRate.mSpeed);
2911     result.appendFormat("  frame count(%zu), req. frame count(%zu)\n",
2912                   mFrameCount, mReqFrameCount);
2913     result.appendFormat("  notif. frame count(%u), req. notif. frame count(%u),"
2914             " req. notif. per buff(%u)\n",
2915              mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
2916     result.appendFormat("  latency (%u), selected device Id(%d), routed device Id(%d)\n",
2917                         mLatency, mSelectedDeviceId, mRoutedDeviceId);
2918     result.appendFormat("  output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
2919                         mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
2920     ::write(fd, result.string(), result.size());
2921     return NO_ERROR;
2922 }
2923 
2924 uint32_t AudioTrack::getUnderrunCount() const
2925 {
2926     AutoMutex lock(mLock);
2927     return getUnderrunCount_l();
2928 }
2929 
2930 uint32_t AudioTrack::getUnderrunCount_l() const
2931 {
2932     return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2933 }
2934 
2935 uint32_t AudioTrack::getUnderrunFrames() const
2936 {
2937     AutoMutex lock(mLock);
2938     return mProxy->getUnderrunFrames();
2939 }
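
// Monitoring sketch (hypothetical): the underrun count is cumulative for the lifetime of the
// track, so a client that wants to detect new glitches compares against the previous poll.
//
//     uint32_t lastUnderruns = track->getUnderrunCount();
//     ...
//     const uint32_t underruns = track->getUnderrunCount();
//     if (underruns != lastUnderruns) {
//         // at least one underrun (audible glitch) occurred since the last poll
//         lastUnderruns = underruns;
//     }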
2940 
2941 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2942 {
2943 
2944     if (callback == 0) {
2945         ALOGW("%s(%d): adding NULL callback!", __func__, mPortId);
2946         return BAD_VALUE;
2947     }
2948     AutoMutex lock(mLock);
2949     if (mDeviceCallback.unsafe_get() == callback.get()) {
2950         ALOGW("%s(%d): adding same callback!", __func__, mPortId);
2951         return INVALID_OPERATION;
2952     }
2953     status_t status = NO_ERROR;
2954     if (mOutput != AUDIO_IO_HANDLE_NONE) {
2955         if (mDeviceCallback != 0) {
2956             ALOGW("%s(%d): callback already present!", __func__, mPortId);
2957             AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
2958         }
2959         status = AudioSystem::addAudioDeviceCallback(this, mOutput, mPortId);
2960     }
2961     mDeviceCallback = callback;
2962     return status;
2963 }
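
// Registration sketch (hypothetical client code): the callback interface is the one invoked
// from onAudioDeviceUpdate() below, so it must be RefBase-managed and override that method.
//
//     struct MyDeviceCallback : public AudioSystem::AudioDeviceCallback {
//         void onAudioDeviceUpdate(audio_io_handle_t audioIo,
//                                  audio_port_handle_t deviceId) override {
//             // react to the newly routed device for this output
//         }
//     };
//     sp<MyDeviceCallback> cb = new MyDeviceCallback();
//     track->addAudioDeviceCallback(cb);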
2964 
2965 status_t AudioTrack::removeAudioDeviceCallback(
2966         const sp<AudioSystem::AudioDeviceCallback>& callback)
2967 {
2968     if (callback == 0) {
2969         ALOGW("%s(%d): removing NULL callback!", __func__, mPortId);
2970         return BAD_VALUE;
2971     }
2972     AutoMutex lock(mLock);
2973     if (mDeviceCallback.unsafe_get() != callback.get()) {
2974         ALOGW("%s(%d): removing different callback!", __func__, mPortId);
2975         return INVALID_OPERATION;
2976     }
2977     mDeviceCallback.clear();
2978     if (mOutput != AUDIO_IO_HANDLE_NONE) {
2979         AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
2980     }
2981     return NO_ERROR;
2982 }
2983 
2984 
2985 void AudioTrack::onAudioDeviceUpdate(audio_io_handle_t audioIo,
2986                                  audio_port_handle_t deviceId)
2987 {
2988     sp<AudioSystem::AudioDeviceCallback> callback;
2989     {
2990         AutoMutex lock(mLock);
2991         if (audioIo != mOutput) {
2992             return;
2993         }
2994         callback = mDeviceCallback.promote();
2995         // Only update the routed device if the track is active; route changes due to other
2996         // use cases are irrelevant for this client.
2997         if (mState == STATE_ACTIVE) {
2998             mRoutedDeviceId = deviceId;
2999         }
3000     }
3001 
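    // The callback was promoted under mLock above but is invoked here without the lock held,
    // so a callback that re-enters AudioTrack cannot deadlock against mLock.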
3002     if (callback.get() != nullptr) {
3003         callback->onAudioDeviceUpdate(mOutput, mRoutedDeviceId);
3004     }
3005 }
3006 
3007 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
3008 {
3009     if (msec == nullptr ||
3010             (location != ExtendedTimestamp::LOCATION_SERVER
3011                     && location != ExtendedTimestamp::LOCATION_KERNEL)) {
3012         return BAD_VALUE;
3013     }
3014     AutoMutex lock(mLock);
3015     // inclusive of offloaded and direct tracks.
3016     //
3017     // It would be possible (but is not enabled) to compute a duration for non-PCM
3018     // audio_has_proportional_frames() formats, because they currently drain at a rate
3019     // equivalent to the PCM sample rate * frame size.
3020     if (!isPurePcmData_l()) {
3021         return INVALID_OPERATION;
3022     }
3023     ExtendedTimestamp ets;
3024     if (getTimestamp_l(&ets) == OK
3025             && ets.mTimeNs[location] > 0) {
3026         int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
3027                 - ets.mPosition[location];
3028         if (diff < 0) {
3029             *msec = 0;
3030         } else {
3031             // ms is the pending playback time, computed from the frame difference
3032             int64_t ms = (int64_t)((double)diff * 1000 /
3033                     ((double)mSampleRate * mPlaybackRate.mSpeed));
3034             // clockdiff is the timestamp age (negative)
3035             int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
3036                     ets.mTimeNs[location]
3037                     + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
3038                     - systemTime(SYSTEM_TIME_MONOTONIC);
3039 
3040             //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
3041             static const int NANOS_PER_MILLIS = 1000000;
3042             *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
3043         }
3044         return NO_ERROR;
3045     }
3046     if (location != ExtendedTimestamp::LOCATION_SERVER) {
3047         return INVALID_OPERATION; // LOCATION_KERNEL is not available
3048     }
3049     // use server position directly (offloaded and direct arrive here)
3050     updateAndGetPosition_l();
3051     int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
3052     *msec = (diff <= 0) ? 0
3053             : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
3054     return NO_ERROR;
3055 }
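
// Usage sketch (hypothetical): estimate how many milliseconds of already-written audio have
// not yet reached the requested location, e.g. before stopping or switching tracks.
//
//     int32_t msec = 0;
//     if (track->pendingDuration(&msec, ExtendedTimestamp::LOCATION_SERVER) == NO_ERROR) {
//         // roughly msec ms of queued audio remain to be consumed by the server
//     }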
3056 
3057 bool AudioTrack::hasStarted()
3058 {
3059     AutoMutex lock(mLock);
3060     switch (mState) {
3061     case STATE_STOPPED:
3062         if (isOffloadedOrDirect_l()) {
3063             // return true if the track has started at any point in the past.
3064             return mStartFromZeroUs > 0;
3065         }
3066         // A normal audio track may still be draining, so
3067         // check if the stream has ended.  This covers fast track position
3068         // instability and start/stop without any data written.
3069         if (mProxy->getStreamEndDone()) {
3070             return true;
3071         }
3072         FALLTHROUGH_INTENDED;
3073     case STATE_ACTIVE:
3074     case STATE_STOPPING:
3075         break;
3076     case STATE_PAUSED:
3077     case STATE_PAUSED_STOPPING:
3078     case STATE_FLUSHED:
3079         return false;  // we're not active
3080     default:
3081         LOG_ALWAYS_FATAL("%s(%d): Invalid mState in hasStarted(): %d", __func__, mPortId, mState);
3082         break;
3083     }
3084 
3085     // wait indicates whether we need to wait for a timestamp.
3086     // This is determined conservatively; if we encounter an unexpected error
3087     // we do not wait.
3088     bool wait = false;
3089     if (isOffloadedOrDirect_l()) {
3090         AudioTimestamp ts;
3091         status_t status = getTimestamp_l(ts);
3092         if (status == WOULD_BLOCK) {
3093             wait = true;
3094         } else if (status == OK) {
3095             wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
3096         }
3097         ALOGV("%s(%d): hasStarted wait:%d  ts:%u  start position:%lld",
3098                 __func__, mPortId,
3099                 (int)wait,
3100                 ts.mPosition,
3101                 (long long)mStartTs.mPosition);
3102     } else {
3103         int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
3104         ExtendedTimestamp ets;
3105         status_t status = getTimestamp_l(&ets);
3106         if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
3107             wait = true;
3108         } else if (status == OK) {
3109             for (location = ExtendedTimestamp::LOCATION_KERNEL;
3110                     location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
3111                 if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
3112                     continue;
3113                 }
3114                 wait = ets.mPosition[location] == 0
3115                         || ets.mPosition[location] == mStartEts.mPosition[location];
3116                 break;
3117             }
3118         }
3119         ALOGV("%s(%d): hasStarted wait:%d  ets:%lld  start position:%lld",
3120                 __func__, mPortId,
3121                 (int)wait,
3122                 (long long)ets.mPosition[location],
3123                 (long long)mStartEts.mPosition[location]);
3124     }
3125     return !wait;
3126 }
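
// Polling sketch (hypothetical; assumes the caller has access to this API): after start(),
// hasStarted() can be polled to detect when frames have actually advanced past the starting
// position, e.g. before trusting timestamps for A/V sync.
//
//     track->start();
//     while (!track->hasStarted()) {
//         usleep(WAIT_PERIOD_MS * 1000);  // WAIT_PERIOD_MS is defined at the top of this file
//     }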
3127 
3128 // =========================================================================
3129 
3130 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
3131 {
3132     sp<AudioTrack> audioTrack = mAudioTrack.promote();
3133     if (audioTrack != 0) {
3134         AutoMutex lock(audioTrack->mLock);
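        // binderDied() on the proxy marks the shared control block invalid, so blocked client
        // calls return DEAD_OBJECT and the track can later be re-created via restoreTrack_l().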
3135         audioTrack->mProxy->binderDied();
3136     }
3137 }
3138 
3139 // =========================================================================
3140 
3141 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver)
3142     : Thread(true /* bCanCallJava */)  // binder recursion on restoreTrack_l() may call Java.
3143     , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
3144       mIgnoreNextPausedInt(false)
3145 {
3146 }
3147 
3148 AudioTrack::AudioTrackThread::~AudioTrackThread()
3149 {
3150 }
3151 
3152 bool AudioTrack::AudioTrackThread::threadLoop()
3153 {
3154     {
3155         AutoMutex _l(mMyLock);
3156         if (mPaused) {
3157             // TODO check return value and handle or log
3158             mMyCond.wait(mMyLock);
3159             // caller will check for exitPending()
3160             return true;
3161         }
3162         if (mIgnoreNextPausedInt) {
3163             mIgnoreNextPausedInt = false;
3164             mPausedInt = false;
3165         }
3166         if (mPausedInt) {
3167             // TODO use futex instead of condition, for event flag "or"
3168             if (mPausedNs > 0) {
3169                 // TODO check return value and handle or log
3170                 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3171             } else {
3172                 // TODO check return value and handle or log
3173                 mMyCond.wait(mMyLock);
3174             }
3175             mPausedInt = false;
3176             return true;
3177         }
3178     }
3179     if (exitPending()) {
3180         return false;
3181     }
3182     nsecs_t ns = mReceiver.processAudioBuffer();
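    // ns is the suggested wait before the next processAudioBuffer() call:
    // 0 means run again immediately, NS_INACTIVE pauses until resume()/wake(),
    // NS_NEVER exits this thread loop, NS_WHENEVER waits indefinitely for wake(),
    // and any other positive value is a relative timeout in nanoseconds.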
3183     switch (ns) {
3184     case 0:
3185         return true;
3186     case NS_INACTIVE:
3187         pauseInternal();
3188         return true;
3189     case NS_NEVER:
3190         return false;
3191     case NS_WHENEVER:
3192         // Event driven: call wake() when callback notification conditions change.
3193         ns = INT64_MAX;
3194         FALLTHROUGH_INTENDED;
3195     default:
3196         LOG_ALWAYS_FATAL_IF(ns < 0, "%s(%d): processAudioBuffer() returned %lld",
3197                 __func__, mReceiver.mPortId, (long long)ns);
3198         pauseInternal(ns);
3199         return true;
3200     }
3201 }
3202 
3203 void AudioTrack::AudioTrackThread::requestExit()
3204 {
3205     // must be in this order to avoid a race condition
3206     Thread::requestExit();
3207     resume();
3208 }
3209 
3210 void AudioTrack::AudioTrackThread::pause()
3211 {
3212     AutoMutex _l(mMyLock);
3213     mPaused = true;
3214 }
3215 
3216 void AudioTrack::AudioTrackThread::resume()
3217 {
3218     AutoMutex _l(mMyLock);
3219     mIgnoreNextPausedInt = true;
3220     if (mPaused || mPausedInt) {
3221         mPaused = false;
3222         mPausedInt = false;
3223         mMyCond.signal();
3224     }
3225 }
3226 
3227 void AudioTrack::AudioTrackThread::wake()
3228 {
3229     AutoMutex _l(mMyLock);
3230     if (!mPaused) {
3231         // wake() might be called while servicing a callback - ignore the next
3232         // pause time and call processAudioBuffer.
3233         mIgnoreNextPausedInt = true;
3234         if (mPausedInt && mPausedNs > 0) {
3235             // audio track is active and internally paused with timeout.
3236             mPausedInt = false;
3237             mMyCond.signal();
3238         }
3239     }
3240 }
3241 
3242 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3243 {
3244     AutoMutex _l(mMyLock);
3245     mPausedInt = true;
3246     mPausedNs = ns;
3247 }
3248 
3249 } // namespace android
3250