/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/clock.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>
#include <media/MediaAnalyticsItem.h>
#include <media/TypeConverter.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120
static const int kMaxLoopCountNotifications = 32;

namespace android {
// ---------------------------------------------------------------------------

using media::VolumeShaper;

// TODO: Move to a separate .h

template <typename T>
static inline const T &min(const T &x, const T &y) {
    return x < y ? x : y;
}

template <typename T>
static inline const T &max(const T &x, const T &y) {
    return x > y ? x : y;
}

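// Convert a frame count to a duration in nanoseconds at the effective output rate,
// i.e. sampleRate scaled by the current playback speed.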
static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
{
    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
}

static int64_t convertTimespecToUs(const struct timespec &tv)
{
    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
}

// TODO move to audio_utils.
static inline struct timespec convertNsToTimespec(int64_t ns) {
    struct timespec tv;
    tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
    tv.tv_nsec = static_cast<long>(ns % NANOS_PER_SECOND);
    return tv;
}

// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec tv;
    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
    return convertTimespecToUs(tv);
}

// FIXME: we don't use the pitch setting in the time stretcher (not working);
// instead we emulate it using our sample rate converter.
static const bool kFixPitch = true; // enable pitch fix
static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
{
    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
}

static inline float adjustSpeed(float speed, float pitch)
{
    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
}

static inline float adjustPitch(float pitch)
{
    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
}
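// Example of the emulation above: a request of (speed 1.0, pitch 2.0) becomes sample rate x2.0,
// speed 0.5 and pitch AUDIO_TIMESTRETCH_PITCH_NORMAL; the resampler raises the pitch and the
// time stretcher restores the original duration.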

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
                                              sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
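// A minimal usage sketch (illustrative only; these names are not part of this file):
//     size_t minFrameCount;
//     if (AudioTrack::getMinFrameCount(&minFrameCount, AUDIO_STREAM_MUSIC, 48000) == NO_ERROR) {
//         // request at least minFrameCount frames when constructing the AudioTrack
//     }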

// ---------------------------------------------------------------------------

static std::string audioContentTypeString(audio_content_type_t value) {
    std::string contentType;
    if (AudioContentTypeConverter::toString(value, contentType)) {
        return contentType;
    }
    char rawbuffer[16];  // room for "%d"
    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
    return rawbuffer;
}

static std::string audioUsageString(audio_usage_t value) {
    std::string usage;
    if (UsageTypeConverter::toString(value, usage)) {
        return usage;
    }
    char rawbuffer[16];  // room for "%d"
    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
    return rawbuffer;
}

void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
{

    // key for media statistics is defined in the header
    // attrs for media statistics
    // NB: these are matched with public Java API constants defined
    // in frameworks/base/media/java/android/media/AudioTrack.java
    // These must be kept synchronized with the constants there.
    static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
    static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
    static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
    static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
    static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";

    // NB: These are not yet exposed as public Java API constants.
    static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
    static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";

    // only if we're in a good state...
    // XXX: shall we gather alternative info if failing?
    const status_t lstatus = track->initCheck();
    if (lstatus != NO_ERROR) {
        ALOGD("no metrics gathered, track status=%d", (int) lstatus);
        return;
    }

    // constructor guarantees mAnalyticsItem is valid

    const int32_t underrunFrames = track->getUnderrunFrames();
    if (underrunFrames != 0) {
        mAnalyticsItem->setInt32(kAudioTrackUnderrunFrames, underrunFrames);
    }

    if (track->mTimestampStartupGlitchReported) {
        mAnalyticsItem->setInt32(kAudioTrackStartupGlitch, 1);
    }

    if (track->mStreamType != -1) {
        // deprecated, but this will tell us who still uses it.
        mAnalyticsItem->setInt32(kAudioTrackStreamType, track->mStreamType);
    }
    // XXX: consider including from mAttributes: source type
    mAnalyticsItem->setCString(kAudioTrackContentType,
                               audioContentTypeString(track->mAttributes.content_type).c_str());
    mAnalyticsItem->setCString(kAudioTrackUsage,
                               audioUsageString(track->mAttributes.usage).c_str());
    mAnalyticsItem->setInt32(kAudioTrackSampleRate, track->mSampleRate);
    mAnalyticsItem->setInt64(kAudioTrackChannelMask, track->mChannelMask);
}

// hand the user a snapshot of the metrics.
status_t AudioTrack::getMetrics(MediaAnalyticsItem * &item)
{
    mMediaMetrics.gather(this);
    MediaAnalyticsItem *tmp = mMediaMetrics.dup();
    if (tmp == nullptr) {
        return BAD_VALUE;
    }
    item = tmp;
    return NO_ERROR;
}

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    (void)set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    (void)set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}

AudioTrack::~AudioTrack()
{
    // pull together the numbers, before we clean up our structures
    mMediaMetrics.gather(this);

    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mOutput);
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
{
    status_t status;
    uint32_t channelCount;
    pid_t callingPid;
    pid_t myPid;

    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;
    mSelectedDeviceId = selectedDeviceId;
    mSessionId = sessionId;

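    // Resolve TRANSFER_DEFAULT to a concrete transfer mode: a shared buffer implies
    // TRANSFER_SHARED; with no callback (or a callback that may call into Java) writes
    // are synchronous; otherwise data is supplied from the callback thread.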
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            status = BAD_VALUE;
            goto exit;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        status = BAD_VALUE;
        goto exit;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        status = INVALID_OPERATION;
        goto exit;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            status = BAD_VALUE;
            goto exit;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
        // check deep buffer after flags have been modified above
        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        status = BAD_VALUE;
        goto exit;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        status = BAD_VALUE;
        goto exit;
    }
    mChannelMask = channelMask;
    channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        status = BAD_VALUE;
        goto exit;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
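    // A non-negative notificationFrames is the requested notification period in frames;
    // a negative value requests -notificationFrames notifications per buffer (allowed only
    // for fast tracks with a server-sized buffer) and is clamped to the range [1, 8] below.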
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            status = BAD_VALUE;
            goto exit;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            status = BAD_VALUE;
            goto exit;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    callingPid = IPCThreadState::self()->getCallingPid();
    myPid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingPid != myPid)) {
        mClientPid = callingPid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status = createTrack_l();

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        goto exit;
    }

    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartNs = 0;
    mStartFromZeroUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new media::VolumeHandler();

exit:
    mStatus = status;
    return status;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();

    // save start timestamp
    if (isOffloadedOrDirect_l()) {
        if (getTimestamp_l(mStartTs) != OK) {
            mStartTs.mPosition = 0;
        }
    } else {
        if (getTimestamp_l(&mStartEts) != OK) {
            mStartEts.clear();
        }
    }
    mStartNs = systemTime(); // save this for timestamp adjustment after starting.
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        if (!isOffloadedOrDirect_l()
                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)mStartEts.mFlushed,
                    (long long)mFramesWritten);
            // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
            mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartFromZeroUs = mStartNs / 1000;

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }

        // Start our local VolumeHandler for restoration purposes.
        mVolumeHandler->setStarted();
    } else {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        ALOGD_IF(mSharedBuffer == nullptr,
                "stop() called with %u frames delivered", mReleased.value());
        mReleased = 0;
    }

    mProxy->stop(); // notify server not to read beyond current client position until start().
    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    if (rate == mSampleRate) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

uint32_t AudioTrack::getOriginalSampleRate() const
{
    return mOriginalSampleRate;
}

status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            effectiveRate, effectiveSpeed, effectivePitch);

    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGW("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGW("setPlaybackRate(%f, %f) failed (buffer size)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
            (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                        playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}

const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    AutoMutex lock(mLock);
    return mPlaybackRate;
}

ssize_t AudioTrack::getBufferSizeInFrames()
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    return (ssize_t) mProxy->getBufferSizeInFrames();
}

status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
{
    if (duration == nullptr) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
    if (bufferSizeInFrames < 0) {
        return (status_t)bufferSizeInFrames;
    }
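    // Duration is the buffer length in frames divided by the effective consumption rate
    // (sampleRate scaled by the playback speed), expressed in microseconds.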
1054     *duration = (int64_t)((double)bufferSizeInFrames * 1000000
1055             / ((double)mSampleRate * mPlaybackRate.mSpeed));
1056     return NO_ERROR;
1057 }
1058 
setBufferSizeInFrames(size_t bufferSizeInFrames)1059 ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
1060 {
1061     AutoMutex lock(mLock);
1062     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1063         return NO_INIT;
1064     }
1065     // Reject if timed track or compressed audio.
1066     if (!audio_is_linear_pcm(mFormat)) {
1067         return INVALID_OPERATION;
1068     }
1069     return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
1070 }
1071 
setLoop(uint32_t loopStart,uint32_t loopEnd,int loopCount)1072 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1073 {
1074     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1075         return INVALID_OPERATION;
1076     }
1077 
1078     if (loopCount == 0) {
1079         ;
1080     } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
1081             loopEnd - loopStart >= MIN_LOOP) {
1082         ;
1083     } else {
1084         return BAD_VALUE;
1085     }
1086 
1087     AutoMutex lock(mLock);
1088     // See setPosition() regarding setting parameters such as loop points or position while active
1089     if (mState == STATE_ACTIVE) {
1090         return INVALID_OPERATION;
1091     }
1092     setLoop_l(loopStart, loopEnd, loopCount);
1093     return NO_ERROR;
1094 }
1095 
setLoop_l(uint32_t loopStart,uint32_t loopEnd,int loopCount)1096 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1097 {
1098     // We do not update the periodic notification point.
1099     // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1100     mLoopCount = loopCount;
1101     mLoopEnd = loopEnd;
1102     mLoopStart = loopStart;
1103     mLoopCountNotified = loopCount;
1104     mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
1105 
1106     // Waking the AudioTrackThread is not needed as this cannot be called when active.
1107 }
1108 
setMarkerPosition(uint32_t marker)1109 status_t AudioTrack::setMarkerPosition(uint32_t marker)
1110 {
1111     // The only purpose of setting marker position is to get a callback
1112     if (mCbf == NULL || isOffloadedOrDirect()) {
1113         return INVALID_OPERATION;
1114     }
1115 
1116     AutoMutex lock(mLock);
1117     mMarkerPosition = marker;
1118     mMarkerReached = false;
1119 
1120     sp<AudioTrackThread> t = mAudioTrackThread;
1121     if (t != 0) {
1122         t->wake();
1123     }
1124     return NO_ERROR;
1125 }
1126 
getMarkerPosition(uint32_t * marker) const1127 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1128 {
1129     if (isOffloadedOrDirect()) {
1130         return INVALID_OPERATION;
1131     }
1132     if (marker == NULL) {
1133         return BAD_VALUE;
1134     }
1135 
1136     AutoMutex lock(mLock);
1137     mMarkerPosition.getValue(marker);
1138 
1139     return NO_ERROR;
1140 }
1141 
setPositionUpdatePeriod(uint32_t updatePeriod)1142 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1143 {
1144     // The only purpose of setting position update period is to get a callback
1145     if (mCbf == NULL || isOffloadedOrDirect()) {
1146         return INVALID_OPERATION;
1147     }
1148 
1149     AutoMutex lock(mLock);
1150     mNewPosition = updateAndGetPosition_l() + updatePeriod;
1151     mUpdatePeriod = updatePeriod;
1152 
1153     sp<AudioTrackThread> t = mAudioTrackThread;
1154     if (t != 0) {
1155         t->wake();
1156     }
1157     return NO_ERROR;
1158 }
1159 
getPositionUpdatePeriod(uint32_t * updatePeriod) const1160 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1161 {
1162     if (isOffloadedOrDirect()) {
1163         return INVALID_OPERATION;
1164     }
1165     if (updatePeriod == NULL) {
1166         return BAD_VALUE;
1167     }
1168 
1169     AutoMutex lock(mLock);
1170     *updatePeriod = mUpdatePeriod;
1171 
1172     return NO_ERROR;
1173 }
1174 
setPosition(uint32_t position)1175 status_t AudioTrack::setPosition(uint32_t position)
1176 {
1177     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1178         return INVALID_OPERATION;
1179     }
1180     if (position > mFrameCount) {
1181         return BAD_VALUE;
1182     }
1183 
1184     AutoMutex lock(mLock);
1185     // Currently we require that the player is inactive before setting parameters such as position
1186     // or loop points.  Otherwise, there could be a race condition: the application could read the
1187     // current position, compute a new position or loop parameters, and then set that position or
1188     // loop parameters but it would do the "wrong" thing since the position has continued to advance
1189     // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
1190     // to specify how it wants to handle such scenarios.
1191     if (mState == STATE_ACTIVE) {
1192         return INVALID_OPERATION;
1193     }
1194     // After setting the position, use full update period before notification.
1195     mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1196     mStaticProxy->setBufferPosition(position);
1197 
1198     // Waking the AudioTrackThread is not needed as this cannot be called when active.
1199     return NO_ERROR;
1200 }
1201 
getPosition(uint32_t * position)1202 status_t AudioTrack::getPosition(uint32_t *position)
1203 {
1204     if (position == NULL) {
1205         return BAD_VALUE;
1206     }
1207 
1208     AutoMutex lock(mLock);
1209     // FIXME: offloaded and direct tracks call into the HAL for render positions
1210     // for compressed/synced data; however, we use proxy position for pure linear pcm data
1211     // as we do not know the capability of the HAL for pcm position support and standby.
1212     // There may be some latency differences between the HAL position and the proxy position.
1213     if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
1214         uint32_t dspFrames = 0;
1215 
1216         if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1217             ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
1218             *position = mPausedPosition;
1219             return NO_ERROR;
1220         }
1221 
1222         if (mOutput != AUDIO_IO_HANDLE_NONE) {
1223             uint32_t halFrames; // actually unused
1224             (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
1225             // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1226         }
1227         // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1228         // due to hardware latency. We leave this behavior for now.
1229         *position = dspFrames;
1230     } else {
1231         if (mCblk->mFlags & CBLK_INVALID) {
1232             (void) restoreTrack_l("getPosition");
1233             // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1234             // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1235         }
1236 
1237         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1238         *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1239                 0 : updateAndGetPosition_l().value();
1240     }
1241     return NO_ERROR;
1242 }
1243 
getBufferPosition(uint32_t * position)1244 status_t AudioTrack::getBufferPosition(uint32_t *position)
1245 {
1246     if (mSharedBuffer == 0) {
1247         return INVALID_OPERATION;
1248     }
1249     if (position == NULL) {
1250         return BAD_VALUE;
1251     }
1252 
1253     AutoMutex lock(mLock);
1254     *position = mStaticProxy->getBufferPosition();
1255     return NO_ERROR;
1256 }
1257 
reload()1258 status_t AudioTrack::reload()
1259 {
1260     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1261         return INVALID_OPERATION;
1262     }
1263 
1264     AutoMutex lock(mLock);
1265     // See setPosition() regarding setting parameters such as loop points or position while active
1266     if (mState == STATE_ACTIVE) {
1267         return INVALID_OPERATION;
1268     }
1269     mNewPosition = mUpdatePeriod;
1270     (void) updateAndGetPosition_l();
1271     mPosition = 0;
1272     mPreviousTimestampValid = false;
1273 #if 0
1274     // The documentation is not clear on the behavior of reload() and the restoration
1275     // of loop count. Historically we have not restored loop count, start, end,
1276     // but it makes sense if one desires to repeat playing a particular sound.
1277     if (mLoopCount != 0) {
1278         mLoopCountNotified = mLoopCount;
1279         mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1280     }
1281 #endif
1282     mStaticProxy->setBufferPosition(0);
1283     return NO_ERROR;
1284 }
1285 
getOutput() const1286 audio_io_handle_t AudioTrack::getOutput() const
1287 {
1288     AutoMutex lock(mLock);
1289     return mOutput;
1290 }
1291 
setOutputDevice(audio_port_handle_t deviceId)1292 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1293     AutoMutex lock(mLock);
1294     if (mSelectedDeviceId != deviceId) {
1295         mSelectedDeviceId = deviceId;
1296         if (mStatus == NO_ERROR) {
1297             android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1298             mProxy->interrupt();
1299         }
1300     }
1301     return NO_ERROR;
1302 }
1303 
getOutputDevice()1304 audio_port_handle_t AudioTrack::getOutputDevice() {
1305     AutoMutex lock(mLock);
1306     return mSelectedDeviceId;
1307 }
1308 
1309 // must be called with mLock held
updateRoutedDeviceId_l()1310 void AudioTrack::updateRoutedDeviceId_l()
1311 {
1312     // if the track is inactive, do not update actual device as the output stream maybe routed
1313     // to a device not relevant to this client because of other active use cases.
1314     if (mState != STATE_ACTIVE) {
1315         return;
1316     }
1317     if (mOutput != AUDIO_IO_HANDLE_NONE) {
1318         audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
1319         if (deviceId != AUDIO_PORT_HANDLE_NONE) {
1320             mRoutedDeviceId = deviceId;
1321         }
1322     }
1323 }
1324 
getRoutedDeviceId()1325 audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1326     AutoMutex lock(mLock);
1327     updateRoutedDeviceId_l();
1328     return mRoutedDeviceId;
1329 }
1330 
attachAuxEffect(int effectId)1331 status_t AudioTrack::attachAuxEffect(int effectId)
1332 {
1333     AutoMutex lock(mLock);
1334     status_t status = mAudioTrack->attachAuxEffect(effectId);
1335     if (status == NO_ERROR) {
1336         mAuxEffectId = effectId;
1337     }
1338     return status;
1339 }
1340 
streamType() const1341 audio_stream_type_t AudioTrack::streamType() const
1342 {
1343     if (mStreamType == AUDIO_STREAM_DEFAULT) {
1344         return audio_attributes_to_stream_type(&mAttributes);
1345     }
1346     return mStreamType;
1347 }
1348 
latency()1349 uint32_t AudioTrack::latency()
1350 {
1351     AutoMutex lock(mLock);
1352     updateLatency_l();
1353     return mLatency;
1354 }
1355 
1356 // -------------------------------------------------------------------------
1357 
1358 // must be called with mLock held
updateLatency_l()1359 void AudioTrack::updateLatency_l()
1360 {
1361     status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
1362     if (status != NO_ERROR) {
1363         ALOGW("getLatency(%d) failed status %d", mOutput, status);
1364     } else {
1365         // FIXME don't believe this lie
1366         mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1367     }
1368 }
1369 
1370 // TODO Move this macro to a common header file for enum to string conversion in audio framework.
1371 #define MEDIA_CASE_ENUM(name) case name: return #name
convertTransferToText(transfer_type transferType)1372 const char * AudioTrack::convertTransferToText(transfer_type transferType) {
1373     switch (transferType) {
1374         MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
1375         MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
1376         MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
1377         MEDIA_CASE_ENUM(TRANSFER_SYNC);
1378         MEDIA_CASE_ENUM(TRANSFER_SHARED);
1379         default:
1380             return "UNRECOGNIZED";
1381     }
1382 }
1383 
createTrack_l()1384 status_t AudioTrack::createTrack_l()
1385 {
1386     status_t status;
1387     bool callbackAdded = false;
1388 
1389     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1390     if (audioFlinger == 0) {
1391         ALOGE("Could not get audioflinger");
1392         status = NO_INIT;
1393         goto exit;
1394     }
1395 
1396     {
1397     // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1398     // After fast request is denied, we will request again if IAudioTrack is re-created.
1399     // Client can only express a preference for FAST.  Server will perform additional tests.
1400     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1401         // either of these use cases:
1402         // use case 1: shared buffer
1403         bool sharedBuffer = mSharedBuffer != 0;
1404         bool transferAllowed =
1405             // use case 2: callback transfer mode
1406             (mTransfer == TRANSFER_CALLBACK) ||
1407             // use case 3: obtain/release mode
1408             (mTransfer == TRANSFER_OBTAIN) ||
1409             // use case 4: synchronous write
1410             ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
1411 
1412         bool fastAllowed = sharedBuffer || transferAllowed;
1413         if (!fastAllowed) {
1414             ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, not shared buffer and transfer = %s",
1415                   convertTransferToText(mTransfer));
1416             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1417         }
1418     }
1419 
1420     IAudioFlinger::CreateTrackInput input;
1421     if (mStreamType != AUDIO_STREAM_DEFAULT) {
1422         stream_type_to_audio_attributes(mStreamType, &input.attr);
1423     } else {
1424         input.attr = mAttributes;
1425     }
1426     input.config = AUDIO_CONFIG_INITIALIZER;
1427     input.config.sample_rate = mSampleRate;
1428     input.config.channel_mask = mChannelMask;
1429     input.config.format = mFormat;
1430     input.config.offload_info = mOffloadInfoCopy;
1431     input.clientInfo.clientUid = mClientUid;
1432     input.clientInfo.clientPid = mClientPid;
1433     input.clientInfo.clientTid = -1;
1434     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1435         // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
1436         // application-level code follows all non-blocking design rules, the language runtime
1437         // doesn't also follow those rules, so the thread will not benefit overall.
1438         if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1439             input.clientInfo.clientTid = mAudioTrackThread->getTid();
1440         }
1441     }
1442     input.sharedBuffer = mSharedBuffer;
1443     input.notificationsPerBuffer = mNotificationsPerBufferReq;
1444     input.speed = 1.0;
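    // input.speed lets the server size the track buffer with headroom for time-stretched
    // playback; the conditions below restrict this to mixed, streaming, proportional-frame PCM
    // tracks (shared-buffer, FAST, offloaded and direct outputs keep the default of 1.0).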
1445     if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
1446             (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
1447         input.speed  = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1448                         max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1449     }
1450     input.flags = mFlags;
1451     input.frameCount = mReqFrameCount;
1452     input.notificationFrameCount = mNotificationFramesReq;
1453     input.selectedDeviceId = mSelectedDeviceId;
1454     input.sessionId = mSessionId;
1455 
1456     IAudioFlinger::CreateTrackOutput output;
1457 
1458     sp<IAudioTrack> track = audioFlinger->createTrack(input,
1459                                                       output,
1460                                                       &status);
1461 
1462     if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
1463         ALOGE("AudioFlinger could not create track, status: %d output %d", status, output.outputId);
1464         if (status == NO_ERROR) {
1465             status = NO_INIT;
1466         }
1467         goto exit;
1468     }
1469     ALOG_ASSERT(track != 0);
1470 
1471     mFrameCount = output.frameCount;
1472     mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
1473     mRoutedDeviceId = output.selectedDeviceId;
1474     mSessionId = output.sessionId;
1475 
1476     mSampleRate = output.sampleRate;
1477     if (mOriginalSampleRate == 0) {
1478         mOriginalSampleRate = mSampleRate;
1479     }
1480 
1481     mAfFrameCount = output.afFrameCount;
1482     mAfSampleRate = output.afSampleRate;
1483     mAfLatency = output.afLatencyMs;
1484 
1485     mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
1486 
1487     // AudioFlinger now owns the reference to the I/O handle,
1488     // so we are no longer responsible for releasing it.
1489 
1490     // FIXME compare to AudioRecord
1491     sp<IMemory> iMem = track->getCblk();
1492     if (iMem == 0) {
1493         ALOGE("Could not get control block");
1494         status = NO_INIT;
1495         goto exit;
1496     }
1497     void *iMemPointer = iMem->pointer();
1498     if (iMemPointer == NULL) {
1499         ALOGE("Could not get control block pointer");
1500         status = NO_INIT;
1501         goto exit;
1502     }
1503     // invariant that mAudioTrack != 0 is true only after set() returns successfully
1504     if (mAudioTrack != 0) {
1505         IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1506         mDeathNotifier.clear();
1507     }
1508     mAudioTrack = track;
1509     mCblkMemory = iMem;
1510     IPCThreadState::self()->flushCommands();
1511 
1512     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1513     mCblk = cblk;
1514 
1515     mAwaitBoost = false;
1516     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1517         if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
1518             ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
1519                   mReqFrameCount, mFrameCount);
1520             if (!mThreadCanCallJava) {
1521                 mAwaitBoost = true;
1522             }
1523         } else {
1524             ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", mReqFrameCount,
1525                   mFrameCount);
1526         }
1527     }
1528     mFlags = output.flags;
1529 
1530     //mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
1531     if (mDeviceCallback != 0 && mOutput != output.outputId) {
1532         if (mOutput != AUDIO_IO_HANDLE_NONE) {
1533             AudioSystem::removeAudioDeviceCallback(this, mOutput);
1534         }
1535         AudioSystem::addAudioDeviceCallback(this, output.outputId);
1536         callbackAdded = true;
1537     }
1538 
1539     // We retain a copy of the I/O handle, but don't own the reference
1540     mOutput = output.outputId;
1541     mRefreshRemaining = true;
1542 
1543     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1544     // is the value of pointer() for the shared buffer, otherwise buffers points
1545     // immediately after the control block.  This address is for the mapping within client
1546     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1547     void* buffers;
1548     if (mSharedBuffer == 0) {
1549         buffers = cblk + 1;
1550     } else {
1551         buffers = mSharedBuffer->pointer();
1552         if (buffers == NULL) {
1553             ALOGE("Could not get buffer pointer");
1554             status = NO_INIT;
1555             goto exit;
1556         }
1557     }
1558 
1559     mAudioTrack->attachAuxEffect(mAuxEffectId);
1560 
1561     // If IAudioTrack is re-created, don't let the requested frameCount
1562     // decrease.  This can confuse clients that cache frameCount().
1563     if (mFrameCount > mReqFrameCount) {
1564         mReqFrameCount = mFrameCount;
1565     }
1566 
1567     // reset server position to 0 as we have new cblk.
1568     mServer = 0;
1569 
1570     // update proxy
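    // AudioTrackClientProxy manages the rolling client/server indices for streaming tracks,
    // while StaticAudioTrackClientProxy tracks a position and loop state within the
    // app-supplied shared buffer; both wrap the same control block (cblk) mapped above.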
1571     if (mSharedBuffer == 0) {
1572         mStaticProxy.clear();
1573         mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1574     } else {
1575         mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
1576         mProxy = mStaticProxy;
1577     }
1578 
1579     mProxy->setVolumeLR(gain_minifloat_pack(
1580             gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1581             gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1582 
1583     mProxy->setSendLevel(mSendLevel);
1584     const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1585     const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1586     const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1587     mProxy->setSampleRate(effectiveSampleRate);
1588 
1589     AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1590     playbackRateTemp.mSpeed = effectiveSpeed;
1591     playbackRateTemp.mPitch = effectivePitch;
1592     mProxy->setPlaybackRate(playbackRateTemp);
1593     mProxy->setMinimum(mNotificationFramesAct);
1594 
1595     mDeathNotifier = new DeathNotifier(this);
1596     IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1597 
1598     }
1599 
1600 exit:
1601     if (status != NO_ERROR && callbackAdded) {
1602         // note: mOutput is always valid if callbackAdded is true
1603         AudioSystem::removeAudioDeviceCallback(this, mOutput);
1604     }
1605 
1606     mStatus = status;
1607 
1608     // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
1609     return status;
1610 }
1611 
1612 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1613 {
1614     if (audioBuffer == NULL) {
1615         if (nonContig != NULL) {
1616             *nonContig = 0;
1617         }
1618         return BAD_VALUE;
1619     }
1620     if (mTransfer != TRANSFER_OBTAIN) {
1621         audioBuffer->frameCount = 0;
1622         audioBuffer->size = 0;
1623         audioBuffer->raw = NULL;
1624         if (nonContig != NULL) {
1625             *nonContig = 0;
1626         }
1627         return INVALID_OPERATION;
1628     }
1629 
1630     const struct timespec *requested;
1631     struct timespec timeout;
1632     if (waitCount == -1) {
1633         requested = &ClientProxy::kForever;
1634     } else if (waitCount == 0) {
1635         requested = &ClientProxy::kNonBlocking;
1636     } else if (waitCount > 0) {
1637         long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1638         timeout.tv_sec = ms / 1000;
1639         timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1640         requested = &timeout;
1641     } else {
1642         ALOGE("%s invalid waitCount %d", __func__, waitCount);
1643         requested = NULL;
1644     }
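    // In other words: waitCount -1 blocks indefinitely, 0 returns immediately, and a positive
    // count waits up to waitCount * WAIT_PERIOD_MS; for example waitCount = 3 gives a 30 ms
    // timeout with WAIT_PERIOD_MS = 10.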
1645     return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1646 }
1647 
1648 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1649         struct timespec *elapsed, size_t *nonContig)
1650 {
1651     // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1652     uint32_t oldSequence = 0;
1653     uint32_t newSequence;
1654 
1655     Proxy::Buffer buffer;
1656     status_t status = NO_ERROR;
1657 
1658     static const int32_t kMaxTries = 5;
1659     int32_t tryCounter = kMaxTries;
1660 
1661     do {
1662         // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1663         // keep them from going away if another thread re-creates the track during obtainBuffer()
1664         sp<AudioTrackClientProxy> proxy;
1665         sp<IMemory> iMem;
1666 
1667         {   // start of lock scope
1668             AutoMutex lock(mLock);
1669 
1670             newSequence = mSequence;
1671             // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1672             if (status == DEAD_OBJECT) {
1673                 // re-create track, unless someone else has already done so
1674                 if (newSequence == oldSequence) {
1675                     status = restoreTrack_l("obtainBuffer");
1676                     if (status != NO_ERROR) {
1677                         buffer.mFrameCount = 0;
1678                         buffer.mRaw = NULL;
1679                         buffer.mNonContig = 0;
1680                         break;
1681                     }
1682                 }
1683             }
1684             oldSequence = newSequence;
1685 
1686             if (status == NOT_ENOUGH_DATA) {
1687                 restartIfDisabled();
1688             }
1689 
1690             // Keep the extra references
1691             proxy = mProxy;
1692             iMem = mCblkMemory;
1693 
1694             if (mState == STATE_STOPPING) {
1695                 status = -EINTR;
1696                 buffer.mFrameCount = 0;
1697                 buffer.mRaw = NULL;
1698                 buffer.mNonContig = 0;
1699                 break;
1700             }
1701 
1702             // Non-blocking if track is stopped or paused
1703             if (mState != STATE_ACTIVE) {
1704                 requested = &ClientProxy::kNonBlocking;
1705             }
1706 
1707         }   // end of lock scope
1708 
1709         buffer.mFrameCount = audioBuffer->frameCount;
1710         // FIXME starts the requested timeout and elapsed over from scratch
1711         status = proxy->obtainBuffer(&buffer, requested, elapsed);
1712     } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
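    // The loop above retries up to kMaxTries times: DEAD_OBJECT triggers track re-creation via
    // restoreTrack_l(), and NOT_ENOUGH_DATA restarts a track that the server disabled after an
    // underrun; any other status (or exhausting the retries) falls through with the last result.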
1713 
1714     audioBuffer->frameCount = buffer.mFrameCount;
1715     audioBuffer->size = buffer.mFrameCount * mFrameSize;
1716     audioBuffer->raw = buffer.mRaw;
1717     if (nonContig != NULL) {
1718         *nonContig = buffer.mNonContig;
1719     }
1720     return status;
1721 }
1722 
1723 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1724 {
1725     // FIXME add error checking on mode, by adding an internal version
1726     if (mTransfer == TRANSFER_SHARED) {
1727         return;
1728     }
1729 
1730     size_t stepCount = audioBuffer->size / mFrameSize;
1731     if (stepCount == 0) {
1732         return;
1733     }
1734 
1735     Proxy::Buffer buffer;
1736     buffer.mFrameCount = stepCount;
1737     buffer.mRaw = audioBuffer->raw;
1738 
1739     AutoMutex lock(mLock);
1740     mReleased += stepCount;
1741     mInUnderrun = false;
1742     mProxy->releaseBuffer(&buffer);
1743 
1744     // restart track if it was disabled by audioflinger due to previous underrun
1745     restartIfDisabled();
1746 }
1747 
1748 void AudioTrack::restartIfDisabled()
1749 {
1750     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1751     if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1752         ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1753         // FIXME ignoring status
1754         mAudioTrack->start();
1755     }
1756 }
1757 
1758 // -------------------------------------------------------------------------
1759 
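// A minimal usage sketch for this blocking write path (assuming a track created in
// TRANSFER_SYNC mode and already started):
//
//     sp<AudioTrack> track = ...;        // configured for TRANSFER_SYNC
//     track->start();
//     ssize_t written = track->write(pcmData, pcmBytes, true /*blocking*/);
//     if (written < 0) { /* error, e.g. DEAD_OBJECT or WOULD_BLOCK */ }
//
// A short write (0 <= written < pcmBytes) is possible in non-blocking mode, or after an error
// once some data has already been accepted.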
1760 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1761 {
1762     if (mTransfer != TRANSFER_SYNC) {
1763         return INVALID_OPERATION;
1764     }
1765 
1766     if (isDirect()) {
1767         AutoMutex lock(mLock);
1768         int32_t flags = android_atomic_and(
1769                             ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1770                             &mCblk->mFlags);
1771         if (flags & CBLK_INVALID) {
1772             return DEAD_OBJECT;
1773         }
1774     }
1775 
1776     if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1777         // Sanity-check: user is most-likely passing an error code, and it would
1778         // make the return value ambiguous (actualSize vs error).
1779         ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1780         return BAD_VALUE;
1781     }
1782 
1783     size_t written = 0;
1784     Buffer audioBuffer;
1785 
1786     while (userSize >= mFrameSize) {
1787         audioBuffer.frameCount = userSize / mFrameSize;
1788 
1789         status_t err = obtainBuffer(&audioBuffer,
1790                 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1791         if (err < 0) {
1792             if (written > 0) {
1793                 break;
1794             }
1795             if (err == TIMED_OUT || err == -EINTR) {
1796                 err = WOULD_BLOCK;
1797             }
1798             return ssize_t(err);
1799         }
1800 
1801         size_t toWrite = audioBuffer.size;
1802         memcpy(audioBuffer.i8, buffer, toWrite);
1803         buffer = ((const char *) buffer) + toWrite;
1804         userSize -= toWrite;
1805         written += toWrite;
1806 
1807         releaseBuffer(&audioBuffer);
1808     }
1809 
1810     if (written > 0) {
1811         mFramesWritten += written / mFrameSize;
1812     }
1813     return written;
1814 }
1815 
1816 // -------------------------------------------------------------------------
1817 
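// Runs on the AudioTrackThread. Broadly, the return value tells that thread how long to wait
// before calling again: 0 requests an immediate re-run, NS_INACTIVE parks the thread until the
// track is restarted, NS_NEVER ends callbacks, NS_WHENEVER leaves the timing to the thread, and
// any other value is a wait time in nanoseconds.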
1818 nsecs_t AudioTrack::processAudioBuffer()
1819 {
1820     // Currently the AudioTrack thread is not created if there are no callbacks.
1821     // Would it ever make sense to run the thread, even without callbacks?
1822     // If so, then replace this by checks at each use for mCbf != NULL.
1823     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1824 
1825     mLock.lock();
1826     if (mAwaitBoost) {
1827         mAwaitBoost = false;
1828         mLock.unlock();
1829         static const int32_t kMaxTries = 5;
1830         int32_t tryCounter = kMaxTries;
1831         uint32_t pollUs = 10000;
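        // Poll for the priority boost with exponential backoff: sleep 10 ms, then 20 ms, 40 ms,
        // and so on, until the scheduler reports SCHED_FIFO or SCHED_RR or the retries run out.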
1832         do {
1833             int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
1834             if (policy == SCHED_FIFO || policy == SCHED_RR) {
1835                 break;
1836             }
1837             usleep(pollUs);
1838             pollUs <<= 1;
1839         } while (tryCounter-- > 0);
1840         if (tryCounter < 0) {
1841             ALOGE("did not receive expected priority boost on time");
1842         }
1843         // Run again immediately
1844         return 0;
1845     }
1846 
1847     // Can only reference mCblk while locked
1848     int32_t flags = android_atomic_and(
1849         ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1850 
1851     // Check for track invalidation
1852     if (flags & CBLK_INVALID) {
1853         // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1854         // AudioSystem cache. We should not exit here but after calling the callback so
1855         // that the upper layers can recreate the track
1856         if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1857             status_t status __unused = restoreTrack_l("processAudioBuffer");
1858             // FIXME unused status
1859             // after restoration, continue below to make sure that the loop and buffer events
1860             // are notified because they have been cleared from mCblk->mFlags above.
1861         }
1862     }
1863 
1864     bool waitStreamEnd = mState == STATE_STOPPING;
1865     bool active = mState == STATE_ACTIVE;
1866 
1867     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1868     bool newUnderrun = false;
1869     if (flags & CBLK_UNDERRUN) {
1870 #if 0
1871         // Currently in shared buffer mode, when the server reaches the end of buffer,
1872         // the track stays active in continuous underrun state.  It's up to the application
1873         // to pause or stop the track, or set the position to a new offset within buffer.
1874         // This was some experimental code to auto-pause on underrun.   Keeping it here
1875         // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1876         if (mTransfer == TRANSFER_SHARED) {
1877             mState = STATE_PAUSED;
1878             active = false;
1879         }
1880 #endif
1881         if (!mInUnderrun) {
1882             mInUnderrun = true;
1883             newUnderrun = true;
1884         }
1885     }
1886 
1887     // Get current position of server
1888     Modulo<uint32_t> position(updateAndGetPosition_l());
1889 
1890     // Manage marker callback
1891     bool markerReached = false;
1892     Modulo<uint32_t> markerPosition(mMarkerPosition);
1893     // uses 32 bit wraparound for comparison with position.
1894     if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1895         mMarkerReached = markerReached = true;
1896     }
1897 
1898     // Determine number of new position callback(s) that will be needed, while locked
1899     size_t newPosCount = 0;
1900     Modulo<uint32_t> newPosition(mNewPosition);
1901     uint32_t updatePeriod = mUpdatePeriod;
1902     // FIXME fails for wraparound, need 64 bits
1903     if (updatePeriod > 0 && position >= newPosition) {
1904         newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1905         mNewPosition += updatePeriod * newPosCount;
1906     }
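    // For example, if updatePeriod is 4800 frames and the server position has advanced 9600
    // frames past mNewPosition, newPosCount becomes 3 and mNewPosition moves forward by
    // 3 * 4800 frames, so EVENT_NEW_POS fires three times below.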
1907 
1908     // Cache other fields that will be needed soon
1909     uint32_t sampleRate = mSampleRate;
1910     float speed = mPlaybackRate.mSpeed;
1911     const uint32_t notificationFrames = mNotificationFramesAct;
1912     if (mRefreshRemaining) {
1913         mRefreshRemaining = false;
1914         mRemainingFrames = notificationFrames;
1915         mRetryOnPartialBuffer = false;
1916     }
1917     size_t misalignment = mProxy->getMisalignment();
1918     uint32_t sequence = mSequence;
1919     sp<AudioTrackClientProxy> proxy = mProxy;
1920 
1921     // Determine the number of new loop callback(s) that will be needed, while locked.
1922     int loopCountNotifications = 0;
1923     uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1924 
1925     if (mLoopCount > 0) {
1926         int loopCount;
1927         size_t bufferPosition;
1928         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1929         loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1930         loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1931         mLoopCountNotified = loopCount; // discard any excess notifications
1932     } else if (mLoopCount < 0) {
1933         // FIXME: We're not accurate with notification count and position with infinite looping
1934         // since loopCount from server side will always return -1 (we could decrement it).
1935         size_t bufferPosition = mStaticProxy->getBufferPosition();
1936         loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1937         loopPeriod = mLoopEnd - bufferPosition;
1938     } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1939         size_t bufferPosition = mStaticProxy->getBufferPosition();
1940         loopPeriod = mFrameCount - bufferPosition;
1941     }
1942 
1943     // These fields don't need to be cached, because they are assigned only by set():
1944     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1945     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1946 
1947     mLock.unlock();
1948 
1949     // get anchor time to account for callbacks.
1950     const nsecs_t timeBeforeCallbacks = systemTime();
1951 
1952     if (waitStreamEnd) {
1953         // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1954         // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1955         // (and make sure we don't callback for more data while we're stopping).
1956         // This helps with position, marker notifications, and track invalidation.
1957         struct timespec timeout;
1958         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1959         timeout.tv_nsec = 0;
1960 
1961         status_t status = proxy->waitStreamEndDone(&timeout);
1962         switch (status) {
1963         case NO_ERROR:
1964         case DEAD_OBJECT:
1965         case TIMED_OUT:
1966             if (status != DEAD_OBJECT) {
1967                 // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
1968                 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
1969                 mCbf(EVENT_STREAM_END, mUserData, NULL);
1970             }
1971             {
1972                 AutoMutex lock(mLock);
1973                 // The previously assigned value of waitStreamEnd is no longer valid,
1974                 // since the mutex has been unlocked and either the callback handler
1975                 // or another thread could have re-started the AudioTrack during that time.
1976                 waitStreamEnd = mState == STATE_STOPPING;
1977                 if (waitStreamEnd) {
1978                     mState = STATE_STOPPED;
1979                     mReleased = 0;
1980                 }
1981             }
1982             if (waitStreamEnd && status != DEAD_OBJECT) {
1983                return NS_INACTIVE;
1984             }
1985             break;
1986         }
1987         return 0;
1988     }
1989 
1990     // perform callbacks while unlocked
1991     if (newUnderrun) {
1992         mCbf(EVENT_UNDERRUN, mUserData, NULL);
1993     }
1994     while (loopCountNotifications > 0) {
1995         mCbf(EVENT_LOOP_END, mUserData, NULL);
1996         --loopCountNotifications;
1997     }
1998     if (flags & CBLK_BUFFER_END) {
1999         mCbf(EVENT_BUFFER_END, mUserData, NULL);
2000     }
2001     if (markerReached) {
2002         mCbf(EVENT_MARKER, mUserData, &markerPosition);
2003     }
2004     while (newPosCount > 0) {
2005         size_t temp = newPosition.value(); // FIXME size_t != uint32_t
2006         mCbf(EVENT_NEW_POS, mUserData, &temp);
2007         newPosition += updatePeriod;
2008         newPosCount--;
2009     }
2010 
2011     if (mObservedSequence != sequence) {
2012         mObservedSequence = sequence;
2013         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
2014         // for offloaded tracks, just wait for the upper layers to recreate the track
2015         if (isOffloadedOrDirect()) {
2016             return NS_INACTIVE;
2017         }
2018     }
2019 
2020     // if inactive, then don't run me again until re-started
2021     if (!active) {
2022         return NS_INACTIVE;
2023     }
2024 
2025     // Compute the estimated time until the next timed event (position, markers, loops)
2026     // FIXME only for non-compressed audio
2027     uint32_t minFrames = ~0;
2028     if (!markerReached && position < markerPosition) {
2029         minFrames = (markerPosition - position).value();
2030     }
2031     if (loopPeriod > 0 && loopPeriod < minFrames) {
2032         // loopPeriod is already adjusted for actual position.
2033         minFrames = loopPeriod;
2034     }
2035     if (updatePeriod > 0) {
2036         minFrames = min(minFrames, (newPosition - position).value());
2037     }
2038 
2039     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
2040     static const uint32_t kPoll = 0;
2041     if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2042         minFrames = kPoll * notificationFrames;
2043     }
2044 
2045     // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2046     static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2047     const nsecs_t timeAfterCallbacks = systemTime();
2048 
2049     // Convert frame units to time units
2050     nsecs_t ns = NS_WHENEVER;
2051     if (minFrames != (uint32_t) ~0) {
2052         // AudioFlinger consumption of client data may be irregular when coming out of device
2053         // standby since the kernel buffers require filling. This is throttled to no more than 2x
2054         // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
2055         // half (but no more than half a second) to improve callback accuracy during these temporary
2056         // data surges.
2057         const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
2058         constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
2059         ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
2060         ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
2061         // TODO: Should we warn if the callback time is too long?
2062         if (ns < 0) ns = 0;
2063     }
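    // For example, with minFrames equivalent to 500 ms of audio, the wait becomes
    // 500 - 250 + 10 = 260 ms (before subtracting the time already spent in callbacks);
    // the 10 ms term is kWaitPeriodNs and the reduction is capped at half a second.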
2064 
2065     // If not supplying data by EVENT_MORE_DATA, then we're done
2066     if (mTransfer != TRANSFER_CALLBACK) {
2067         return ns;
2068     }
2069 
2070     // EVENT_MORE_DATA callback handling.
2071     // Timing for linear pcm audio data formats can be derived directly from the
2072     // buffer fill level.
2073     // Timing for compressed data is not directly available from the buffer fill level,
2074     // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2075     // to return a certain fill level.
2076 
2077     struct timespec timeout;
2078     const struct timespec *requested = &ClientProxy::kForever;
2079     if (ns != NS_WHENEVER) {
2080         timeout.tv_sec = ns / 1000000000LL;
2081         timeout.tv_nsec = ns % 1000000000LL;
2082         ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2083         requested = &timeout;
2084     }
2085 
2086     size_t writtenFrames = 0;
2087     while (mRemainingFrames > 0) {
2088 
2089         Buffer audioBuffer;
2090         audioBuffer.frameCount = mRemainingFrames;
2091         size_t nonContig;
2092         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2093         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2094                 "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
2095         requested = &ClientProxy::kNonBlocking;
2096         size_t avail = audioBuffer.frameCount + nonContig;
2097         ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2098                 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2099         if (err != NO_ERROR) {
2100             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2101                     (isOffloaded() && (err == DEAD_OBJECT))) {
2102                 // FIXME bug 25195759
2103                 return 1000000;
2104             }
2105             ALOGE("Error %d obtaining an audio buffer, giving up.", err);
2106             return NS_NEVER;
2107         }
2108 
2109         if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2110             mRetryOnPartialBuffer = false;
2111             if (avail < mRemainingFrames) {
2112                 if (ns > 0) { // account for obtain time
2113                     const nsecs_t timeNow = systemTime();
2114                     ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2115                 }
2116                 nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2117                 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2118                     ns = myns;
2119                 }
2120                 return ns;
2121             }
2122         }
2123 
2124         size_t reqSize = audioBuffer.size;
2125         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
2126         size_t writtenSize = audioBuffer.size;
2127 
2128         // Sanity check on returned size
2129         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2130             ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2131                     reqSize, ssize_t(writtenSize));
2132             return NS_NEVER;
2133         }
2134 
2135         if (writtenSize == 0) {
2136             // The callback is done filling buffers
2137             // Keep this thread going to handle timed events and
2138             // still try to get more data in intervals of WAIT_PERIOD_MS
2139             // but don't just loop and block the CPU, so wait
2140 
2141             // mCbf(EVENT_MORE_DATA, ...) might either
2142             // (1) Block until it can fill the buffer, returning 0 size on EOS.
2143             // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2144             // (3) Return 0 size when no data is available, does not wait for more data.
2145             //
2146             // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2147             // We try to compute the wait time to avoid a tight sleep-wait cycle,
2148             // especially for case (3).
2149             //
2150             // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2151             // and this loop; whereas for case (3) we could simply check once with the full
2152             // buffer size and skip the loop entirely.
2153 
2154             nsecs_t myns;
2155             if (audio_has_proportional_frames(mFormat)) {
2156                 // time to wait based on buffer occupancy
2157                 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2158                         framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2159                 // audio flinger thread buffer size (TODO: adjust for fast tracks)
2160                 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2161                 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2162                 // add a half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2163                 myns = datans + (afns / 2);
2164             } else {
2165                 // FIXME: This could ping quite a bit if the buffer isn't full.
2166                 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2167                 myns = kWaitPeriodNs;
2168             }
2169             if (ns > 0) { // account for obtain and callback time
2170                 const nsecs_t timeNow = systemTime();
2171                 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2172             }
2173             if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2174                 ns = myns;
2175             }
2176             return ns;
2177         }
2178 
2179         size_t releasedFrames = writtenSize / mFrameSize;
2180         audioBuffer.frameCount = releasedFrames;
2181         mRemainingFrames -= releasedFrames;
2182         if (misalignment >= releasedFrames) {
2183             misalignment -= releasedFrames;
2184         } else {
2185             misalignment = 0;
2186         }
2187 
2188         releaseBuffer(&audioBuffer);
2189         writtenFrames += releasedFrames;
2190 
2191         // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2192         // if callback doesn't like to accept the full chunk
2193         if (writtenSize < reqSize) {
2194             continue;
2195         }
2196 
2197         // There could be enough non-contiguous frames available to satisfy the remaining request
2198         if (mRemainingFrames <= nonContig) {
2199             continue;
2200         }
2201 
2202 #if 0
2203         // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2204         // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2205         // that total to a sum == notificationFrames.
2206         if (0 < misalignment && misalignment <= mRemainingFrames) {
2207             mRemainingFrames = misalignment;
2208             return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2209         }
2210 #endif
2211 
2212     }
2213     if (writtenFrames > 0) {
2214         AutoMutex lock(mLock);
2215         mFramesWritten += writtenFrames;
2216     }
2217     mRemainingFrames = notificationFrames;
2218     mRetryOnPartialBuffer = true;
2219 
2220     // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2221     return 0;
2222 }
2223 
2224 status_t AudioTrack::restoreTrack_l(const char *from)
2225 {
2226     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2227           isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2228     ++mSequence;
2229 
2230     // refresh the audio configuration cache in this process to make sure we get new
2231     // output parameters and new IAudioFlinger in createTrack_l()
2232     AudioSystem::clearAudioConfigCache();
2233 
2234     if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2235         // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2236         // reconsider enabling for linear PCM encodings when position can be preserved.
2237         return DEAD_OBJECT;
2238     }
2239 
2240     // Save so we can return count since creation.
2241     mUnderrunCountOffset = getUnderrunCount_l();
2242 
2243     // save the old static buffer position
2244     uint32_t staticPosition = 0;
2245     size_t bufferPosition = 0;
2246     int loopCount = 0;
2247     if (mStaticProxy != 0) {
2248         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2249         staticPosition = mStaticProxy->getPosition().unsignedValue();
2250     }
2251 
2252     // See b/74409267. Connecting to a BT A2DP device supporting multiple codecs
2253     // causes a lot of churn on the service side, and it can reject starting
2254     // playback of a previously created track. May also apply to other cases.
2255     const int INITIAL_RETRIES = 3;
2256     int retries = INITIAL_RETRIES;
2257 retry:
2258     if (retries < INITIAL_RETRIES) {
2259         // See the comment for clearAudioConfigCache at the start of the function.
2260         AudioSystem::clearAudioConfigCache();
2261     }
2262     mFlags = mOrigFlags;
2263 
2264     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2265     // following member variables: mAudioTrack, mCblkMemory and mCblk.
2266     // It will also delete the strong references on previous IAudioTrack and IMemory.
2267     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2268     status_t result = createTrack_l();
2269 
2270     if (result != NO_ERROR) {
2271         ALOGW("%s(): createTrack_l failed, do not retry", __func__);
2272         retries = 0;
2273     } else {
2274         // take the frames that will be lost by track recreation into account in saved position
2275         // For streaming tracks, this is the amount we obtained from the user/client
2276         // (not the number actually consumed at the server - those are already lost).
2277         if (mStaticProxy == 0) {
2278             mPosition = mReleased;
2279         }
2280         // Continue playback from last known position and restore loop.
2281         if (mStaticProxy != 0) {
2282             if (loopCount != 0) {
2283                 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2284                         mLoopStart, mLoopEnd, loopCount);
2285             } else {
2286                 mStaticProxy->setBufferPosition(bufferPosition);
2287                 if (bufferPosition == mFrameCount) {
2288                     ALOGD("restoring track at end of static buffer");
2289                 }
2290             }
2291         }
2292         // restore volume handler
2293         mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2294             sp<VolumeShaper::Operation> operationToEnd =
2295                     new VolumeShaper::Operation(shaper.mOperation);
2296             // TODO: Ideally we would restore to the exact xOffset position
2297             // as returned by getVolumeShaperState(), but we don't have that
2298             // information when restoring at the client unless we periodically poll
2299             // the server or create shared memory state.
2300             //
2301             // For now, we simply advance to the end of the VolumeShaper effect
2302             // if it has been started.
2303             if (shaper.isStarted()) {
2304                 operationToEnd->setNormalizedTime(1.f);
2305             }
2306             return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
2307         });
2308 
2309         if (mState == STATE_ACTIVE) {
2310             result = mAudioTrack->start();
2311         }
2312         // server resets to zero so we offset
2313         mFramesWrittenServerOffset =
2314                 mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2315         mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2316     }
2317     if (result != NO_ERROR) {
2318         ALOGW("%s() failed status %d, retries %d", __func__, result, retries);
2319         if (--retries > 0) {
2320             goto retry;
2321         }
2322         mState = STATE_STOPPED;
2323         mReleased = 0;
2324     }
2325 
2326     return result;
2327 }
2328 
2329 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2330 {
2331     // This is the sole place to read server consumed frames
2332     Modulo<uint32_t> newServer(mProxy->getPosition());
2333     const int32_t delta = (newServer - mServer).signedValue();
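    // The positions are free-running 32-bit counters, so the Modulo difference plus
    // signedValue() yields the frame advance even across wraparound; for example
    // 0x00000010 - 0xFFFFFFF0 gives +32 frames rather than a large negative number.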
2334     // TODO There is controversy about whether there can be "negative jitter" in server position.
2335     //      This should be investigated further, and if possible, it should be addressed.
2336     //      A more definite failure mode is infrequent polling by client.
2337     //      One could call (void)getPosition_l() in releaseBuffer(),
2338     //      so mReleased and mPosition are always lock-step as best possible.
2339     //      That should ensure delta never goes negative for infrequent polling
2340     //      unless the server has more than 2^31 frames in its buffer,
2341     //      in which case the use of uint32_t for these counters has bigger issues.
2342     ALOGE_IF(delta < 0,
2343             "detected illegal retrograde motion by the server: mServer advanced by %d",
2344             delta);
2345     mServer = newServer;
2346     if (delta > 0) { // avoid retrograde
2347         mPosition += delta;
2348     }
2349     return mPosition;
2350 }
2351 
2352 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2353 {
2354     updateLatency_l();
2355     // applicable for mixing tracks only (not offloaded or direct)
2356     if (mStaticProxy != 0) {
2357         return true; // static tracks do not have issues with buffer sizing.
2358     }
2359     const size_t minFrameCount =
2360             AudioSystem::calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate,
2361                                             sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2362     const bool allowed = mFrameCount >= minFrameCount;
2363     ALOGD_IF(!allowed,
2364             "isSampleRateSpeedAllowed_l denied "
2365             "mAfLatency:%u  mAfFrameCount:%zu  mAfSampleRate:%u  sampleRate:%u  speed:%f "
2366             "mFrameCount:%zu < minFrameCount:%zu",
2367             mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2368             mFrameCount, minFrameCount);
2369     return allowed;
2370 }
2371 
2372 status_t AudioTrack::setParameters(const String8& keyValuePairs)
2373 {
2374     AutoMutex lock(mLock);
2375     return mAudioTrack->setParameters(keyValuePairs);
2376 }
2377 
2378 status_t AudioTrack::selectPresentation(int presentationId, int programId)
2379 {
2380     AutoMutex lock(mLock);
2381     AudioParameter param = AudioParameter();
2382     param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
2383     param.addInt(String8(AudioParameter::keyProgramId), programId);
2384     ALOGV("PresentationId/ProgramId[%s]",param.toString().string());
2385 
2386     return mAudioTrack->setParameters(param.toString());
2387 }
2388 
2389 VolumeShaper::Status AudioTrack::applyVolumeShaper(
2390         const sp<VolumeShaper::Configuration>& configuration,
2391         const sp<VolumeShaper::Operation>& operation)
2392 {
2393     AutoMutex lock(mLock);
2394     mVolumeHandler->setIdIfNecessary(configuration);
2395     VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2396 
2397     if (status == DEAD_OBJECT) {
2398         if (restoreTrack_l("applyVolumeShaper") == OK) {
2399             status = mAudioTrack->applyVolumeShaper(configuration, operation);
2400         }
2401     }
2402     if (status >= 0) {
2403         // save VolumeShaper for restore
2404         mVolumeHandler->applyVolumeShaper(configuration, operation);
2405         if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2406             mVolumeHandler->setStarted();
2407         }
2408     } else {
2409         // warn only if not an expected restore failure.
2410         ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2411                 "applyVolumeShaper failed: %d", status);
2412     }
2413     return status;
2414 }
2415 
2416 sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2417 {
2418     AutoMutex lock(mLock);
2419     sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2420     if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2421         if (restoreTrack_l("getVolumeShaperState") == OK) {
2422             state = mAudioTrack->getVolumeShaperState(id);
2423         }
2424     }
2425     return state;
2426 }
2427 
2428 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2429 {
2430     if (timestamp == nullptr) {
2431         return BAD_VALUE;
2432     }
2433     AutoMutex lock(mLock);
2434     return getTimestamp_l(timestamp);
2435 }
2436 
2437 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2438 {
2439     if (mCblk->mFlags & CBLK_INVALID) {
2440         const status_t status = restoreTrack_l("getTimestampExtended");
2441         if (status != OK) {
2442             // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2443             // recommending that the track be recreated.
2444             return DEAD_OBJECT;
2445         }
2446     }
2447     // check for offloaded/direct here in case restoring somehow changed those flags.
2448     if (isOffloadedOrDirect_l()) {
2449         return INVALID_OPERATION; // not supported
2450     }
2451     status_t status = mProxy->getTimestamp(timestamp);
2452     LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
2453     bool found = false;
2454     timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2455     timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2456     // server side frame offset in case AudioTrack has been restored.
2457     for (int i = ExtendedTimestamp::LOCATION_SERVER;
2458             i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2459         if (timestamp->mTimeNs[i] >= 0) {
2460             // apply server offset (frames flushed is ignored
2461             // so we don't report the jump when the flush occurs).
2462             timestamp->mPosition[i] += mFramesWrittenServerOffset;
2463             found = true;
2464         }
2465     }
2466     return found ? OK : WOULD_BLOCK;
2467 }
2468 
2469 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2470 {
2471     AutoMutex lock(mLock);
2472     return getTimestamp_l(timestamp);
2473 }
2474 
2475 status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
2476 {
2477     bool previousTimestampValid = mPreviousTimestampValid;
2478     // Set false here to cover all the error return cases.
2479     mPreviousTimestampValid = false;
2480 
2481     switch (mState) {
2482     case STATE_ACTIVE:
2483     case STATE_PAUSED:
2484         break; // handle below
2485     case STATE_FLUSHED:
2486     case STATE_STOPPED:
2487         return WOULD_BLOCK;
2488     case STATE_STOPPING:
2489     case STATE_PAUSED_STOPPING:
2490         if (!isOffloaded_l()) {
2491             return INVALID_OPERATION;
2492         }
2493         break; // offloaded tracks handled below
2494     default:
2495         LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2496         break;
2497     }
2498 
2499     if (mCblk->mFlags & CBLK_INVALID) {
2500         const status_t status = restoreTrack_l("getTimestamp");
2501         if (status != OK) {
2502             // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2503             // recommending that the track be recreated.
2504             return DEAD_OBJECT;
2505         }
2506     }
2507 
2508     // The presented frame count must always lag behind the consumed frame count.
2509     // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2510 
2511     status_t status;
2512     if (isOffloadedOrDirect_l()) {
2513         // use Binder to get timestamp
2514         status = mAudioTrack->getTimestamp(timestamp);
2515     } else {
2516         // read timestamp from shared memory
2517         ExtendedTimestamp ets;
2518         status = mProxy->getTimestamp(&ets);
2519         if (status == OK) {
2520             ExtendedTimestamp::Location location;
2521             status = ets.getBestTimestamp(&timestamp, &location);
2522 
2523             if (status == OK) {
2524                 updateLatency_l();
2525                 // It is possible that the best location has moved from the kernel to the server.
2526                 // In this case we adjust the position from the previous computed latency.
2527                 if (location == ExtendedTimestamp::LOCATION_SERVER) {
2528                     ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2529                             "getTimestamp() location moved from kernel to server");
2530                     // check that the last kernel OK time info exists and the positions
2531                     // are valid (if they predate the current track, the positions may
2532                     // be zero or negative).
2533                     const int64_t frames =
2534                             (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2535                             ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2536                             ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2537                             ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2538                             ?
2539                             int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2540                                     / 1000)
2541                             :
2542                             (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2543                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2544                     ALOGV("frame adjustment:%lld  timestamp:%s",
2545                             (long long)frames, ets.toString().c_str());
2546                     if (frames >= ets.mPosition[location]) {
2547                         timestamp.mPosition = 0;
2548                     } else {
2549                         timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2550                     }
2551                 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2552                     ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2553                             "getTimestamp() location moved from server to kernel");
2554                 }
2555 
2556                 // We update the timestamp time even when paused.
2557                 if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
2558                     const int64_t now = systemTime();
2559                     const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
2560                     const int64_t lag =
2561                             (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2562                                 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
2563                             ? int64_t(mAfLatency * 1000000LL)
2564                             : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2565                              - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
2566                              * NANOS_PER_SECOND / mSampleRate;
2567                     const int64_t limit = now - lag; // no earlier than this limit
2568                     if (at < limit) {
2569                         ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
2570                                 (long long)lag, (long long)at, (long long)limit);
2571                         timestamp.mTime = convertNsToTimespec(limit);
2572                     }
2573                 }
2574                 mPreviousLocation = location;
2575             } else {
2576                 // right after AudioTrack is started, one may not find a timestamp
2577                 ALOGV("getBestTimestamp did not find timestamp");
2578             }
2579         }
2580         if (status == INVALID_OPERATION) {
2581             // INVALID_OPERATION occurs when no timestamp has been issued by the server;
2582             // other failures are signaled by a negative time.
2583             // If we come out of FLUSHED or STOPPED where the position is known
2584             // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
2585             // "zero" for NuPlayer).  We don't convert for track restoration as position
2586             // does not reset.
2587             ALOGV("timestamp server offset:%lld restore frames:%lld",
2588                     (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
2589             if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
2590                 status = WOULD_BLOCK;
2591             }
2592         }
2593     }
2594     if (status != NO_ERROR) {
2595         ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2596         return status;
2597     }
2598     if (isOffloadedOrDirect_l()) {
2599         if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2600             // use cached paused position in case another offloaded track is running.
2601             timestamp.mPosition = mPausedPosition;
2602             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2603             // TODO: adjust for delay
2604             return NO_ERROR;
2605         }
2606 
2607         // Check whether a pending flush or stop has completed, as those commands may
2608         // be asynchronous or return near finish or exhibit glitchy behavior.
2609         //
2610         // Originally this showed up as the first timestamp being a continuation of
2611         // the previous song under gapless playback.
2612         // However, we sometimes see zero timestamps, then a glitch of
2613         // the previous song's position, and then correct timestamps afterwards.
2614         if (mStartFromZeroUs != 0 && mSampleRate != 0) {
2615             static const int kTimeJitterUs = 100000; // 100 ms
2616             static const int k1SecUs = 1000000;
2617 
2618             const int64_t timeNow = getNowUs();
2619 
2620             if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
2621                 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2622                 if (timestampTimeUs < mStartFromZeroUs) {
2623                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2624                 }
2625                 const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
2626                 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2627                         / ((double)mSampleRate * mPlaybackRate.mSpeed);
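                // For example, 250 ms after start() (deltaTimeUs = 250000) a reported position
                // of 48000 frames at 48 kHz and speed 1.0 maps to deltaPositionByUs = 1000000,
                // which exceeds deltaTimeUs + kTimeJitterUs and is treated as a startup glitch.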
2628 
2629                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2630                     // Verify that the counter can't count faster than the sample rate
2631                     // since the start time.  If greater, then that means we may have failed
2632                     // to completely flush or stop the previous playing track.
2633                     ALOGW_IF(!mTimestampStartupGlitchReported,
2634                             "getTimestamp startup glitch detected"
2635                             " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2636                             (long long)deltaTimeUs, (long long)deltaPositionByUs,
2637                             timestamp.mPosition);
2638                     mTimestampStartupGlitchReported = true;
2639                     if (previousTimestampValid
2640                             && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2641                         timestamp = mPreviousTimestamp;
2642                         mPreviousTimestampValid = true;
2643                         return NO_ERROR;
2644                     }
2645                     return WOULD_BLOCK;
2646                 }
2647                 if (deltaPositionByUs != 0) {
2648                 mStartFromZeroUs = 0; // don't check again, we got a valid nonzero position.
2649                 }
2650             } else {
2651                 mStartFromZeroUs = 0; // don't check again, start time expired.
2652             }
2653             mTimestampStartupGlitchReported = false;
2654         }
2655     } else {
2656         // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2657         (void) updateAndGetPosition_l();
2658         // Server consumed (mServer) and presented both use the same server time base,
2659         // and server consumed is always >= presented.
2660         // The delta between these represents the number of frames in the buffer pipeline.
2661         // If this delta is greater than the client position, it means that what has
2662         // actually been presented is still stuck at the starting line (figuratively
2663         // speaking), waiting for the first frame to go by.  So we can't report a valid
2664         // timestamp yet.  Note: we explicitly use a non-Modulo comparison here; there is
2665         // a potential wrap issue when mPosition exceeds 32 bits.
2666         // TODO Remove when timestamp is updated to contain pipeline status info.
2667         const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2668         if (pipelineDepthInFrames > 0 /* should be true, but we check anyway */
2669                 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2670             return INVALID_OPERATION;
2671         }
2672         // Convert timestamp position from server time base to client time base.
2673         // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2674         // But if we change it to 64-bit then this could fail.
2675         // Use Modulo computation here.
2676         timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2677         // Immediately after a call to getPosition_l(), mPosition and
2678         // mServer both represent the same frame position.  mPosition is
2679         // in client's point of view, and mServer is in server's point of
2680         // view.  So the difference between them is the "fudge factor"
2681         // between client and server views due to stop() and/or new
2682         // IAudioTrack.  And timestamp.mPosition is initially in server's
2683         // point of view, so we need to apply the same fudge factor to it.
2684     }
2685 
2686     // Prevent retrograde motion in timestamp.
2687     // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2688     if (status == NO_ERROR) {
2689         // previousTimestampValid is set to false when starting after a stop or flush.
2690         if (previousTimestampValid) {
2691             const int64_t previousTimeNanos =
2692                     audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
2693             int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);
2694 
2695             // Fix stale time when checking timestamp right after start().
2696             //
2697             // For offload compatibility, use a default lag value here.
2698             // Any time discrepancy between this update and the pause timestamp is handled
2699             // by the retrograde check afterwards.
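            // lagNs converts mAfLatency (the AudioFlinger output latency, kept in
            // milliseconds) to nanoseconds; any timestamp time older than
            // (start time - lag) is considered stale and clamped to limitNs below.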
2700             const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
2701             const int64_t limitNs = mStartNs - lagNs;
2702             if (currentTimeNanos < limitNs) {
2703                 ALOGD("correcting timestamp time for pause, "
2704                         "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
2705                         (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
2706                 timestamp.mTime = convertNsToTimespec(limitNs);
2707                 currentTimeNanos = limitNs;
2708             }
2709 
2710             // retrograde check
2711             if (currentTimeNanos < previousTimeNanos) {
2712                 ALOGW("retrograde timestamp time corrected, %lld < %lld",
2713                         (long long)currentTimeNanos, (long long)previousTimeNanos);
2714                 timestamp.mTime = mPreviousTimestamp.mTime;
2715                 // currentTimeNanos not used below.
2716             }
2717 
2718             // Looking at signed delta will work even when the timestamps
2719             // are wrapping around.
2720             int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2721                     - mPreviousTimestamp.mPosition).signedValue();
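            // Illustrative example of the wrap-safe math: if the previous position was
            // 0xFFFFFF00 and the new position is 0x00000100, the Modulo difference is 0x200
            // and signedValue() yields +512, so forward motion across the 32-bit wrap is
            // still seen as a positive delta.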
2722             if (deltaPosition < 0) {
2723                 // Only report once per position instead of spamming the log.
2724                 if (!mRetrogradeMotionReported) {
2725                     ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2726                             deltaPosition,
2727                             timestamp.mPosition,
2728                             mPreviousTimestamp.mPosition);
2729                     mRetrogradeMotionReported = true;
2730                 }
2731             } else {
2732                 mRetrogradeMotionReported = false;
2733             }
2734             if (deltaPosition < 0) {
2735                 timestamp.mPosition = mPreviousTimestamp.mPosition;
2736                 deltaPosition = 0;
2737             }
2738 #if 0
2739             // Uncomment this to verify audio timestamp rate.
2740             const int64_t deltaTime =
2741                     audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
2742             if (deltaTime != 0) {
2743                 const int64_t computedSampleRate =
2744                         deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
2745                 ALOGD("computedSampleRate:%u  sampleRate:%u",
2746                         (unsigned)computedSampleRate, mSampleRate);
2747             }
2748 #endif
2749         }
2750         mPreviousTimestamp = timestamp;
2751         mPreviousTimestampValid = true;
2752     }
2753 
2754     return status;
2755 }
2756 
getParameters(const String8 & keys)2757 String8 AudioTrack::getParameters(const String8& keys)
2758 {
2759     audio_io_handle_t output = getOutput();
2760     if (output != AUDIO_IO_HANDLE_NONE) {
2761         return AudioSystem::getParameters(output, keys);
2762     } else {
2763         return String8::empty();
2764     }
2765 }
2766 
isOffloaded() const2767 bool AudioTrack::isOffloaded() const
2768 {
2769     AutoMutex lock(mLock);
2770     return isOffloaded_l();
2771 }
2772 
isDirect() const2773 bool AudioTrack::isDirect() const
2774 {
2775     AutoMutex lock(mLock);
2776     return isDirect_l();
2777 }
2778 
isOffloadedOrDirect() const2779 bool AudioTrack::isOffloadedOrDirect() const
2780 {
2781     AutoMutex lock(mLock);
2782     return isOffloadedOrDirect_l();
2783 }
2784 
2785 
dump(int fd,const Vector<String16> & args __unused) const2786 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2787 {
2788     String8 result;
2789 
2790     result.append(" AudioTrack::dump\n");
2791     result.appendFormat("  status(%d), state(%d), session Id(%d), flags(%#x)\n",
2792                         mStatus, mState, mSessionId, mFlags);
2793     result.appendFormat("  stream type(%d), left - right volume(%f, %f)\n",
2794                         (mStreamType == AUDIO_STREAM_DEFAULT) ?
2795                                 audio_attributes_to_stream_type(&mAttributes) : mStreamType,
2796                         mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2797     result.appendFormat("  format(%#x), channel mask(%#x), channel count(%u)\n",
2798                   mFormat, mChannelMask, mChannelCount);
2799     result.appendFormat("  sample rate(%u), original sample rate(%u), speed(%f)\n",
2800                   mSampleRate, mOriginalSampleRate, mPlaybackRate.mSpeed);
2801     result.appendFormat("  frame count(%zu), req. frame count(%zu)\n",
2802                   mFrameCount, mReqFrameCount);
2803     result.appendFormat("  notif. frame count(%u), req. notif. frame count(%u),"
2804             " req. notif. per buff(%u)\n",
2805              mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
2806     result.appendFormat("  latency (%d), selected device Id(%d), routed device Id(%d)\n",
2807                         mLatency, mSelectedDeviceId, mRoutedDeviceId);
2808     result.appendFormat("  output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
2809                         mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
2810     ::write(fd, result.string(), result.size());
2811     return NO_ERROR;
2812 }
2813 
getUnderrunCount() const2814 uint32_t AudioTrack::getUnderrunCount() const
2815 {
2816     AutoMutex lock(mLock);
2817     return getUnderrunCount_l();
2818 }
2819 
getUnderrunCount_l() const2820 uint32_t AudioTrack::getUnderrunCount_l() const
2821 {
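    // The proxy counts underruns for the current IAudioTrack only; mUnderrunCountOffset
    // presumably holds the count accumulated before the track was restored, keeping the
    // total monotonic across a server-side track re-creation.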
2822     return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2823 }
2824 
getUnderrunFrames() const2825 uint32_t AudioTrack::getUnderrunFrames() const
2826 {
2827     AutoMutex lock(mLock);
2828     return mProxy->getUnderrunFrames();
2829 }
2830 
addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback> & callback)2831 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2832 {
2833     if (callback == 0) {
2834         ALOGW("%s adding NULL callback!", __FUNCTION__);
2835         return BAD_VALUE;
2836     }
2837     AutoMutex lock(mLock);
2838     if (mDeviceCallback.unsafe_get() == callback.get()) {
2839         ALOGW("%s adding same callback!", __FUNCTION__);
2840         return INVALID_OPERATION;
2841     }
2842     status_t status = NO_ERROR;
2843     if (mOutput != AUDIO_IO_HANDLE_NONE) {
2844         if (mDeviceCallback != 0) {
2845             ALOGW("%s callback already present!", __FUNCTION__);
2846             AudioSystem::removeAudioDeviceCallback(this, mOutput);
2847         }
2848         status = AudioSystem::addAudioDeviceCallback(this, mOutput);
2849     }
2850     mDeviceCallback = callback;
2851     return status;
2852 }
2853 
removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback> & callback)2854 status_t AudioTrack::removeAudioDeviceCallback(
2855         const sp<AudioSystem::AudioDeviceCallback>& callback)
2856 {
2857     if (callback == 0) {
2858         ALOGW("%s removing NULL callback!", __FUNCTION__);
2859         return BAD_VALUE;
2860     }
2861     AutoMutex lock(mLock);
2862     if (mDeviceCallback.unsafe_get() != callback.get()) {
2863         ALOGW("%s removing different callback!", __FUNCTION__);
2864         return INVALID_OPERATION;
2865     }
2866     mDeviceCallback.clear();
2867     if (mOutput != AUDIO_IO_HANDLE_NONE) {
2868         AudioSystem::removeAudioDeviceCallback(this, mOutput);
2869     }
2870     return NO_ERROR;
2871 }
2872 
2873 
onAudioDeviceUpdate(audio_io_handle_t audioIo,audio_port_handle_t deviceId)2874 void AudioTrack::onAudioDeviceUpdate(audio_io_handle_t audioIo,
2875                                  audio_port_handle_t deviceId)
2876 {
2877     sp<AudioSystem::AudioDeviceCallback> callback;
2878     {
2879         AutoMutex lock(mLock);
2880         if (audioIo != mOutput) {
2881             return;
2882         }
2883         callback = mDeviceCallback.promote();
2884         // only update the device if the track is active, as route changes due to other
2885         // use cases are irrelevant to this client
2886         if (mState == STATE_ACTIVE) {
2887             mRoutedDeviceId = deviceId;
2888         }
2889     }
2890     if (callback.get() != nullptr) {
2891         callback->onAudioDeviceUpdate(mOutput, mRoutedDeviceId);
2892     }
2893 }
2894 
pendingDuration(int32_t * msec,ExtendedTimestamp::Location location)2895 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
2896 {
2897     if (msec == nullptr ||
2898             (location != ExtendedTimestamp::LOCATION_SERVER
2899                     && location != ExtendedTimestamp::LOCATION_KERNEL)) {
2900         return BAD_VALUE;
2901     }
2902     AutoMutex lock(mLock);
2903     // inclusive of offloaded and direct tracks.
2904     //
2905     // It would be possible, but is not enabled, to allow duration computation for
2906     // non-PCM audio_has_proportional_frames() formats, because they currently drain
2907     // at a rate equivalent to the PCM sample rate * frame size.
2908     if (!isPurePcmData_l()) {
2909         return INVALID_OPERATION;
2910     }
2911     ExtendedTimestamp ets;
2912     if (getTimestamp_l(&ets) == OK
2913             && ets.mTimeNs[location] > 0) {
2914         int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
2915                 - ets.mPosition[location];
2916         if (diff < 0) {
2917             *msec = 0;
2918         } else {
2919             // ms is the remaining playback time implied by the frame difference
2920             int64_t ms = (int64_t)((double)diff * 1000 /
2921                     ((double)mSampleRate * mPlaybackRate.mSpeed));
2922             // clockdiff is the timestamp age (negative)
2923             int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
2924                     ets.mTimeNs[location]
2925                     + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
2926                     - systemTime(SYSTEM_TIME_MONOTONIC);
2927 
2928             //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
2929             static const int NANOS_PER_MILLIS = 1000000;
2930             *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
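            // Worked example (illustrative values, not from the source): diff = 48000 frames
            // at 48000 Hz and speed 1.0 gives ms = 1000; if the timestamp is 20 ms old,
            // clockdiff is about -20000000 ns, so *msec reports roughly 980 ms still pending.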
2931         }
2932         return NO_ERROR;
2933     }
2934     if (location != ExtendedTimestamp::LOCATION_SERVER) {
2935         return INVALID_OPERATION; // LOCATION_KERNEL is not available
2936     }
2937     // use server position directly (offloaded and direct arrive here)
2938     updateAndGetPosition_l();
2939     int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
2940     *msec = (diff <= 0) ? 0
2941             : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
2942     return NO_ERROR;
2943 }
2944 
hasStarted()2945 bool AudioTrack::hasStarted()
2946 {
2947     AutoMutex lock(mLock);
2948     switch (mState) {
2949     case STATE_STOPPED:
2950         if (isOffloadedOrDirect_l()) {
2951             // return true if we have ever started in the past.
2952             return mStartFromZeroUs > 0;
2953         }
2954         // A normal audio track may still be draining, so check if the stream has ended.
2955         // This covers fast track position instability and start/stop without any data
2956         // written.
2957         if (mProxy->getStreamEndDone()) {
2958             return true;
2959         }
2960         // fall through
2961     case STATE_ACTIVE:
2962     case STATE_STOPPING:
2963         break;
2964     case STATE_PAUSED:
2965     case STATE_PAUSED_STOPPING:
2966     case STATE_FLUSHED:
2967         return false;  // we're not active
2968     default:
2969         LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
2970         break;
2971     }
2972 
2973     // wait indicates whether we need to wait for a timestamp.
2974     // This is determined conservatively: if we encounter an unexpected error,
2975     // we do not wait.
2976     bool wait = false;
2977     if (isOffloadedOrDirect_l()) {
2978         AudioTimestamp ts;
2979         status_t status = getTimestamp_l(ts);
2980         if (status == WOULD_BLOCK) {
2981             wait = true;
2982         } else if (status == OK) {
2983             wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
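            // A position of zero, or one equal to the position captured at start(), means
            // the output has not presented any new frames yet, so keep waiting.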
2984         }
2985         ALOGV("hasStarted wait:%d  ts:%u  start position:%lld",
2986                 (int)wait,
2987                 ts.mPosition,
2988                 (long long)mStartTs.mPosition);
2989     } else {
2990         int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
2991         ExtendedTimestamp ets;
2992         status_t status = getTimestamp_l(&ets);
2993         if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
2994             wait = true;
2995         } else if (status == OK) {
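            // Prefer the kernel position when it is valid and fall back to the server
            // position: the loop walks from LOCATION_KERNEL down to LOCATION_SERVER and
            // stops at the first location with usable time information.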
2996             for (location = ExtendedTimestamp::LOCATION_KERNEL;
2997                     location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
2998                 if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
2999                     continue;
3000                 }
3001                 wait = ets.mPosition[location] == 0
3002                         || ets.mPosition[location] == mStartEts.mPosition[location];
3003                 break;
3004             }
3005         }
3006         ALOGV("hasStarted wait:%d  ets:%lld  start position:%lld",
3007                 (int)wait,
3008                 (long long)ets.mPosition[location],
3009                 (long long)mStartEts.mPosition[location]);
3010     }
3011     return !wait;
3012 }
3013 
3014 // =========================================================================
3015 
binderDied(const wp<IBinder> & who __unused)3016 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
3017 {
3018     sp<AudioTrack> audioTrack = mAudioTrack.promote();
3019     if (audioTrack != 0) {
3020         AutoMutex lock(audioTrack->mLock);
3021         audioTrack->mProxy->binderDied();
3022     }
3023 }
3024 
3025 // =========================================================================
3026 
AudioTrackThread(AudioTrack & receiver,bool bCanCallJava)3027 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
3028     : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
3029       mIgnoreNextPausedInt(false)
3030 {
3031 }
3032 
~AudioTrackThread()3033 AudioTrack::AudioTrackThread::~AudioTrackThread()
3034 {
3035 }
3036 
threadLoop()3037 bool AudioTrack::AudioTrackThread::threadLoop()
3038 {
3039     {
3040         AutoMutex _l(mMyLock);
3041         if (mPaused) {
3042             // TODO check return value and handle or log
3043             mMyCond.wait(mMyLock);
3044             // caller will check for exitPending()
3045             return true;
3046         }
3047         if (mIgnoreNextPausedInt) {
3048             mIgnoreNextPausedInt = false;
3049             mPausedInt = false;
3050         }
3051         if (mPausedInt) {
3052             // TODO use futex instead of condition, for event flag "or"
3053             if (mPausedNs > 0) {
3054                 // TODO check return value and handle or log
3055                 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3056             } else {
3057                 // TODO check return value and handle or log
3058                 mMyCond.wait(mMyLock);
3059             }
3060             mPausedInt = false;
3061             return true;
3062         }
3063     }
3064     if (exitPending()) {
3065         return false;
3066     }
3067     nsecs_t ns = mReceiver.processAudioBuffer();
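    // processAudioBuffer() returns how long to wait before the next callback pass:
    // 0 means run again immediately, NS_INACTIVE means pause until resume(), NS_NEVER
    // means exit the thread loop, NS_WHENEVER is treated as an indefinite timeout that
    // wake() can cut short, and any other positive value is a relative timeout in ns.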
3068     switch (ns) {
3069     case 0:
3070         return true;
3071     case NS_INACTIVE:
3072         pauseInternal();
3073         return true;
3074     case NS_NEVER:
3075         return false;
3076     case NS_WHENEVER:
3077         // Event driven: call wake() when callback notification conditions change.
3078         ns = INT64_MAX;
3079         // fall through
3080     default:
3081         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
3082         pauseInternal(ns);
3083         return true;
3084     }
3085 }
3086 
requestExit()3087 void AudioTrack::AudioTrackThread::requestExit()
3088 {
3089     // must be in this order to avoid a race condition
3090     Thread::requestExit();
3091     resume();
3092 }
3093 
pause()3094 void AudioTrack::AudioTrackThread::pause()
3095 {
3096     AutoMutex _l(mMyLock);
3097     mPaused = true;
3098 }
3099 
resume()3100 void AudioTrack::AudioTrackThread::resume()
3101 {
3102     AutoMutex _l(mMyLock);
3103     mIgnoreNextPausedInt = true;
3104     if (mPaused || mPausedInt) {
3105         mPaused = false;
3106         mPausedInt = false;
3107         mMyCond.signal();
3108     }
3109 }
3110 
wake()3111 void AudioTrack::AudioTrackThread::wake()
3112 {
3113     AutoMutex _l(mMyLock);
3114     if (!mPaused) {
3115         // wake() might be called while servicing a callback - ignore the next
3116         // pause time and call processAudioBuffer.
3117         mIgnoreNextPausedInt = true;
3118         if (mPausedInt && mPausedNs > 0) {
3119             // audio track is active and internally paused with timeout.
3120             mPausedInt = false;
3121             mMyCond.signal();
3122         }
3123     }
3124 }
3125 
pauseInternal(nsecs_t ns)3126 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3127 {
3128     AutoMutex _l(mMyLock);
3129     mPausedInt = true;
3130     mPausedNs = ns;
3131 }
3132 
3133 } // namespace android
3134