1 /*
2 **
3 ** Copyright 2007, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18 //#define LOG_NDEBUG 0
19 #define LOG_TAG "AudioTrack"
20
21 #include <inttypes.h>
22 #include <math.h>
23 #include <sys/resource.h>
24
25 #include <audio_utils/primitives.h>
26 #include <binder/IPCThreadState.h>
27 #include <media/AudioTrack.h>
28 #include <utils/Log.h>
29 #include <private/media/AudioTrackShared.h>
30 #include <media/IAudioFlinger.h>
31 #include <media/AudioPolicyHelper.h>
32 #include <media/AudioResamplerPublic.h>
33
34 #define WAIT_PERIOD_MS 10
35 #define WAIT_STREAM_END_TIMEOUT_SEC 120
36 static const int kMaxLoopCountNotifications = 32;
37
38 namespace android {
39 // ---------------------------------------------------------------------------
40
41 // TODO: Move to a separate .h
42
43 template <typename T>
44 static inline const T &min(const T &x, const T &y) {
45 return x < y ? x : y;
46 }
47
48 template <typename T>
49 static inline const T &max(const T &x, const T &y) {
50 return x > y ? x : y;
51 }
52
53 static const int32_t NANOS_PER_SECOND = 1000000000;
54
55 static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
56 {
57 return ((double)frames * 1000000000) / ((double)sampleRate * speed);
58 }
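// Illustrative example (hypothetical values): 48000 frames at 48000 Hz and speed 1.0f
// correspond to 1e9 ns, i.e. exactly one second of playback.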
59
60 static int64_t convertTimespecToUs(const struct timespec &tv)
61 {
62 return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
63 }
64
65 static inline nsecs_t convertTimespecToNs(const struct timespec &tv)
66 {
67 return tv.tv_sec * (long long)NANOS_PER_SECOND + tv.tv_nsec;
68 }
69
70 // current monotonic time in microseconds.
71 static int64_t getNowUs()
72 {
73 struct timespec tv;
74 (void) clock_gettime(CLOCK_MONOTONIC, &tv);
75 return convertTimespecToUs(tv);
76 }
77
78 // FIXME: we don't use the pitch setting in the time stretcher (not working);
79 // instead we emulate it using our sample rate converter.
80 static const bool kFixPitch = true; // enable pitch fix
81 static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
82 {
83 return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
84 }
85
86 static inline float adjustSpeed(float speed, float pitch)
87 {
88 return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
89 }
90
91 static inline float adjustPitch(float pitch)
92 {
93 return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
94 }
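// Illustrative example (hypothetical values): a request for speed 1.0f and pitch 2.0f on a
// 44100 Hz track becomes an effective sample rate of 88200, an effective speed of 0.5f and an
// effective pitch of 1.0f, so the resampler provides the pitch shift while the time stretcher
// keeps the overall duration unchanged.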
95
96 // Must match similar computation in createTrack_l in Threads.cpp.
97 // TODO: Move to a common library
98 static size_t calculateMinFrameCount(
99 uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
100 uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
101 {
102 // Ensure that buffer depth covers at least audio hardware latency
103 uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
104 if (minBufCount < 2) {
105 minBufCount = 2;
106 }
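    // Illustrative example (hypothetical values): with afLatencyMs = 90, afFrameCount = 960
    // and afSampleRate = 48000, one mixer buffer covers (1000 * 960) / 48000 = 20 ms,
    // so minBufCount = 90 / 20 = 4.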
107 #if 0
108 // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
109 // but keeping the code here to make it easier to add later.
110 if (minBufCount < notificationsPerBufferReq) {
111 minBufCount = notificationsPerBufferReq;
112 }
113 #endif
114 ALOGV("calculateMinFrameCount afLatency %u afFrameCount %u afSampleRate %u "
115 "sampleRate %u speed %f minBufCount: %u" /*" notificationsPerBufferReq %u"*/,
116 afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
117 /*, notificationsPerBufferReq*/);
118 return minBufCount * sourceFramesNeededWithTimestretch(
119 sampleRate, afFrameCount, afSampleRate, speed);
120 }
121
122 // static
123 status_t AudioTrack::getMinFrameCount(
124 size_t* frameCount,
125 audio_stream_type_t streamType,
126 uint32_t sampleRate)
127 {
128 if (frameCount == NULL) {
129 return BAD_VALUE;
130 }
131
132 // FIXME handle in server, like createTrack_l(), possible missing info:
133 // audio_io_handle_t output
134 // audio_format_t format
135 // audio_channel_mask_t channelMask
136 // audio_output_flags_t flags (FAST)
137 uint32_t afSampleRate;
138 status_t status;
139 status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
140 if (status != NO_ERROR) {
141 ALOGE("Unable to query output sample rate for stream type %d; status %d",
142 streamType, status);
143 return status;
144 }
145 size_t afFrameCount;
146 status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
147 if (status != NO_ERROR) {
148 ALOGE("Unable to query output frame count for stream type %d; status %d",
149 streamType, status);
150 return status;
151 }
152 uint32_t afLatency;
153 status = AudioSystem::getOutputLatency(&afLatency, streamType);
154 if (status != NO_ERROR) {
155 ALOGE("Unable to query output latency for stream type %d; status %d",
156 streamType, status);
157 return status;
158 }
159
160 // When called from createTrack, speed is 1.0f (normal speed).
161 // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
162 *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
163 /*, 0 notificationsPerBufferReq*/);
164
165 // The formula above should always produce a non-zero value under normal circumstances:
166 // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
167 // Return error in the unlikely event that it does not, as that's part of the API contract.
168 if (*frameCount == 0) {
169 ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
170 streamType, sampleRate);
171 return BAD_VALUE;
172 }
173 ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
174 *frameCount, afFrameCount, afSampleRate, afLatency);
175 return NO_ERROR;
176 }
177
178 // ---------------------------------------------------------------------------
179
180 AudioTrack::AudioTrack()
181 : mStatus(NO_INIT),
182 mState(STATE_STOPPED),
183 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
184 mPreviousSchedulingGroup(SP_DEFAULT),
185 mPausedPosition(0),
186 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
187 mPortId(AUDIO_PORT_HANDLE_NONE)
188 {
189 mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
190 mAttributes.usage = AUDIO_USAGE_UNKNOWN;
191 mAttributes.flags = 0x0;
192 strcpy(mAttributes.tags, "");
193 }
194
195 AudioTrack::AudioTrack(
196 audio_stream_type_t streamType,
197 uint32_t sampleRate,
198 audio_format_t format,
199 audio_channel_mask_t channelMask,
200 size_t frameCount,
201 audio_output_flags_t flags,
202 callback_t cbf,
203 void* user,
204 int32_t notificationFrames,
205 audio_session_t sessionId,
206 transfer_type transferType,
207 const audio_offload_info_t *offloadInfo,
208 uid_t uid,
209 pid_t pid,
210 const audio_attributes_t* pAttributes,
211 bool doNotReconnect,
212 float maxRequiredSpeed)
213 : mStatus(NO_INIT),
214 mState(STATE_STOPPED),
215 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
216 mPreviousSchedulingGroup(SP_DEFAULT),
217 mPausedPosition(0),
218 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
219 mPortId(AUDIO_PORT_HANDLE_NONE)
220 {
221 mStatus = set(streamType, sampleRate, format, channelMask,
222 frameCount, flags, cbf, user, notificationFrames,
223 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
224 offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
225 }
226
227 AudioTrack::AudioTrack(
228 audio_stream_type_t streamType,
229 uint32_t sampleRate,
230 audio_format_t format,
231 audio_channel_mask_t channelMask,
232 const sp<IMemory>& sharedBuffer,
233 audio_output_flags_t flags,
234 callback_t cbf,
235 void* user,
236 int32_t notificationFrames,
237 audio_session_t sessionId,
238 transfer_type transferType,
239 const audio_offload_info_t *offloadInfo,
240 uid_t uid,
241 pid_t pid,
242 const audio_attributes_t* pAttributes,
243 bool doNotReconnect,
244 float maxRequiredSpeed)
245 : mStatus(NO_INIT),
246 mState(STATE_STOPPED),
247 mPreviousPriority(ANDROID_PRIORITY_NORMAL),
248 mPreviousSchedulingGroup(SP_DEFAULT),
249 mPausedPosition(0),
250 mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
251 mPortId(AUDIO_PORT_HANDLE_NONE)
252 {
253 mStatus = set(streamType, sampleRate, format, channelMask,
254 0 /*frameCount*/, flags, cbf, user, notificationFrames,
255 sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
256 uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
257 }
258
259 AudioTrack::~AudioTrack()
260 {
261 if (mStatus == NO_ERROR) {
262 // Make sure that callback function exits in the case where
263 // it is looping on buffer full condition in obtainBuffer().
264 // Otherwise the callback thread will never exit.
265 stop();
266 if (mAudioTrackThread != 0) {
267 mProxy->interrupt();
268 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
269 mAudioTrackThread->requestExitAndWait();
270 mAudioTrackThread.clear();
271 }
272 // No lock here: worst case we remove a NULL callback which will be a nop
273 if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
274 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
275 }
276 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
277 mAudioTrack.clear();
278 mCblkMemory.clear();
279 mSharedBuffer.clear();
280 IPCThreadState::self()->flushCommands();
281 ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
282 mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
283 AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
284 }
285 }
286
287 status_t AudioTrack::set(
288 audio_stream_type_t streamType,
289 uint32_t sampleRate,
290 audio_format_t format,
291 audio_channel_mask_t channelMask,
292 size_t frameCount,
293 audio_output_flags_t flags,
294 callback_t cbf,
295 void* user,
296 int32_t notificationFrames,
297 const sp<IMemory>& sharedBuffer,
298 bool threadCanCallJava,
299 audio_session_t sessionId,
300 transfer_type transferType,
301 const audio_offload_info_t *offloadInfo,
302 uid_t uid,
303 pid_t pid,
304 const audio_attributes_t* pAttributes,
305 bool doNotReconnect,
306 float maxRequiredSpeed)
307 {
308 ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
309 "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
310 streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
311 sessionId, transferType, uid, pid);
312
313 mThreadCanCallJava = threadCanCallJava;
314
315 switch (transferType) {
316 case TRANSFER_DEFAULT:
317 if (sharedBuffer != 0) {
318 transferType = TRANSFER_SHARED;
319 } else if (cbf == NULL || threadCanCallJava) {
320 transferType = TRANSFER_SYNC;
321 } else {
322 transferType = TRANSFER_CALLBACK;
323 }
324 break;
325 case TRANSFER_CALLBACK:
326 if (cbf == NULL || sharedBuffer != 0) {
327 ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
328 return BAD_VALUE;
329 }
330 break;
331 case TRANSFER_OBTAIN:
332 case TRANSFER_SYNC:
333 if (sharedBuffer != 0) {
334 ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
335 return BAD_VALUE;
336 }
337 break;
338 case TRANSFER_SHARED:
339 if (sharedBuffer == 0) {
340 ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
341 return BAD_VALUE;
342 }
343 break;
344 default:
345 ALOGE("Invalid transfer type %d", transferType);
346 return BAD_VALUE;
347 }
348 mSharedBuffer = sharedBuffer;
349 mTransfer = transferType;
350 mDoNotReconnect = doNotReconnect;
351
352 ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
353 sharedBuffer->size());
354
355 ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
356
357 // invariant that mAudioTrack != 0 is true only after set() returns successfully
358 if (mAudioTrack != 0) {
359 ALOGE("Track already in use");
360 return INVALID_OPERATION;
361 }
362
363 // handle default values first.
364 if (streamType == AUDIO_STREAM_DEFAULT) {
365 streamType = AUDIO_STREAM_MUSIC;
366 }
367 if (pAttributes == NULL) {
368 if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
369 ALOGE("Invalid stream type %d", streamType);
370 return BAD_VALUE;
371 }
372 mStreamType = streamType;
373
374 } else {
375 // stream type shouldn't be looked at, this track has audio attributes
376 memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
377 ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
378 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
379 mStreamType = AUDIO_STREAM_DEFAULT;
380 if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
381 flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
382 }
383 if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
384 flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
385 }
386 // check deep buffer after flags have been modified above
387 if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
388 flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
389 }
390 }
391
392 // these below should probably come from the audioFlinger too...
393 if (format == AUDIO_FORMAT_DEFAULT) {
394 format = AUDIO_FORMAT_PCM_16_BIT;
395 } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
396 mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
397 }
398
399 // validate parameters
400 if (!audio_is_valid_format(format)) {
401 ALOGE("Invalid format %#x", format);
402 return BAD_VALUE;
403 }
404 mFormat = format;
405
406 if (!audio_is_output_channel(channelMask)) {
407 ALOGE("Invalid channel mask %#x", channelMask);
408 return BAD_VALUE;
409 }
410 mChannelMask = channelMask;
411 uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
412 mChannelCount = channelCount;
413
414 // force direct flag if format is not linear PCM
415 // or offload was requested
416 if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
417 || !audio_is_linear_pcm(format)) {
418 ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
419 ? "Offload request, forcing to Direct Output"
420 : "Not linear PCM, forcing to Direct Output");
421 flags = (audio_output_flags_t)
422 // FIXME why can't we allow direct AND fast?
423 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
424 }
425
426 // force direct flag if HW A/V sync requested
427 if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
428 flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
429 }
430
431 if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
432 if (audio_has_proportional_frames(format)) {
433 mFrameSize = channelCount * audio_bytes_per_sample(format);
434 } else {
435 mFrameSize = sizeof(uint8_t);
436 }
437 } else {
438 ALOG_ASSERT(audio_has_proportional_frames(format));
439 mFrameSize = channelCount * audio_bytes_per_sample(format);
440 // createTrack will return an error if PCM format is not supported by server,
441 // so no need to check for specific PCM formats here
442 }
443
444 // sampling rate must be specified for direct outputs
445 if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
446 return BAD_VALUE;
447 }
448 mSampleRate = sampleRate;
449 mOriginalSampleRate = sampleRate;
450 mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
451 // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
452 mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
453
454 // Make copy of input parameter offloadInfo so that in the future:
455 // (a) createTrack_l doesn't need it as an input parameter
456 // (b) we can support re-creation of offloaded tracks
457 if (offloadInfo != NULL) {
458 mOffloadInfoCopy = *offloadInfo;
459 mOffloadInfo = &mOffloadInfoCopy;
460 } else {
461 mOffloadInfo = NULL;
462 memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
463 }
464
465 mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
466 mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
467 mSendLevel = 0.0f;
468 // mFrameCount is initialized in createTrack_l
469 mReqFrameCount = frameCount;
470 if (notificationFrames >= 0) {
471 mNotificationFramesReq = notificationFrames;
472 mNotificationsPerBufferReq = 0;
473 } else {
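        // A negative notificationFrames requests that many callbacks per track buffer
        // (illustrative example: -4 means four notifications per buffer), clamped below
        // to the [1, 8] range.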
474 if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
475 ALOGE("notificationFrames=%d not permitted for non-fast track",
476 notificationFrames);
477 return BAD_VALUE;
478 }
479 if (frameCount > 0) {
480 ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
481 notificationFrames, frameCount);
482 return BAD_VALUE;
483 }
484 mNotificationFramesReq = 0;
485 const uint32_t minNotificationsPerBuffer = 1;
486 const uint32_t maxNotificationsPerBuffer = 8;
487 mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
488 max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
489 ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
490 "notificationFrames=%d clamped to the range -%u to -%u",
491 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
492 }
493 mNotificationFramesAct = 0;
494 if (sessionId == AUDIO_SESSION_ALLOCATE) {
495 mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
496 } else {
497 mSessionId = sessionId;
498 }
499 int callingpid = IPCThreadState::self()->getCallingPid();
500 int mypid = getpid();
501 if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
502 mClientUid = IPCThreadState::self()->getCallingUid();
503 } else {
504 mClientUid = uid;
505 }
506 if (pid == -1 || (callingpid != mypid)) {
507 mClientPid = callingpid;
508 } else {
509 mClientPid = pid;
510 }
511 mAuxEffectId = 0;
512 mOrigFlags = mFlags = flags;
513 mCbf = cbf;
514
515 if (cbf != NULL) {
516 mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
517 mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
518 // thread begins in paused state, and will not reference us until start()
519 }
520
521 // create the IAudioTrack
522 status_t status = createTrack_l();
523
524 if (status != NO_ERROR) {
525 if (mAudioTrackThread != 0) {
526 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
527 mAudioTrackThread->requestExitAndWait();
528 mAudioTrackThread.clear();
529 }
530 return status;
531 }
532
533 mStatus = NO_ERROR;
534 mUserData = user;
535 mLoopCount = 0;
536 mLoopStart = 0;
537 mLoopEnd = 0;
538 mLoopCountNotified = 0;
539 mMarkerPosition = 0;
540 mMarkerReached = false;
541 mNewPosition = 0;
542 mUpdatePeriod = 0;
543 mPosition = 0;
544 mReleased = 0;
545 mStartUs = 0;
546 AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
547 mSequence = 1;
548 mObservedSequence = mSequence;
549 mInUnderrun = false;
550 mPreviousTimestampValid = false;
551 mTimestampStartupGlitchReported = false;
552 mRetrogradeMotionReported = false;
553 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
554 mStartTs.mPosition = 0;
555 mUnderrunCountOffset = 0;
556 mFramesWritten = 0;
557 mFramesWrittenServerOffset = 0;
558 mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
559 mVolumeHandler = new VolumeHandler();
560 return NO_ERROR;
561 }
562
563 // -------------------------------------------------------------------------
564
565 status_t AudioTrack::start()
566 {
567 AutoMutex lock(mLock);
568
569 if (mState == STATE_ACTIVE) {
570 return INVALID_OPERATION;
571 }
572
573 mInUnderrun = true;
574
575 State previousState = mState;
576 if (previousState == STATE_PAUSED_STOPPING) {
577 mState = STATE_STOPPING;
578 } else {
579 mState = STATE_ACTIVE;
580 }
581 (void) updateAndGetPosition_l();
582
583 // save start timestamp
584 if (isOffloadedOrDirect_l()) {
585 if (getTimestamp_l(mStartTs) != OK) {
586 mStartTs.mPosition = 0;
587 }
588 } else {
589 if (getTimestamp_l(&mStartEts) != OK) {
590 mStartEts.clear();
591 }
592 }
593 if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
594 // reset current position as seen by client to 0
595 mPosition = 0;
596 mPreviousTimestampValid = false;
597 mTimestampStartupGlitchReported = false;
598 mRetrogradeMotionReported = false;
599 mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
600
601 if (!isOffloadedOrDirect_l()
602 && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
603 // Server side has consumed something, but is it finished consuming?
604 // It is possible since flush and stop are asynchronous that the server
605 // is still active at this point.
606 ALOGV("start: server read:%lld cumulative flushed:%lld client written:%lld",
607 (long long)(mFramesWrittenServerOffset
608 + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
609 (long long)mStartEts.mFlushed,
610 (long long)mFramesWritten);
611 mFramesWrittenServerOffset = -mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
612 }
613 mFramesWritten = 0;
614 mProxy->clearTimestamp(); // need new server push for valid timestamp
615 mMarkerReached = false;
616
617 // For offloaded tracks, we don't know if the hardware counters are really zero here,
618 // since the flush is asynchronous and stop may not fully drain.
619 // We save the time when the track is started to later verify whether
620 // the counters are realistic (i.e. start from zero after this time).
621 mStartUs = getNowUs();
622
623 // force refresh of remaining frames by processAudioBuffer() as last
624 // write before stop could be partial.
625 mRefreshRemaining = true;
626 }
627 mNewPosition = mPosition + mUpdatePeriod;
628 int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);
629
630 status_t status = NO_ERROR;
631 if (!(flags & CBLK_INVALID)) {
632 status = mAudioTrack->start();
633 if (status == DEAD_OBJECT) {
634 flags |= CBLK_INVALID;
635 }
636 }
637 if (flags & CBLK_INVALID) {
638 status = restoreTrack_l("start");
639 }
640
641 // resume or pause the callback thread as needed.
642 sp<AudioTrackThread> t = mAudioTrackThread;
643 if (status == NO_ERROR) {
644 if (t != 0) {
645 if (previousState == STATE_STOPPING) {
646 mProxy->interrupt();
647 } else {
648 t->resume();
649 }
650 } else {
651 mPreviousPriority = getpriority(PRIO_PROCESS, 0);
652 get_sched_policy(0, &mPreviousSchedulingGroup);
653 androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
654 }
655
656 // Start our local VolumeHandler for restoration purposes.
657 mVolumeHandler->setStarted();
658 } else {
659 ALOGE("start() status %d", status);
660 mState = previousState;
661 if (t != 0) {
662 if (previousState != STATE_STOPPING) {
663 t->pause();
664 }
665 } else {
666 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
667 set_sched_policy(0, mPreviousSchedulingGroup);
668 }
669 }
670
671 return status;
672 }
673
674 void AudioTrack::stop()
675 {
676 AutoMutex lock(mLock);
677 if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
678 return;
679 }
680
681 if (isOffloaded_l()) {
682 mState = STATE_STOPPING;
683 } else {
684 mState = STATE_STOPPED;
685 ALOGD_IF(mSharedBuffer == nullptr,
686 "stop() called with %u frames delivered", mReleased.value());
687 mReleased = 0;
688 }
689
690 mProxy->interrupt();
691 mAudioTrack->stop();
692
693 // Note: legacy handling - stop does not clear playback marker
694 // and periodic update counter, but flush does for streaming tracks.
695
696 if (mSharedBuffer != 0) {
697 // clear buffer position and loop count.
698 mStaticProxy->setBufferPositionAndLoop(0 /* position */,
699 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
700 }
701
702 sp<AudioTrackThread> t = mAudioTrackThread;
703 if (t != 0) {
704 if (!isOffloaded_l()) {
705 t->pause();
706 }
707 } else {
708 setpriority(PRIO_PROCESS, 0, mPreviousPriority);
709 set_sched_policy(0, mPreviousSchedulingGroup);
710 }
711 }
712
713 bool AudioTrack::stopped() const
714 {
715 AutoMutex lock(mLock);
716 return mState != STATE_ACTIVE;
717 }
718
719 void AudioTrack::flush()
720 {
721 if (mSharedBuffer != 0) {
722 return;
723 }
724 AutoMutex lock(mLock);
725 if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
726 return;
727 }
728 flush_l();
729 }
730
731 void AudioTrack::flush_l()
732 {
733 ALOG_ASSERT(mState != STATE_ACTIVE);
734
735 // clear playback marker and periodic update counter
736 mMarkerPosition = 0;
737 mMarkerReached = false;
738 mUpdatePeriod = 0;
739 mRefreshRemaining = true;
740
741 mState = STATE_FLUSHED;
742 mReleased = 0;
743 if (isOffloaded_l()) {
744 mProxy->interrupt();
745 }
746 mProxy->flush();
747 mAudioTrack->flush();
748 }
749
750 void AudioTrack::pause()
751 {
752 AutoMutex lock(mLock);
753 if (mState == STATE_ACTIVE) {
754 mState = STATE_PAUSED;
755 } else if (mState == STATE_STOPPING) {
756 mState = STATE_PAUSED_STOPPING;
757 } else {
758 return;
759 }
760 mProxy->interrupt();
761 mAudioTrack->pause();
762
763 if (isOffloaded_l()) {
764 if (mOutput != AUDIO_IO_HANDLE_NONE) {
765 // An offload output can be re-used between two audio tracks having
766 // the same configuration. A timestamp query for a paused track
767 // while the other is running would return an incorrect time.
768 // To fix this, cache the playback position on a pause() and return
769 // this time when requested until the track is resumed.
770
771 // OffloadThread sends HAL pause in its threadLoop. Time saved
772 // here can be slightly off.
773
774 // TODO: check return code for getRenderPosition.
775
776 uint32_t halFrames;
777 AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
778 ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
779 }
780 }
781 }
782
783 status_t AudioTrack::setVolume(float left, float right)
784 {
785 // This duplicates a test by AudioTrack JNI, but that is not the only caller
786 if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
787 isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
788 return BAD_VALUE;
789 }
790
791 AutoMutex lock(mLock);
792 mVolume[AUDIO_INTERLEAVE_LEFT] = left;
793 mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
794
795 mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
796
797 if (isOffloaded_l()) {
798 mAudioTrack->signal();
799 }
800 return NO_ERROR;
801 }
802
803 status_t AudioTrack::setVolume(float volume)
804 {
805 return setVolume(volume, volume);
806 }
807
808 status_t AudioTrack::setAuxEffectSendLevel(float level)
809 {
810 // This duplicates a test by AudioTrack JNI, but that is not the only caller
811 if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
812 return BAD_VALUE;
813 }
814
815 AutoMutex lock(mLock);
816 mSendLevel = level;
817 mProxy->setSendLevel(level);
818
819 return NO_ERROR;
820 }
821
822 void AudioTrack::getAuxEffectSendLevel(float* level) const
823 {
824 if (level != NULL) {
825 *level = mSendLevel;
826 }
827 }
828
829 status_t AudioTrack::setSampleRate(uint32_t rate)
830 {
831 AutoMutex lock(mLock);
832 if (rate == mSampleRate) {
833 return NO_ERROR;
834 }
835 if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
836 return INVALID_OPERATION;
837 }
838 if (mOutput == AUDIO_IO_HANDLE_NONE) {
839 return NO_INIT;
840 }
841 // NOTE: it is theoretically possible, but highly unlikely, that a device change
842 // could mean a previously allowed sampling rate is no longer allowed.
843 uint32_t afSamplingRate;
844 if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
845 return NO_INIT;
846 }
847 // pitch is emulated by adjusting speed and sampleRate
848 const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
849 if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
850 return BAD_VALUE;
851 }
852 // TODO: Should we also check if the buffer size is compatible?
853
854 mSampleRate = rate;
855 mProxy->setSampleRate(effectiveSampleRate);
856
857 return NO_ERROR;
858 }
859
860 uint32_t AudioTrack::getSampleRate() const
861 {
862 AutoMutex lock(mLock);
863
864 // sample rate can be updated during playback by the offloaded decoder so we need to
865 // query the HAL and update if needed.
866 // FIXME use Proxy return channel to update the rate from server and avoid polling here
867 if (isOffloadedOrDirect_l()) {
868 if (mOutput != AUDIO_IO_HANDLE_NONE) {
869 uint32_t sampleRate = 0;
870 status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
871 if (status == NO_ERROR) {
872 mSampleRate = sampleRate;
873 }
874 }
875 }
876 return mSampleRate;
877 }
878
879 uint32_t AudioTrack::getOriginalSampleRate() const
880 {
881 return mOriginalSampleRate;
882 }
883
884 status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
885 {
886 AutoMutex lock(mLock);
887 if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
888 return NO_ERROR;
889 }
890 if (isOffloadedOrDirect_l()) {
891 return INVALID_OPERATION;
892 }
893 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
894 return INVALID_OPERATION;
895 }
896
897 ALOGV("setPlaybackRate (input): mSampleRate:%u mSpeed:%f mPitch:%f",
898 mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
899 // pitch is emulated by adjusting speed and sampleRate
900 const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
901 const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
902 const float effectivePitch = adjustPitch(playbackRate.mPitch);
903 AudioPlaybackRate playbackRateTemp = playbackRate;
904 playbackRateTemp.mSpeed = effectiveSpeed;
905 playbackRateTemp.mPitch = effectivePitch;
906
907 ALOGV("setPlaybackRate (effective): mSampleRate:%u mSpeed:%f mPitch:%f",
908 effectiveRate, effectiveSpeed, effectivePitch);
909
910 if (!isAudioPlaybackRateValid(playbackRateTemp)) {
911 ALOGV("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
912 playbackRate.mSpeed, playbackRate.mPitch);
913 return BAD_VALUE;
914 }
915 // Check if the buffer size is compatible.
916 if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
917 ALOGV("setPlaybackRate(%f, %f) failed (buffer size)",
918 playbackRate.mSpeed, playbackRate.mPitch);
919 return BAD_VALUE;
920 }
921
922 // Check resampler ratios are within bounds
923 if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
924 (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
925 ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
926 playbackRate.mSpeed, playbackRate.mPitch);
927 return BAD_VALUE;
928 }
929
930 if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
931 ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
932 playbackRate.mSpeed, playbackRate.mPitch);
933 return BAD_VALUE;
934 }
935 mPlaybackRate = playbackRate;
936 //set effective rates
937 mProxy->setPlaybackRate(playbackRateTemp);
938 mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
939 return NO_ERROR;
940 }
941
942 const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
943 {
944 AutoMutex lock(mLock);
945 return mPlaybackRate;
946 }
947
948 ssize_t AudioTrack::getBufferSizeInFrames()
949 {
950 AutoMutex lock(mLock);
951 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
952 return NO_INIT;
953 }
954 return (ssize_t) mProxy->getBufferSizeInFrames();
955 }
956
957 status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
958 {
959 if (duration == nullptr) {
960 return BAD_VALUE;
961 }
962 AutoMutex lock(mLock);
963 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
964 return NO_INIT;
965 }
966 ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
967 if (bufferSizeInFrames < 0) {
968 return (status_t)bufferSizeInFrames;
969 }
970 *duration = (int64_t)((double)bufferSizeInFrames * 1000000
971 / ((double)mSampleRate * mPlaybackRate.mSpeed));
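    // Illustrative example (hypothetical values): 4800 frames at 48000 Hz and speed 1.0f
    // yield a duration of 100000 us (100 ms).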
972 return NO_ERROR;
973 }
974
975 ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
976 {
977 AutoMutex lock(mLock);
978 if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
979 return NO_INIT;
980 }
981 // Reject if timed track or compressed audio.
982 if (!audio_is_linear_pcm(mFormat)) {
983 return INVALID_OPERATION;
984 }
985 return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
986 }
987
988 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
989 {
990 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
991 return INVALID_OPERATION;
992 }
993
994 if (loopCount == 0) {
995 ;
996 } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
997 loopEnd - loopStart >= MIN_LOOP) {
998 ;
999 } else {
1000 return BAD_VALUE;
1001 }
1002
1003 AutoMutex lock(mLock);
1004 // See setPosition() regarding setting parameters such as loop points or position while active
1005 if (mState == STATE_ACTIVE) {
1006 return INVALID_OPERATION;
1007 }
1008 setLoop_l(loopStart, loopEnd, loopCount);
1009 return NO_ERROR;
1010 }
1011
1012 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1013 {
1014 // We do not update the periodic notification point.
1015 // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1016 mLoopCount = loopCount;
1017 mLoopEnd = loopEnd;
1018 mLoopStart = loopStart;
1019 mLoopCountNotified = loopCount;
1020 mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
1021
1022 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1023 }
1024
1025 status_t AudioTrack::setMarkerPosition(uint32_t marker)
1026 {
1027 // The only purpose of setting marker position is to get a callback
1028 if (mCbf == NULL || isOffloadedOrDirect()) {
1029 return INVALID_OPERATION;
1030 }
1031
1032 AutoMutex lock(mLock);
1033 mMarkerPosition = marker;
1034 mMarkerReached = false;
1035
1036 sp<AudioTrackThread> t = mAudioTrackThread;
1037 if (t != 0) {
1038 t->wake();
1039 }
1040 return NO_ERROR;
1041 }
1042
1043 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1044 {
1045 if (isOffloadedOrDirect()) {
1046 return INVALID_OPERATION;
1047 }
1048 if (marker == NULL) {
1049 return BAD_VALUE;
1050 }
1051
1052 AutoMutex lock(mLock);
1053 mMarkerPosition.getValue(marker);
1054
1055 return NO_ERROR;
1056 }
1057
1058 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1059 {
1060 // The only purpose of setting position update period is to get a callback
1061 if (mCbf == NULL || isOffloadedOrDirect()) {
1062 return INVALID_OPERATION;
1063 }
1064
1065 AutoMutex lock(mLock);
1066 mNewPosition = updateAndGetPosition_l() + updatePeriod;
1067 mUpdatePeriod = updatePeriod;
1068
1069 sp<AudioTrackThread> t = mAudioTrackThread;
1070 if (t != 0) {
1071 t->wake();
1072 }
1073 return NO_ERROR;
1074 }
1075
1076 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1077 {
1078 if (isOffloadedOrDirect()) {
1079 return INVALID_OPERATION;
1080 }
1081 if (updatePeriod == NULL) {
1082 return BAD_VALUE;
1083 }
1084
1085 AutoMutex lock(mLock);
1086 *updatePeriod = mUpdatePeriod;
1087
1088 return NO_ERROR;
1089 }
1090
1091 status_t AudioTrack::setPosition(uint32_t position)
1092 {
1093 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1094 return INVALID_OPERATION;
1095 }
1096 if (position > mFrameCount) {
1097 return BAD_VALUE;
1098 }
1099
1100 AutoMutex lock(mLock);
1101 // Currently we require that the player is inactive before setting parameters such as position
1102 // or loop points. Otherwise, there could be a race condition: the application could read the
1103 // current position, compute a new position or loop parameters, and then set that position or
1104 // loop parameters but it would do the "wrong" thing since the position has continued to advance
1105 // in the meantime. If we ever provide a sequencer in server, we could allow a way for the app
1106 // to specify how it wants to handle such scenarios.
1107 if (mState == STATE_ACTIVE) {
1108 return INVALID_OPERATION;
1109 }
1110 // After setting the position, use full update period before notification.
1111 mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1112 mStaticProxy->setBufferPosition(position);
1113
1114 // Waking the AudioTrackThread is not needed as this cannot be called when active.
1115 return NO_ERROR;
1116 }
1117
1118 status_t AudioTrack::getPosition(uint32_t *position)
1119 {
1120 if (position == NULL) {
1121 return BAD_VALUE;
1122 }
1123
1124 AutoMutex lock(mLock);
1125 // FIXME: offloaded and direct tracks call into the HAL for render positions
1126 // for compressed/synced data; however, we use proxy position for pure linear pcm data
1127 // as we do not know the capability of the HAL for pcm position support and standby.
1128 // There may be some latency differences between the HAL position and the proxy position.
1129 if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
1130 uint32_t dspFrames = 0;
1131
1132 if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1133 ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
1134 *position = mPausedPosition;
1135 return NO_ERROR;
1136 }
1137
1138 if (mOutput != AUDIO_IO_HANDLE_NONE) {
1139 uint32_t halFrames; // actually unused
1140 (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
1141 // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1142 }
1143 // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1144 // due to hardware latency. We leave this behavior for now.
1145 *position = dspFrames;
1146 } else {
1147 if (mCblk->mFlags & CBLK_INVALID) {
1148 (void) restoreTrack_l("getPosition");
1149 // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1150 // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1151 }
1152
1153 // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1154 *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1155 0 : updateAndGetPosition_l().value();
1156 }
1157 return NO_ERROR;
1158 }
1159
1160 status_t AudioTrack::getBufferPosition(uint32_t *position)
1161 {
1162 if (mSharedBuffer == 0) {
1163 return INVALID_OPERATION;
1164 }
1165 if (position == NULL) {
1166 return BAD_VALUE;
1167 }
1168
1169 AutoMutex lock(mLock);
1170 *position = mStaticProxy->getBufferPosition();
1171 return NO_ERROR;
1172 }
1173
1174 status_t AudioTrack::reload()
1175 {
1176 if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1177 return INVALID_OPERATION;
1178 }
1179
1180 AutoMutex lock(mLock);
1181 // See setPosition() regarding setting parameters such as loop points or position while active
1182 if (mState == STATE_ACTIVE) {
1183 return INVALID_OPERATION;
1184 }
1185 mNewPosition = mUpdatePeriod;
1186 (void) updateAndGetPosition_l();
1187 mPosition = 0;
1188 mPreviousTimestampValid = false;
1189 #if 0
1190 // The documentation is not clear on the behavior of reload() and the restoration
1191 // of loop count. Historically we have not restored loop count, start, end,
1192 // but it makes sense if one desires to repeat playing a particular sound.
1193 if (mLoopCount != 0) {
1194 mLoopCountNotified = mLoopCount;
1195 mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1196 }
1197 #endif
1198 mStaticProxy->setBufferPosition(0);
1199 return NO_ERROR;
1200 }
1201
1202 audio_io_handle_t AudioTrack::getOutput() const
1203 {
1204 AutoMutex lock(mLock);
1205 return mOutput;
1206 }
1207
1208 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1209 AutoMutex lock(mLock);
1210 if (mSelectedDeviceId != deviceId) {
1211 mSelectedDeviceId = deviceId;
1212 if (mStatus == NO_ERROR) {
1213 android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1214 }
1215 }
1216 return NO_ERROR;
1217 }
1218
1219 audio_port_handle_t AudioTrack::getOutputDevice() {
1220 AutoMutex lock(mLock);
1221 return mSelectedDeviceId;
1222 }
1223
1224 audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1225 AutoMutex lock(mLock);
1226 if (mOutput == AUDIO_IO_HANDLE_NONE) {
1227 return AUDIO_PORT_HANDLE_NONE;
1228 }
1229 return AudioSystem::getDeviceIdForIo(mOutput);
1230 }
1231
1232 status_t AudioTrack::attachAuxEffect(int effectId)
1233 {
1234 AutoMutex lock(mLock);
1235 status_t status = mAudioTrack->attachAuxEffect(effectId);
1236 if (status == NO_ERROR) {
1237 mAuxEffectId = effectId;
1238 }
1239 return status;
1240 }
1241
1242 audio_stream_type_t AudioTrack::streamType() const
1243 {
1244 if (mStreamType == AUDIO_STREAM_DEFAULT) {
1245 return audio_attributes_to_stream_type(&mAttributes);
1246 }
1247 return mStreamType;
1248 }
1249
1250 // -------------------------------------------------------------------------
1251
1252 // must be called with mLock held
1253 status_t AudioTrack::createTrack_l()
1254 {
1255 const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1256 if (audioFlinger == 0) {
1257 ALOGE("Could not get audioflinger");
1258 return NO_INIT;
1259 }
1260
1261 if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
1262 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
1263 }
1264 audio_io_handle_t output;
1265 audio_stream_type_t streamType = mStreamType;
1266 audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
1267
1268 // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1269 // After fast request is denied, we will request again if IAudioTrack is re-created.
1270
1271 status_t status;
1272 audio_config_t config = AUDIO_CONFIG_INITIALIZER;
1273 config.sample_rate = mSampleRate;
1274 config.channel_mask = mChannelMask;
1275 config.format = mFormat;
1276 config.offload_info = mOffloadInfoCopy;
1277 status = AudioSystem::getOutputForAttr(attr, &output,
1278 mSessionId, &streamType, mClientUid,
1279 &config,
1280 mFlags, mSelectedDeviceId, &mPortId);
1281
1282 if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
1283 ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u,"
1284 " format %#x, channel mask %#x, flags %#x",
1285 mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask,
1286 mFlags);
1287 return BAD_VALUE;
1288 }
1289 {
1290 // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
1291 // we must release it ourselves if anything goes wrong.
1292
1293 // Not all of these values are needed under all conditions, but it is easier to get them all
1294 status = AudioSystem::getLatency(output, &mAfLatency);
1295 if (status != NO_ERROR) {
1296 ALOGE("getLatency(%d) failed status %d", output, status);
1297 goto release;
1298 }
1299 ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
1300
1301 status = AudioSystem::getFrameCount(output, &mAfFrameCount);
1302 if (status != NO_ERROR) {
1303 ALOGE("getFrameCount(output=%d) status %d", output, status);
1304 goto release;
1305 }
1306
1307 // TODO consider making this a member variable if there are other uses for it later
1308 size_t afFrameCountHAL;
1309 status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
1310 if (status != NO_ERROR) {
1311 ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
1312 goto release;
1313 }
1314 ALOG_ASSERT(afFrameCountHAL > 0);
1315
1316 status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
1317 if (status != NO_ERROR) {
1318 ALOGE("getSamplingRate(output=%d) status %d", output, status);
1319 goto release;
1320 }
1321 if (mSampleRate == 0) {
1322 mSampleRate = mAfSampleRate;
1323 mOriginalSampleRate = mAfSampleRate;
1324 }
1325
1326 // Client can only express a preference for FAST. Server will perform additional tests.
1327 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1328 bool useCaseAllowed =
1329 // either of these use cases:
1330 // use case 1: shared buffer
1331 (mSharedBuffer != 0) ||
1332 // use case 2: callback transfer mode
1333 (mTransfer == TRANSFER_CALLBACK) ||
1334 // use case 3: obtain/release mode
1335 (mTransfer == TRANSFER_OBTAIN) ||
1336 // use case 4: synchronous write
1337 ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
1338 // sample rates must also match
1339 bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
1340 if (!fastAllowed) {
1341 ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
1342 "track %u Hz, output %u Hz",
1343 mTransfer, mSampleRate, mAfSampleRate);
1344 mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1345 }
1346 }
1347
1348 mNotificationFramesAct = mNotificationFramesReq;
1349
1350 size_t frameCount = mReqFrameCount;
1351 if (!audio_has_proportional_frames(mFormat)) {
1352
1353 if (mSharedBuffer != 0) {
1354 // Same comment as below about ignoring frameCount parameter for set()
1355 frameCount = mSharedBuffer->size();
1356 } else if (frameCount == 0) {
1357 frameCount = mAfFrameCount;
1358 }
1359 if (mNotificationFramesAct != frameCount) {
1360 mNotificationFramesAct = frameCount;
1361 }
1362 } else if (mSharedBuffer != 0) {
1363 // FIXME: Ensure client side memory buffers need
1364 // not have additional alignment beyond sample
1365 // (e.g. 16 bit stereo accessed as 32 bit frame).
1366 size_t alignment = audio_bytes_per_sample(mFormat);
1367 if (alignment & 1) {
1368 // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1369 alignment = 1;
1370 }
1371 if (mChannelCount > 1) {
1372 // More than 2 channels does not require stronger alignment than stereo
1373 alignment <<= 1;
1374 }
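        // Illustrative example: 16-bit stereo PCM gives alignment == 4, so the shared
        // buffer must start on a 32-bit (one-frame) boundary.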
1375 if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1376 ALOGE("Invalid buffer alignment: address %p, channel count %u",
1377 mSharedBuffer->pointer(), mChannelCount);
1378 status = BAD_VALUE;
1379 goto release;
1380 }
1381
1382 // When initializing a shared buffer AudioTrack via constructors,
1383 // there's no frameCount parameter.
1384 // But when initializing a shared buffer AudioTrack via set(),
1385 // there _is_ a frameCount parameter. We silently ignore it.
1386 frameCount = mSharedBuffer->size() / mFrameSize;
1387 } else {
1388 size_t minFrameCount = 0;
1389 // For fast tracks the frame count calculations and checks are mostly done by server,
1390 // but we try to respect the application's request for notifications per buffer.
1391 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1392 if (mNotificationsPerBufferReq > 0) {
1393 // Avoid possible arithmetic overflow during multiplication.
1394 // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
1395 if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
1396 ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
1397 mNotificationsPerBufferReq, afFrameCountHAL);
1398 } else {
1399 minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
1400 }
1401 }
1402 } else {
1403 // for normal tracks precompute the frame count based on speed.
1404 const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1405 max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1406 minFrameCount = calculateMinFrameCount(
1407 mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1408 speed /*, 0 mNotificationsPerBufferReq*/);
1409 }
1410 if (frameCount < minFrameCount) {
1411 frameCount = minFrameCount;
1412 }
1413 }
1414
1415 audio_output_flags_t flags = mFlags;
1416
1417 pid_t tid = -1;
1418 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1419 if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1420 tid = mAudioTrackThread->getTid();
1421 }
1422 }
1423
1424 size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
1425 // but we will still need the original value also
1426 audio_session_t originalSessionId = mSessionId;
1427 sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1428 mSampleRate,
1429 mFormat,
1430 mChannelMask,
1431 &temp,
1432 &flags,
1433 mSharedBuffer,
1434 output,
1435 mClientPid,
1436 tid,
1437 &mSessionId,
1438 mClientUid,
1439 &status,
1440 mPortId);
1441 ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1442 "session ID changed from %d to %d", originalSessionId, mSessionId);
1443
1444 if (status != NO_ERROR) {
1445 ALOGE("AudioFlinger could not create track, status: %d", status);
1446 goto release;
1447 }
1448 ALOG_ASSERT(track != 0);
1449
1450 // AudioFlinger now owns the reference to the I/O handle,
1451 // so we are no longer responsible for releasing it.
1452
1453 // FIXME compare to AudioRecord
1454 sp<IMemory> iMem = track->getCblk();
1455 if (iMem == 0) {
1456 ALOGE("Could not get control block");
1457 return NO_INIT;
1458 }
1459 void *iMemPointer = iMem->pointer();
1460 if (iMemPointer == NULL) {
1461 ALOGE("Could not get control block pointer");
1462 return NO_INIT;
1463 }
1464 // invariant that mAudioTrack != 0 is true only after set() returns successfully
1465 if (mAudioTrack != 0) {
1466 IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1467 mDeathNotifier.clear();
1468 }
1469 mAudioTrack = track;
1470 mCblkMemory = iMem;
1471 IPCThreadState::self()->flushCommands();
1472
1473 audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1474 mCblk = cblk;
1475 // note that temp is the (possibly revised) value of frameCount
1476 if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1477 // In current design, AudioTrack client checks and ensures frame count validity before
1478 // passing it to AudioFlinger so AudioFlinger should not return a different value except
1479 // for fast track as it uses a special method of assigning frame count.
1480 ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1481 }
1482 frameCount = temp;
1483
1484 mAwaitBoost = false;
1485 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1486 if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1487 ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
1488 if (!mThreadCanCallJava) {
1489 mAwaitBoost = true;
1490 }
1491 } else {
1492 ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
1493 temp);
1494 }
1495 }
1496 mFlags = flags;
1497
1498 // Make sure that application is notified with sufficient margin before underrun.
1499 // The client can divide the AudioTrack buffer into sub-buffers,
1500 // and expresses its desire to server as the notification frame count.
1501 if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1502 size_t maxNotificationFrames;
1503 if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1504 // notify every HAL buffer, regardless of the size of the track buffer
1505 maxNotificationFrames = afFrameCountHAL;
1506 } else {
1507 // For normal tracks, use at least double-buffering if no sample rate conversion,
1508 // or at least triple-buffering if there is sample rate conversion
1509 const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
1510 maxNotificationFrames = frameCount / nBuffering;
1511 }
1512 if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
1513 if (mNotificationFramesAct == 0) {
1514 ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
1515 maxNotificationFrames, frameCount);
1516 } else {
1517 ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
1518 mNotificationFramesAct, maxNotificationFrames, frameCount);
1519 }
1520 mNotificationFramesAct = (uint32_t) maxNotificationFrames;
1521 }
1522 }
1523
1524 // We retain a copy of the I/O handle, but don't own the reference
1525 mOutput = output;
1526 mRefreshRemaining = true;
1527
1528 // Starting address of buffers in shared memory. If there is a shared buffer, buffers
1529 // is the value of pointer() for the shared buffer, otherwise buffers points
1530 // immediately after the control block. This address is for the mapping within client
1531 // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
1532 void* buffers;
1533 if (mSharedBuffer == 0) {
1534 buffers = cblk + 1;
1535 } else {
1536 buffers = mSharedBuffer->pointer();
1537 if (buffers == NULL) {
1538 ALOGE("Could not get buffer pointer");
1539 return NO_INIT;
1540 }
1541 }
1542
1543 mAudioTrack->attachAuxEffect(mAuxEffectId);
1544 // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
1545 // FIXME don't believe this lie
1546 mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
1547
1548 mFrameCount = frameCount;
1549 // If IAudioTrack is re-created, don't let the requested frameCount
1550 // decrease. This can confuse clients that cache frameCount().
1551 if (frameCount > mReqFrameCount) {
1552 mReqFrameCount = frameCount;
1553 }
1554
1555 // reset server position to 0 as we have new cblk.
1556 mServer = 0;
1557
1558 // update proxy
1559 if (mSharedBuffer == 0) {
1560 mStaticProxy.clear();
1561 mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1562 } else {
1563 mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1564 mProxy = mStaticProxy;
1565 }
1566
1567 mProxy->setVolumeLR(gain_minifloat_pack(
1568 gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1569 gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1570
1571 mProxy->setSendLevel(mSendLevel);
1572 const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1573 const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1574 const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1575 mProxy->setSampleRate(effectiveSampleRate);
1576
1577 AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1578 playbackRateTemp.mSpeed = effectiveSpeed;
1579 playbackRateTemp.mPitch = effectivePitch;
1580 mProxy->setPlaybackRate(playbackRateTemp);
1581 mProxy->setMinimum(mNotificationFramesAct);
1582
1583 mDeathNotifier = new DeathNotifier(this);
1584 IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1585
1586 if (mDeviceCallback != 0) {
1587 AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
1588 }
1589
1590 return NO_ERROR;
1591 }
1592
1593 release:
1594 AudioSystem::releaseOutput(output, streamType, mSessionId);
1595 if (status == NO_ERROR) {
1596 status = NO_INIT;
1597 }
1598 return status;
1599 }
1600
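// Illustrative (hypothetical) client usage in TRANSFER_OBTAIN mode; the variable names
// below are examples only, not part of this API:
//   AudioTrack::Buffer buf;
//   buf.frameCount = framesToWrite;             // request up to this many frames
//   if (track->obtainBuffer(&buf, -1 /*wait*/, NULL) == NO_ERROR) {
//       memcpy(buf.raw, source, buf.size);      // fill exactly buf.size bytes
//       track->releaseBuffer(&buf);             // make the frames available to the mixer
//   }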
1601 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1602 {
1603 if (audioBuffer == NULL) {
1604 if (nonContig != NULL) {
1605 *nonContig = 0;
1606 }
1607 return BAD_VALUE;
1608 }
1609 if (mTransfer != TRANSFER_OBTAIN) {
1610 audioBuffer->frameCount = 0;
1611 audioBuffer->size = 0;
1612 audioBuffer->raw = NULL;
1613 if (nonContig != NULL) {
1614 *nonContig = 0;
1615 }
1616 return INVALID_OPERATION;
1617 }
1618
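    // waitCount semantics: -1 waits indefinitely, 0 is non-blocking, and a positive value
    // waits up to waitCount * WAIT_PERIOD_MS (10 ms); for example, waitCount == 100 waits
    // about one second before giving up.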
1619 const struct timespec *requested;
1620 struct timespec timeout;
1621 if (waitCount == -1) {
1622 requested = &ClientProxy::kForever;
1623 } else if (waitCount == 0) {
1624 requested = &ClientProxy::kNonBlocking;
1625 } else if (waitCount > 0) {
1626 long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1627 timeout.tv_sec = ms / 1000;
1628 timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1629 requested = &timeout;
1630 } else {
1631 ALOGE("%s invalid waitCount %d", __func__, waitCount);
1632 requested = NULL;
1633 }
1634 return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1635 }
1636
1637 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1638 struct timespec *elapsed, size_t *nonContig)
1639 {
1640 // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1641 uint32_t oldSequence = 0;
1642 uint32_t newSequence;
1643
1644 Proxy::Buffer buffer;
1645 status_t status = NO_ERROR;
1646
1647 static const int32_t kMaxTries = 5;
1648 int32_t tryCounter = kMaxTries;
1649
1650 do {
1651 // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1652 // keep them from going away if another thread re-creates the track during obtainBuffer()
1653 sp<AudioTrackClientProxy> proxy;
1654 sp<IMemory> iMem;
1655
1656 { // start of lock scope
1657 AutoMutex lock(mLock);
1658
1659 newSequence = mSequence;
1660 // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1661 if (status == DEAD_OBJECT) {
1662 // re-create track, unless someone else has already done so
1663 if (newSequence == oldSequence) {
1664 status = restoreTrack_l("obtainBuffer");
1665 if (status != NO_ERROR) {
1666 buffer.mFrameCount = 0;
1667 buffer.mRaw = NULL;
1668 buffer.mNonContig = 0;
1669 break;
1670 }
1671 }
1672 }
1673 oldSequence = newSequence;
1674
1675 if (status == NOT_ENOUGH_DATA) {
1676 restartIfDisabled();
1677 }
1678
1679 // Keep the extra references
1680 proxy = mProxy;
1681 iMem = mCblkMemory;
1682
1683 if (mState == STATE_STOPPING) {
1684 status = -EINTR;
1685 buffer.mFrameCount = 0;
1686 buffer.mRaw = NULL;
1687 buffer.mNonContig = 0;
1688 break;
1689 }
1690
1691 // Non-blocking if track is stopped or paused
1692 if (mState != STATE_ACTIVE) {
1693 requested = &ClientProxy::kNonBlocking;
1694 }
1695
1696 } // end of lock scope
1697
1698 buffer.mFrameCount = audioBuffer->frameCount;
1699 // FIXME starts the requested timeout and elapsed over from scratch
1700 status = proxy->obtainBuffer(&buffer, requested, elapsed);
1701 } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1702
1703 audioBuffer->frameCount = buffer.mFrameCount;
1704 audioBuffer->size = buffer.mFrameCount * mFrameSize;
1705 audioBuffer->raw = buffer.mRaw;
1706 if (nonContig != NULL) {
1707 *nonContig = buffer.mNonContig;
1708 }
1709 return status;
1710 }
1711
1712 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1713 {
1714 // FIXME add error checking on mode, by adding an internal version
1715 if (mTransfer == TRANSFER_SHARED) {
1716 return;
1717 }
1718
1719 size_t stepCount = audioBuffer->size / mFrameSize;
1720 if (stepCount == 0) {
1721 return;
1722 }
1723
1724 Proxy::Buffer buffer;
1725 buffer.mFrameCount = stepCount;
1726 buffer.mRaw = audioBuffer->raw;
1727
1728 AutoMutex lock(mLock);
1729 mReleased += stepCount;
1730 mInUnderrun = false;
1731 mProxy->releaseBuffer(&buffer);
1732
1733 // restart track if it was disabled by audioflinger due to previous underrun
1734 restartIfDisabled();
1735 }
1736
1737 void AudioTrack::restartIfDisabled()
1738 {
1739 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1740 if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1741 ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1742 // FIXME ignoring status
1743 mAudioTrack->start();
1744 }
1745 }
1746
1747 // -------------------------------------------------------------------------
1748
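// Illustrative (hypothetical) streaming usage in TRANSFER_SYNC mode; write() below loops
// over obtainBuffer()/releaseBuffer() until all bytes are queued or an error occurs:
//   ssize_t queued = track->write(pcmData, numBytes, true /*blocking*/);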
1749 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1750 {
1751 if (mTransfer != TRANSFER_SYNC) {
1752 return INVALID_OPERATION;
1753 }
1754
1755 if (isDirect()) {
1756 AutoMutex lock(mLock);
1757 int32_t flags = android_atomic_and(
1758 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1759 &mCblk->mFlags);
1760 if (flags & CBLK_INVALID) {
1761 return DEAD_OBJECT;
1762 }
1763 }
1764
1765 if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1766         // Sanity-check: the user is most likely passing an error code, and it would
1767 // make the return value ambiguous (actualSize vs error).
1768 ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
1769 return BAD_VALUE;
1770 }
1771
1772 size_t written = 0;
1773 Buffer audioBuffer;
1774
1775 while (userSize >= mFrameSize) {
1776 audioBuffer.frameCount = userSize / mFrameSize;
1777
1778 status_t err = obtainBuffer(&audioBuffer,
1779 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1780 if (err < 0) {
1781 if (written > 0) {
1782 break;
1783 }
1784 if (err == TIMED_OUT || err == -EINTR) {
1785 err = WOULD_BLOCK;
1786 }
1787 return ssize_t(err);
1788 }
1789
1790 size_t toWrite = audioBuffer.size;
1791 memcpy(audioBuffer.i8, buffer, toWrite);
1792 buffer = ((const char *) buffer) + toWrite;
1793 userSize -= toWrite;
1794 written += toWrite;
1795
1796 releaseBuffer(&audioBuffer);
1797 }
1798
1799 if (written > 0) {
1800 mFramesWritten += written / mFrameSize;
1801 }
1802 return written;
1803 }
1804
1805 // -------------------------------------------------------------------------
1806
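// processAudioBuffer() is invoked from AudioTrackThread::threadLoop(). Its return value
// is the number of nanoseconds the callback thread should sleep before calling again,
// or one of the special values: 0 (run again immediately), NS_INACTIVE (pause until the
// track is restarted), NS_WHENEVER (sleep until wake() is called), NS_NEVER (exit the loop).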
1807 nsecs_t AudioTrack::processAudioBuffer()
1808 {
1809 // Currently the AudioTrack thread is not created if there are no callbacks.
1810 // Would it ever make sense to run the thread, even without callbacks?
1811 // If so, then replace this by checks at each use for mCbf != NULL.
1812 LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1813
1814 mLock.lock();
1815 if (mAwaitBoost) {
1816 mAwaitBoost = false;
1817 mLock.unlock();
1818 static const int32_t kMaxTries = 5;
1819 int32_t tryCounter = kMaxTries;
1820 uint32_t pollUs = 10000;
1821 do {
1822 int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
1823 if (policy == SCHED_FIFO || policy == SCHED_RR) {
1824 break;
1825 }
1826 usleep(pollUs);
1827 pollUs <<= 1;
1828 } while (tryCounter-- > 0);
1829 if (tryCounter < 0) {
1830 ALOGE("did not receive expected priority boost on time");
1831 }
1832 // Run again immediately
1833 return 0;
1834 }
1835
1836 // Can only reference mCblk while locked
1837 int32_t flags = android_atomic_and(
1838 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1839
1840 // Check for track invalidation
1841 if (flags & CBLK_INVALID) {
1842 // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1843 // AudioSystem cache. We should not exit here but after calling the callback so
1844 // that the upper layers can recreate the track
1845 if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1846 status_t status __unused = restoreTrack_l("processAudioBuffer");
1847 // FIXME unused status
1848 // after restoration, continue below to make sure that the loop and buffer events
1849 // are notified because they have been cleared from mCblk->mFlags above.
1850 }
1851 }
1852
1853 bool waitStreamEnd = mState == STATE_STOPPING;
1854 bool active = mState == STATE_ACTIVE;
1855
1856 // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1857 bool newUnderrun = false;
1858 if (flags & CBLK_UNDERRUN) {
1859 #if 0
1860 // Currently in shared buffer mode, when the server reaches the end of buffer,
1861 // the track stays active in continuous underrun state. It's up to the application
1862 // to pause or stop the track, or set the position to a new offset within buffer.
1863 // This was some experimental code to auto-pause on underrun. Keeping it here
1864 // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1865 if (mTransfer == TRANSFER_SHARED) {
1866 mState = STATE_PAUSED;
1867 active = false;
1868 }
1869 #endif
1870 if (!mInUnderrun) {
1871 mInUnderrun = true;
1872 newUnderrun = true;
1873 }
1874 }
1875
1876 // Get current position of server
1877 Modulo<uint32_t> position(updateAndGetPosition_l());
1878
1879 // Manage marker callback
1880 bool markerReached = false;
1881 Modulo<uint32_t> markerPosition(mMarkerPosition);
1882 // uses 32 bit wraparound for comparison with position.
1883 if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1884 mMarkerReached = markerReached = true;
1885 }
1886
1887 // Determine number of new position callback(s) that will be needed, while locked
1888 size_t newPosCount = 0;
1889 Modulo<uint32_t> newPosition(mNewPosition);
1890 uint32_t updatePeriod = mUpdatePeriod;
1891 // FIXME fails for wraparound, need 64 bits
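    // For example, if the server position has advanced 2.5 update periods past mNewPosition,
    // newPosCount becomes 3 and mNewPosition moves forward by 3 * updatePeriod.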
1892 if (updatePeriod > 0 && position >= newPosition) {
1893 newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1894 mNewPosition += updatePeriod * newPosCount;
1895 }
1896
1897 // Cache other fields that will be needed soon
1898 uint32_t sampleRate = mSampleRate;
1899 float speed = mPlaybackRate.mSpeed;
1900 const uint32_t notificationFrames = mNotificationFramesAct;
1901 if (mRefreshRemaining) {
1902 mRefreshRemaining = false;
1903 mRemainingFrames = notificationFrames;
1904 mRetryOnPartialBuffer = false;
1905 }
1906 size_t misalignment = mProxy->getMisalignment();
1907 uint32_t sequence = mSequence;
1908 sp<AudioTrackClientProxy> proxy = mProxy;
1909
1910 // Determine the number of new loop callback(s) that will be needed, while locked.
1911 int loopCountNotifications = 0;
1912 uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1913
1914 if (mLoopCount > 0) {
1915 int loopCount;
1916 size_t bufferPosition;
1917 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1918 loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1919 loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1920 mLoopCountNotified = loopCount; // discard any excess notifications
1921 } else if (mLoopCount < 0) {
1922 // FIXME: We're not accurate with notification count and position with infinite looping
1923 // since loopCount from server side will always return -1 (we could decrement it).
1924 size_t bufferPosition = mStaticProxy->getBufferPosition();
1925 loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1926 loopPeriod = mLoopEnd - bufferPosition;
1927 } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1928 size_t bufferPosition = mStaticProxy->getBufferPosition();
1929 loopPeriod = mFrameCount - bufferPosition;
1930 }
1931
1932 // These fields don't need to be cached, because they are assigned only by set():
1933 // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1934 // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1935
1936 mLock.unlock();
1937
1938 // get anchor time to account for callbacks.
1939 const nsecs_t timeBeforeCallbacks = systemTime();
1940
1941 if (waitStreamEnd) {
1942 // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1943 // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1944 // (and make sure we don't callback for more data while we're stopping).
1945 // This helps with position, marker notifications, and track invalidation.
1946 struct timespec timeout;
1947 timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1948 timeout.tv_nsec = 0;
1949
1950 status_t status = proxy->waitStreamEndDone(&timeout);
1951 switch (status) {
1952 case NO_ERROR:
1953 case DEAD_OBJECT:
1954 case TIMED_OUT:
1955 if (status != DEAD_OBJECT) {
1956             // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
1957 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
1958 mCbf(EVENT_STREAM_END, mUserData, NULL);
1959 }
1960 {
1961 AutoMutex lock(mLock);
1962 // The previously assigned value of waitStreamEnd is no longer valid,
1963 // since the mutex has been unlocked and either the callback handler
1964 // or another thread could have re-started the AudioTrack during that time.
1965 waitStreamEnd = mState == STATE_STOPPING;
1966 if (waitStreamEnd) {
1967 mState = STATE_STOPPED;
1968 mReleased = 0;
1969 }
1970 }
1971 if (waitStreamEnd && status != DEAD_OBJECT) {
1972 return NS_INACTIVE;
1973 }
1974 break;
1975 }
1976 return 0;
1977 }
1978
1979 // perform callbacks while unlocked
1980 if (newUnderrun) {
1981 mCbf(EVENT_UNDERRUN, mUserData, NULL);
1982 }
1983 while (loopCountNotifications > 0) {
1984 mCbf(EVENT_LOOP_END, mUserData, NULL);
1985 --loopCountNotifications;
1986 }
1987 if (flags & CBLK_BUFFER_END) {
1988 mCbf(EVENT_BUFFER_END, mUserData, NULL);
1989 }
1990 if (markerReached) {
1991 mCbf(EVENT_MARKER, mUserData, &markerPosition);
1992 }
1993 while (newPosCount > 0) {
1994 size_t temp = newPosition.value(); // FIXME size_t != uint32_t
1995 mCbf(EVENT_NEW_POS, mUserData, &temp);
1996 newPosition += updatePeriod;
1997 newPosCount--;
1998 }
1999
2000 if (mObservedSequence != sequence) {
2001 mObservedSequence = sequence;
2002 mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
2003 // for offloaded tracks, just wait for the upper layers to recreate the track
2004 if (isOffloadedOrDirect()) {
2005 return NS_INACTIVE;
2006 }
2007 }
2008
2009 // if inactive, then don't run me again until re-started
2010 if (!active) {
2011 return NS_INACTIVE;
2012 }
2013
2014 // Compute the estimated time until the next timed event (position, markers, loops)
2015 // FIXME only for non-compressed audio
2016 uint32_t minFrames = ~0;
2017 if (!markerReached && position < markerPosition) {
2018 minFrames = (markerPosition - position).value();
2019 }
2020 if (loopPeriod > 0 && loopPeriod < minFrames) {
2021 // loopPeriod is already adjusted for actual position.
2022 minFrames = loopPeriod;
2023 }
2024 if (updatePeriod > 0) {
2025 minFrames = min(minFrames, (newPosition - position).value());
2026 }
2027
2028 // If > 0, poll periodically to recover from a stuck server. A good value is 2.
2029 static const uint32_t kPoll = 0;
2030 if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2031 minFrames = kPoll * notificationFrames;
2032 }
2033
2034 // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2035 static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2036 const nsecs_t timeAfterCallbacks = systemTime();
2037
2038 // Convert frame units to time units
2039 nsecs_t ns = NS_WHENEVER;
2040 if (minFrames != (uint32_t) ~0) {
2041 ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
2042 ns -= (timeAfterCallbacks - timeBeforeCallbacks); // account for callback time
2043 // TODO: Should we warn if the callback time is too long?
2044 if (ns < 0) ns = 0;
2045 }
2046
2047 // If not supplying data by EVENT_MORE_DATA, then we're done
2048 if (mTransfer != TRANSFER_CALLBACK) {
2049 return ns;
2050 }
2051
2052 // EVENT_MORE_DATA callback handling.
2053 // Timing for linear pcm audio data formats can be derived directly from the
2054 // buffer fill level.
2055 // Timing for compressed data is not directly available from the buffer fill level,
2056 // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2057 // to return a certain fill level.
2058
2059 struct timespec timeout;
2060 const struct timespec *requested = &ClientProxy::kForever;
2061 if (ns != NS_WHENEVER) {
2062 timeout.tv_sec = ns / 1000000000LL;
2063 timeout.tv_nsec = ns % 1000000000LL;
2064 ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2065 requested = &timeout;
2066 }
2067
2068 size_t writtenFrames = 0;
2069 while (mRemainingFrames > 0) {
2070
2071 Buffer audioBuffer;
2072 audioBuffer.frameCount = mRemainingFrames;
2073 size_t nonContig;
2074 status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2075 LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2076 "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
2077 requested = &ClientProxy::kNonBlocking;
2078 size_t avail = audioBuffer.frameCount + nonContig;
2079 ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2080 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2081 if (err != NO_ERROR) {
2082 if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2083 (isOffloaded() && (err == DEAD_OBJECT))) {
2084 // FIXME bug 25195759
2085 return 1000000;
2086 }
2087 ALOGE("Error %d obtaining an audio buffer, giving up.", err);
2088 return NS_NEVER;
2089 }
2090
2091 if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2092 mRetryOnPartialBuffer = false;
2093 if (avail < mRemainingFrames) {
2094 if (ns > 0) { // account for obtain time
2095 const nsecs_t timeNow = systemTime();
2096 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2097 }
2098 nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2099 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2100 ns = myns;
2101 }
2102 return ns;
2103 }
2104 }
2105
2106 size_t reqSize = audioBuffer.size;
2107 mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
2108 size_t writtenSize = audioBuffer.size;
2109
2110 // Sanity check on returned size
2111 if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2112 ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2113 reqSize, ssize_t(writtenSize));
2114 return NS_NEVER;
2115 }
2116
2117 if (writtenSize == 0) {
2118 // The callback is done filling buffers
2119 // Keep this thread going to handle timed events and
2120 // still try to get more data in intervals of WAIT_PERIOD_MS
2121 // but don't just loop and block the CPU, so wait
2122
2123 // mCbf(EVENT_MORE_DATA, ...) might either
2124 // (1) Block until it can fill the buffer, returning 0 size on EOS.
2125 // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2126 // (3) Return 0 size when no data is available, does not wait for more data.
2127 //
2128             // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2129 // We try to compute the wait time to avoid a tight sleep-wait cycle,
2130 // especially for case (3).
2131 //
2132             // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2133 // and this loop; whereas for case (3) we could simply check once with the full
2134 // buffer size and skip the loop entirely.
2135
2136 nsecs_t myns;
2137 if (audio_has_proportional_frames(mFormat)) {
2138 // time to wait based on buffer occupancy
2139 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2140 framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2141 // audio flinger thread buffer size (TODO: adjust for fast tracks)
2142 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2143 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2144                 // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2145 myns = datans + (afns / 2);
2146 } else {
2147 // FIXME: This could ping quite a bit if the buffer isn't full.
2148 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2149 myns = kWaitPeriodNs;
2150 }
2151 if (ns > 0) { // account for obtain and callback time
2152 const nsecs_t timeNow = systemTime();
2153 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2154 }
2155 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2156 ns = myns;
2157 }
2158 return ns;
2159 }
2160
2161 size_t releasedFrames = writtenSize / mFrameSize;
2162 audioBuffer.frameCount = releasedFrames;
2163 mRemainingFrames -= releasedFrames;
2164 if (misalignment >= releasedFrames) {
2165 misalignment -= releasedFrames;
2166 } else {
2167 misalignment = 0;
2168 }
2169
2170 releaseBuffer(&audioBuffer);
2171 writtenFrames += releasedFrames;
2172
2173 // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2174 // if callback doesn't like to accept the full chunk
2175 if (writtenSize < reqSize) {
2176 continue;
2177 }
2178
2179 // There could be enough non-contiguous frames available to satisfy the remaining request
2180 if (mRemainingFrames <= nonContig) {
2181 continue;
2182 }
2183
2184 #if 0
2185 // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2186 // sum <= notificationFrames. It replaces that series by at most two EVENT_MORE_DATA
2187 // that total to a sum == notificationFrames.
2188 if (0 < misalignment && misalignment <= mRemainingFrames) {
2189 mRemainingFrames = misalignment;
2190 return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2191 }
2192 #endif
2193
2194 }
2195 if (writtenFrames > 0) {
2196 AutoMutex lock(mLock);
2197 mFramesWritten += writtenFrames;
2198 }
2199 mRemainingFrames = notificationFrames;
2200 mRetryOnPartialBuffer = true;
2201
2202 // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2203 return 0;
2204 }
2205
2206 status_t AudioTrack::restoreTrack_l(const char *from)
2207 {
2208 ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2209 isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2210 ++mSequence;
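    // Bumping mSequence lets other threads (see obtainBuffer() and processAudioBuffer())
    // detect that the IAudioTrack has been re-created, so they do not restore it again.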
2211
2212 // refresh the audio configuration cache in this process to make sure we get new
2213 // output parameters and new IAudioFlinger in createTrack_l()
2214 AudioSystem::clearAudioConfigCache();
2215
2216 if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2217 // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2218 // reconsider enabling for linear PCM encodings when position can be preserved.
2219 return DEAD_OBJECT;
2220 }
2221
2222 // Save so we can return count since creation.
2223 mUnderrunCountOffset = getUnderrunCount_l();
2224
2225 // save the old static buffer position
2226 uint32_t staticPosition = 0;
2227 size_t bufferPosition = 0;
2228 int loopCount = 0;
2229 if (mStaticProxy != 0) {
2230 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2231 staticPosition = mStaticProxy->getPosition().unsignedValue();
2232 }
2233
2234 mFlags = mOrigFlags;
2235
2236 // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2237 // following member variables: mAudioTrack, mCblkMemory and mCblk.
2238 // It will also delete the strong references on previous IAudioTrack and IMemory.
2239 // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2240 status_t result = createTrack_l();
2241
2242 if (result == NO_ERROR) {
2243 // take the frames that will be lost by track recreation into account in saved position
2244 // For streaming tracks, this is the amount we obtained from the user/client
2245 // (not the number actually consumed at the server - those are already lost).
2246 if (mStaticProxy == 0) {
2247 mPosition = mReleased;
2248 }
2249 // Continue playback from last known position and restore loop.
2250 if (mStaticProxy != 0) {
2251 if (loopCount != 0) {
2252 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2253 mLoopStart, mLoopEnd, loopCount);
2254 } else {
2255 mStaticProxy->setBufferPosition(bufferPosition);
2256 if (bufferPosition == mFrameCount) {
2257 ALOGD("restoring track at end of static buffer");
2258 }
2259 }
2260 }
2261 // restore volume handler
2262 mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2263 sp<VolumeShaper::Operation> operationToEnd =
2264 new VolumeShaper::Operation(shaper.mOperation);
2265 // TODO: Ideally we would restore to the exact xOffset position
2266 // as returned by getVolumeShaperState(), but we don't have that
2267 // information when restoring at the client unless we periodically poll
2268 // the server or create shared memory state.
2269 //
2270 // For now, we simply advance to the end of the VolumeShaper effect
2271 // if it has been started.
2272 if (shaper.isStarted()) {
2273 operationToEnd->setNormalizedTime(1.f);
2274 }
2275 return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
2276 });
2277
2278 if (mState == STATE_ACTIVE) {
2279 result = mAudioTrack->start();
2280 }
2281 // server resets to zero so we offset
2282 mFramesWrittenServerOffset =
2283 mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2284 mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2285 }
2286 if (result != NO_ERROR) {
2287 ALOGW("restoreTrack_l() failed status %d", result);
2288 mState = STATE_STOPPED;
2289 mReleased = 0;
2290 }
2291
2292 return result;
2293 }
2294
2295 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2296 {
2297 // This is the sole place to read server consumed frames
2298 Modulo<uint32_t> newServer(mProxy->getPosition());
2299 const int32_t delta = (newServer - mServer).signedValue();
2300 // TODO There is controversy about whether there can be "negative jitter" in server position.
2301 // This should be investigated further, and if possible, it should be addressed.
2302 // A more definite failure mode is infrequent polling by client.
2303 // One could call (void)getPosition_l() in releaseBuffer(),
2304 // so mReleased and mPosition are always lock-step as best possible.
2305 // That should ensure delta never goes negative for infrequent polling
2306 // unless the server has more than 2^31 frames in its buffer,
2307 // in which case the use of uint32_t for these counters has bigger issues.
2308 ALOGE_IF(delta < 0,
2309 "detected illegal retrograde motion by the server: mServer advanced by %d",
2310 delta);
2311 mServer = newServer;
2312 if (delta > 0) { // avoid retrograde
2313 mPosition += delta;
2314 }
2315 return mPosition;
2316 }
2317
2318 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
2319 {
2320 // applicable for mixing tracks only (not offloaded or direct)
2321 if (mStaticProxy != 0) {
2322 return true; // static tracks do not have issues with buffer sizing.
2323 }
2324 const size_t minFrameCount =
2325 calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
2326 /*, 0 mNotificationsPerBufferReq*/);
2327 ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu minFrameCount %zu",
2328 mFrameCount, minFrameCount);
2329 return mFrameCount >= minFrameCount;
2330 }
2331
2332 status_t AudioTrack::setParameters(const String8& keyValuePairs)
2333 {
2334 AutoMutex lock(mLock);
2335 return mAudioTrack->setParameters(keyValuePairs);
2336 }
2337
2338 VolumeShaper::Status AudioTrack::applyVolumeShaper(
2339 const sp<VolumeShaper::Configuration>& configuration,
2340 const sp<VolumeShaper::Operation>& operation)
2341 {
2342 AutoMutex lock(mLock);
2343 mVolumeHandler->setIdIfNecessary(configuration);
2344 VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2345
2346 if (status == DEAD_OBJECT) {
2347 if (restoreTrack_l("applyVolumeShaper") == OK) {
2348 status = mAudioTrack->applyVolumeShaper(configuration, operation);
2349 }
2350 }
2351 if (status >= 0) {
2352 // save VolumeShaper for restore
2353 mVolumeHandler->applyVolumeShaper(configuration, operation);
2354 if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2355 mVolumeHandler->setStarted();
2356 }
2357 } else {
2358 // warn only if not an expected restore failure.
2359 ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2360 "applyVolumeShaper failed: %d", status);
2361 }
2362 return status;
2363 }
2364
2365 sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2366 {
2367 AutoMutex lock(mLock);
2368 sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2369 if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2370 if (restoreTrack_l("getVolumeShaperState") == OK) {
2371 state = mAudioTrack->getVolumeShaperState(id);
2372 }
2373 }
2374 return state;
2375 }
2376
2377 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2378 {
2379 if (timestamp == nullptr) {
2380 return BAD_VALUE;
2381 }
2382 AutoMutex lock(mLock);
2383 return getTimestamp_l(timestamp);
2384 }
2385
2386 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2387 {
2388 if (mCblk->mFlags & CBLK_INVALID) {
2389 const status_t status = restoreTrack_l("getTimestampExtended");
2390 if (status != OK) {
2391 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2392 // recommending that the track be recreated.
2393 return DEAD_OBJECT;
2394 }
2395 }
2396 // check for offloaded/direct here in case restoring somehow changed those flags.
2397 if (isOffloadedOrDirect_l()) {
2398 return INVALID_OPERATION; // not supported
2399 }
2400 status_t status = mProxy->getTimestamp(timestamp);
2401 LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
2402 bool found = false;
2403 timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2404 timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2405 // server side frame offset in case AudioTrack has been restored.
2406 for (int i = ExtendedTimestamp::LOCATION_SERVER;
2407 i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2408 if (timestamp->mTimeNs[i] >= 0) {
2409             // apply server offset (frames flushed are ignored
2410 // so we don't report the jump when the flush occurs).
2411 timestamp->mPosition[i] += mFramesWrittenServerOffset;
2412 found = true;
2413 }
2414 }
2415 return found ? OK : WOULD_BLOCK;
2416 }
2417
2418 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2419 {
2420 AutoMutex lock(mLock);
2421 return getTimestamp_l(timestamp);
2422 }
2423
2424 status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
2425 {
2426 bool previousTimestampValid = mPreviousTimestampValid;
2427 // Set false here to cover all the error return cases.
2428 mPreviousTimestampValid = false;
2429
2430 switch (mState) {
2431 case STATE_ACTIVE:
2432 case STATE_PAUSED:
2433 break; // handle below
2434 case STATE_FLUSHED:
2435 case STATE_STOPPED:
2436 return WOULD_BLOCK;
2437 case STATE_STOPPING:
2438 case STATE_PAUSED_STOPPING:
2439 if (!isOffloaded_l()) {
2440 return INVALID_OPERATION;
2441 }
2442 break; // offloaded tracks handled below
2443 default:
2444 LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2445 break;
2446 }
2447
2448 if (mCblk->mFlags & CBLK_INVALID) {
2449 const status_t status = restoreTrack_l("getTimestamp");
2450 if (status != OK) {
2451 // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2452 // recommending that the track be recreated.
2453 return DEAD_OBJECT;
2454 }
2455 }
2456
2457 // The presented frame count must always lag behind the consumed frame count.
2458 // To avoid a race, read the presented frames first. This ensures that presented <= consumed.
2459
2460 status_t status;
2461 if (isOffloadedOrDirect_l()) {
2462 // use Binder to get timestamp
2463 status = mAudioTrack->getTimestamp(timestamp);
2464 } else {
2465 // read timestamp from shared memory
2466 ExtendedTimestamp ets;
2467 status = mProxy->getTimestamp(&ets);
2468 if (status == OK) {
2469 ExtendedTimestamp::Location location;
2470             status = ets.getBestTimestamp(&timestamp, &location);
2471
2472 if (status == OK) {
2473 // It is possible that the best location has moved from the kernel to the server.
2474 // In this case we adjust the position from the previous computed latency.
2475 if (location == ExtendedTimestamp::LOCATION_SERVER) {
2476 ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2477 "getTimestamp() location moved from kernel to server");
2478 // check that the last kernel OK time info exists and the positions
2479 // are valid (if they predate the current track, the positions may
2480 // be zero or negative).
2481 const int64_t frames =
2482 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2483 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2484 ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2485 ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2486 ?
2487 int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2488 / 1000)
2489 :
2490 (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2491 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2492 ALOGV("frame adjustment:%lld timestamp:%s",
2493 (long long)frames, ets.toString().c_str());
2494 if (frames >= ets.mPosition[location]) {
2495 timestamp.mPosition = 0;
2496 } else {
2497 timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2498 }
2499 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2500 ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2501 "getTimestamp() location moved from server to kernel");
2502 }
2503
2504 // We update the timestamp time even when paused.
2505 if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
2506 const int64_t now = systemTime();
2507 const int64_t at = convertTimespecToNs(timestamp.mTime);
2508 const int64_t lag =
2509 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2510 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
2511 ? int64_t(mAfLatency * 1000000LL)
2512 : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2513 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
2514 * NANOS_PER_SECOND / mSampleRate;
2515 const int64_t limit = now - lag; // no earlier than this limit
2516 if (at < limit) {
2517 ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
2518 (long long)lag, (long long)at, (long long)limit);
2519 timestamp.mTime.tv_sec = limit / NANOS_PER_SECOND;
2520 timestamp.mTime.tv_nsec = limit % NANOS_PER_SECOND; // compiler opt.
2521 }
2522 }
2523 mPreviousLocation = location;
2524 } else {
2525 // right after AudioTrack is started, one may not find a timestamp
2526 ALOGV("getBestTimestamp did not find timestamp");
2527 }
2528 }
2529 if (status == INVALID_OPERATION) {
2530 // INVALID_OPERATION occurs when no timestamp has been issued by the server;
2531 // other failures are signaled by a negative time.
2532 // If we come out of FLUSHED or STOPPED where the position is known
2533 // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
2534 // "zero" for NuPlayer). We don't convert for track restoration as position
2535 // does not reset.
2536 ALOGV("timestamp server offset:%lld restore frames:%lld",
2537 (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
2538 if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
2539 status = WOULD_BLOCK;
2540 }
2541 }
2542 }
2543 if (status != NO_ERROR) {
2544 ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2545 return status;
2546 }
2547 if (isOffloadedOrDirect_l()) {
2548 if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2549 // use cached paused position in case another offloaded track is running.
2550 timestamp.mPosition = mPausedPosition;
2551             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2552 // TODO: adjust for delay
2553 return NO_ERROR;
2554 }
2555
2556 // Check whether a pending flush or stop has completed, as those commands may
2557 // be asynchronous or return near finish or exhibit glitchy behavior.
2558 //
2559 // Originally this showed up as the first timestamp being a continuation of
2560 // the previous song under gapless playback.
2561 // However, we sometimes see zero timestamps, then a glitch of
2562 // the previous song's position, and then correct timestamps afterwards.
2563 if (mStartUs != 0 && mSampleRate != 0) {
2564 static const int kTimeJitterUs = 100000; // 100 ms
2565 static const int k1SecUs = 1000000;
2566
2567 const int64_t timeNow = getNowUs();
2568
2569 if (timeNow < mStartUs + k1SecUs) { // within first second of starting
2570 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2571 if (timestampTimeUs < mStartUs) {
2572 return WOULD_BLOCK; // stale timestamp time, occurs before start.
2573 }
2574 const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
2575 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2576 / ((double)mSampleRate * mPlaybackRate.mSpeed);
2577
2578 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2579 // Verify that the counter can't count faster than the sample rate
2580 // since the start time. If greater, then that means we may have failed
2581 // to completely flush or stop the previous playing track.
2582 ALOGW_IF(!mTimestampStartupGlitchReported,
2583 "getTimestamp startup glitch detected"
2584 " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2585 (long long)deltaTimeUs, (long long)deltaPositionByUs,
2586 timestamp.mPosition);
2587 mTimestampStartupGlitchReported = true;
2588 if (previousTimestampValid
2589 && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2590 timestamp = mPreviousTimestamp;
2591 mPreviousTimestampValid = true;
2592 return NO_ERROR;
2593 }
2594 return WOULD_BLOCK;
2595 }
2596 if (deltaPositionByUs != 0) {
2597 mStartUs = 0; // don't check again, we got valid nonzero position.
2598 }
2599 } else {
2600 mStartUs = 0; // don't check again, start time expired.
2601 }
2602 mTimestampStartupGlitchReported = false;
2603 }
2604 } else {
2605 // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2606 (void) updateAndGetPosition_l();
2607 // Server consumed (mServer) and presented both use the same server time base,
2608 // and server consumed is always >= presented.
2609 // The delta between these represents the number of frames in the buffer pipeline.
2610 // If this delta between these is greater than the client position, it means that
2611 // actually presented is still stuck at the starting line (figuratively speaking),
2612 // waiting for the first frame to go by. So we can't report a valid timestamp yet.
2613 // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2614 // mPosition exceeds 32 bits.
2615 // TODO Remove when timestamp is updated to contain pipeline status info.
2616 const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2617 if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
2618 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2619 return INVALID_OPERATION;
2620 }
2621 // Convert timestamp position from server time base to client time base.
2622 // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2623 // But if we change it to 64-bit then this could fail.
2624 // Use Modulo computation here.
2625 timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2626 // Immediately after a call to getPosition_l(), mPosition and
2627 // mServer both represent the same frame position. mPosition is
2628 // in client's point of view, and mServer is in server's point of
2629 // view. So the difference between them is the "fudge factor"
2630 // between client and server views due to stop() and/or new
2631 // IAudioTrack. And timestamp.mPosition is initially in server's
2632 // point of view, so we need to apply the same fudge factor to it.
2633 }
2634
2635 // Prevent retrograde motion in timestamp.
2636 // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2637 if (status == NO_ERROR) {
2638 if (previousTimestampValid) {
2639 const int64_t previousTimeNanos = convertTimespecToNs(mPreviousTimestamp.mTime);
2640 const int64_t currentTimeNanos = convertTimespecToNs(timestamp.mTime);
2641 if (currentTimeNanos < previousTimeNanos) {
2642 ALOGW("retrograde timestamp time corrected, %lld < %lld",
2643 (long long)currentTimeNanos, (long long)previousTimeNanos);
2644 timestamp.mTime = mPreviousTimestamp.mTime;
2645 }
2646
2647 // Looking at signed delta will work even when the timestamps
2648 // are wrapping around.
2649 int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2650 - mPreviousTimestamp.mPosition).signedValue();
2651 if (deltaPosition < 0) {
2652 // Only report once per position instead of spamming the log.
2653 if (!mRetrogradeMotionReported) {
2654 ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2655 deltaPosition,
2656 timestamp.mPosition,
2657 mPreviousTimestamp.mPosition);
2658 mRetrogradeMotionReported = true;
2659 }
2660 } else {
2661 mRetrogradeMotionReported = false;
2662 }
2663 if (deltaPosition < 0) {
2664 timestamp.mPosition = mPreviousTimestamp.mPosition;
2665 deltaPosition = 0;
2666 }
2667 #if 0
2668 // Uncomment this to verify audio timestamp rate.
2669 const int64_t deltaTime =
2670 convertTimespecToNs(timestamp.mTime) - previousTimeNanos;
2671 if (deltaTime != 0) {
2672 const int64_t computedSampleRate =
2673 deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
2674 ALOGD("computedSampleRate:%u sampleRate:%u",
2675 (unsigned)computedSampleRate, mSampleRate);
2676 }
2677 #endif
2678 }
2679 mPreviousTimestamp = timestamp;
2680 mPreviousTimestampValid = true;
2681 }
2682
2683 return status;
2684 }
2685
2686 String8 AudioTrack::getParameters(const String8& keys)
2687 {
2688 audio_io_handle_t output = getOutput();
2689 if (output != AUDIO_IO_HANDLE_NONE) {
2690 return AudioSystem::getParameters(output, keys);
2691 } else {
2692 return String8::empty();
2693 }
2694 }
2695
2696 bool AudioTrack::isOffloaded() const
2697 {
2698 AutoMutex lock(mLock);
2699 return isOffloaded_l();
2700 }
2701
2702 bool AudioTrack::isDirect() const
2703 {
2704 AutoMutex lock(mLock);
2705 return isDirect_l();
2706 }
2707
2708 bool AudioTrack::isOffloadedOrDirect() const
2709 {
2710 AutoMutex lock(mLock);
2711 return isOffloadedOrDirect_l();
2712 }
2713
2714
2715 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2716 {
2717
2718 const size_t SIZE = 256;
2719 char buffer[SIZE];
2720 String8 result;
2721
2722 result.append(" AudioTrack::dump\n");
2723 snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2724 mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2725 result.append(buffer);
2726 snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2727 mChannelCount, mFrameCount);
2728 result.append(buffer);
2729 snprintf(buffer, 255, " sample rate(%u), speed(%f), status(%d)\n",
2730 mSampleRate, mPlaybackRate.mSpeed, mStatus);
2731 result.append(buffer);
2732 snprintf(buffer, 255, " state(%d), latency (%d)\n", mState, mLatency);
2733 result.append(buffer);
2734 ::write(fd, result.string(), result.size());
2735 return NO_ERROR;
2736 }
2737
2738 uint32_t AudioTrack::getUnderrunCount() const
2739 {
2740 AutoMutex lock(mLock);
2741 return getUnderrunCount_l();
2742 }
2743
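// Returns the underrun count accumulated since this AudioTrack was created:
// mUnderrunCountOffset (saved in restoreTrack_l()) carries over the count from any
// previous IAudioTrack on top of the current proxy's count.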
2744 uint32_t AudioTrack::getUnderrunCount_l() const
2745 {
2746 return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2747 }
2748
2749 uint32_t AudioTrack::getUnderrunFrames() const
2750 {
2751 AutoMutex lock(mLock);
2752 return mProxy->getUnderrunFrames();
2753 }
2754
2755 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2756 {
2757 if (callback == 0) {
2758 ALOGW("%s adding NULL callback!", __FUNCTION__);
2759 return BAD_VALUE;
2760 }
2761 AutoMutex lock(mLock);
2762 if (mDeviceCallback == callback) {
2763 ALOGW("%s adding same callback!", __FUNCTION__);
2764 return INVALID_OPERATION;
2765 }
2766 status_t status = NO_ERROR;
2767 if (mOutput != AUDIO_IO_HANDLE_NONE) {
2768 if (mDeviceCallback != 0) {
2769 ALOGW("%s callback already present!", __FUNCTION__);
2770 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2771 }
2772 status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
2773 }
2774 mDeviceCallback = callback;
2775 return status;
2776 }
2777
2778 status_t AudioTrack::removeAudioDeviceCallback(
2779 const sp<AudioSystem::AudioDeviceCallback>& callback)
2780 {
2781 if (callback == 0) {
2782 ALOGW("%s removing NULL callback!", __FUNCTION__);
2783 return BAD_VALUE;
2784 }
2785 AutoMutex lock(mLock);
2786 if (mDeviceCallback != callback) {
2787 ALOGW("%s removing different callback!", __FUNCTION__);
2788 return INVALID_OPERATION;
2789 }
2790 if (mOutput != AUDIO_IO_HANDLE_NONE) {
2791 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2792 }
2793 mDeviceCallback = 0;
2794 return NO_ERROR;
2795 }
2796
2797 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
2798 {
2799 if (msec == nullptr ||
2800 (location != ExtendedTimestamp::LOCATION_SERVER
2801 && location != ExtendedTimestamp::LOCATION_KERNEL)) {
2802 return BAD_VALUE;
2803 }
2804 AutoMutex lock(mLock);
2805 // inclusive of offloaded and direct tracks.
2806 //
2807 // It is possible, but not enabled, to allow duration computation for non-pcm
2808 // audio_has_proportional_frames() formats because currently they have
2809 // the drain rate equivalent to the pcm sample rate * framesize.
2810 if (!isPurePcmData_l()) {
2811 return INVALID_OPERATION;
2812 }
2813 ExtendedTimestamp ets;
2814 if (getTimestamp_l(&ets) == OK
2815 && ets.mTimeNs[location] > 0) {
2816 int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
2817 - ets.mPosition[location];
2818 if (diff < 0) {
2819 *msec = 0;
2820 } else {
2821 // ms is the playback time by frames
2822 int64_t ms = (int64_t)((double)diff * 1000 /
2823 ((double)mSampleRate * mPlaybackRate.mSpeed));
2824 // clockdiff is the timestamp age (negative)
2825 int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
2826 ets.mTimeNs[location]
2827 + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
2828 - systemTime(SYSTEM_TIME_MONOTONIC);
2829
2830 //ALOGV("ms: %lld clockdiff: %lld", (long long)ms, (long long)clockdiff);
2831 static const int NANOS_PER_MILLIS = 1000000;
2832 *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
2833 }
2834 return NO_ERROR;
2835 }
2836 if (location != ExtendedTimestamp::LOCATION_SERVER) {
2837 return INVALID_OPERATION; // LOCATION_KERNEL is not available
2838 }
2839 // use server position directly (offloaded and direct arrive here)
2840 updateAndGetPosition_l();
2841 int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
2842 *msec = (diff <= 0) ? 0
2843 : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
2844 return NO_ERROR;
2845 }
2846
2847 bool AudioTrack::hasStarted()
2848 {
2849 AutoMutex lock(mLock);
2850 switch (mState) {
2851 case STATE_STOPPED:
2852 if (isOffloadedOrDirect_l()) {
2853 // check if we have started in the past to return true.
2854 return mStartUs > 0;
2855 }
2856 // A normal audio track may still be draining, so
2857 // check if stream has ended. This covers fasttrack position
2858 // instability and start/stop without any data written.
2859 if (mProxy->getStreamEndDone()) {
2860 return true;
2861 }
2862 // fall through
2863 case STATE_ACTIVE:
2864 case STATE_STOPPING:
2865 break;
2866 case STATE_PAUSED:
2867 case STATE_PAUSED_STOPPING:
2868 case STATE_FLUSHED:
2869 return false; // we're not active
2870 default:
2871 LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
2872 break;
2873 }
2874
2875 // wait indicates whether we need to wait for a timestamp.
2876 // This is conservatively figured - if we encounter an unexpected error
2877 // then we will not wait.
2878 bool wait = false;
2879 if (isOffloadedOrDirect_l()) {
2880 AudioTimestamp ts;
2881 status_t status = getTimestamp_l(ts);
2882 if (status == WOULD_BLOCK) {
2883 wait = true;
2884 } else if (status == OK) {
2885 wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
2886 }
2887 ALOGV("hasStarted wait:%d ts:%u start position:%lld",
2888 (int)wait,
2889 ts.mPosition,
2890 (long long)mStartTs.mPosition);
2891 } else {
2892 int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
2893 ExtendedTimestamp ets;
2894 status_t status = getTimestamp_l(&ets);
2895 if (status == WOULD_BLOCK) { // no SERVER or KERNEL frame info in ets
2896 wait = true;
2897 } else if (status == OK) {
2898 for (location = ExtendedTimestamp::LOCATION_KERNEL;
2899 location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
2900 if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
2901 continue;
2902 }
2903 wait = ets.mPosition[location] == 0
2904 || ets.mPosition[location] == mStartEts.mPosition[location];
2905 break;
2906 }
2907 }
2908 ALOGV("hasStarted wait:%d ets:%lld start position:%lld",
2909 (int)wait,
2910 (long long)ets.mPosition[location],
2911 (long long)mStartEts.mPosition[location]);
2912 }
2913 return !wait;
2914 }
2915
2916 // =========================================================================
2917
2918 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2919 {
2920 sp<AudioTrack> audioTrack = mAudioTrack.promote();
2921 if (audioTrack != 0) {
2922 AutoMutex lock(audioTrack->mLock);
2923 audioTrack->mProxy->binderDied();
2924 }
2925 }
2926
2927 // =========================================================================
2928
2929 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2930 : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2931 mIgnoreNextPausedInt(false)
2932 {
2933 }
2934
2935 AudioTrack::AudioTrackThread::~AudioTrackThread()
2936 {
2937 }
2938
2939 bool AudioTrack::AudioTrackThread::threadLoop()
2940 {
2941 {
2942 AutoMutex _l(mMyLock);
2943 if (mPaused) {
2944 mMyCond.wait(mMyLock);
2945 // caller will check for exitPending()
2946 return true;
2947 }
2948 if (mIgnoreNextPausedInt) {
2949 mIgnoreNextPausedInt = false;
2950 mPausedInt = false;
2951 }
2952 if (mPausedInt) {
2953 if (mPausedNs > 0) {
2954 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2955 } else {
2956 mMyCond.wait(mMyLock);
2957 }
2958 mPausedInt = false;
2959 return true;
2960 }
2961 }
2962 if (exitPending()) {
2963 return false;
2964 }
2965 nsecs_t ns = mReceiver.processAudioBuffer();
2966 switch (ns) {
2967 case 0:
2968 return true;
2969 case NS_INACTIVE:
2970 pauseInternal();
2971 return true;
2972 case NS_NEVER:
2973 return false;
2974 case NS_WHENEVER:
2975 // Event driven: call wake() when callback notifications conditions change.
2976 ns = INT64_MAX;
2977 // fall through
2978 default:
2979 LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2980 pauseInternal(ns);
2981 return true;
2982 }
2983 }
2984
2985 void AudioTrack::AudioTrackThread::requestExit()
2986 {
2987 // must be in this order to avoid a race condition
2988 Thread::requestExit();
2989 resume();
2990 }
2991
2992 void AudioTrack::AudioTrackThread::pause()
2993 {
2994 AutoMutex _l(mMyLock);
2995 mPaused = true;
2996 }
2997
2998 void AudioTrack::AudioTrackThread::resume()
2999 {
3000 AutoMutex _l(mMyLock);
3001 mIgnoreNextPausedInt = true;
3002 if (mPaused || mPausedInt) {
3003 mPaused = false;
3004 mPausedInt = false;
3005 mMyCond.signal();
3006 }
3007 }
3008
3009 void AudioTrack::AudioTrackThread::wake()
3010 {
3011 AutoMutex _l(mMyLock);
3012 if (!mPaused) {
3013 // wake() might be called while servicing a callback - ignore the next
3014 // pause time and call processAudioBuffer.
3015 mIgnoreNextPausedInt = true;
3016 if (mPausedInt && mPausedNs > 0) {
3017 // audio track is active and internally paused with timeout.
3018 mPausedInt = false;
3019 mMyCond.signal();
3020 }
3021 }
3022 }
3023
3024 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3025 {
3026 AutoMutex _l(mMyLock);
3027 mPausedInt = true;
3028 mPausedNs = ns;
3029 }
3030
3031 } // namespace android
3032