1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 //#define LOG_NDEBUG 0
18 #include <utils/Log.h>
19
20 #define ATRACE_TAG ATRACE_TAG_AUDIO
21
22 #include <algorithm>
23
24 #include <media/MediaMetricsItem.h>
25 #include <utils/Trace.h>
26
27 #include "client/AudioStreamInternalPlay.h"
28 #include "utility/AudioClock.h"
29
// We do this after the #includes because if a header uses ALOG,
// it would fail on the reference to mInService.
32 #undef LOG_TAG
33 // This file is used in both client and server processes.
34 // This is needed to make sense of the logs more easily.
35 #define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
36 : "AudioStreamInternalPlay_Client")
37
38 using android::status_t;
39 using android::WrappingBuffer;
40
41 using namespace aaudio;
42
AudioStreamInternalPlay(AAudioServiceInterface & serviceInterface,bool inService)43 AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
44 bool inService)
45 : AudioStreamInternal(serviceInterface, inService) {
46
47 }
48
49 constexpr int kRampMSec = 10; // time to apply a change in volume
50
open(const AudioStreamBuilder & builder)51 aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
52 aaudio_result_t result = AudioStreamInternal::open(builder);
53 const bool useVolumeRamps = (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE);
54 if (result == AAUDIO_OK) {
55 result = mFlowGraph.configure(getFormat(),
56 getSamplesPerFrame(),
57 getSampleRate(),
58 getDeviceFormat(),
59 getDeviceSamplesPerFrame(),
60 getDeviceSampleRate(),
61 getRequireMonoBlend(),
62 useVolumeRamps,
63 getAudioBalance(),
64 aaudio::resampler::MultiChannelResampler::Quality::Medium);
65
66 if (result != AAUDIO_OK) {
67 safeReleaseClose();
68 }
69 // Sample rate is constrained to common values by now and should not overflow.
70 int32_t numFrames = kRampMSec * getSampleRate() / AAUDIO_MILLIS_PER_SECOND;
71 mFlowGraph.setRampLengthInFrames(numFrames);
72 }
73 return result;
74 }
75
76 // This must be called under mStreamLock.
requestPause_l()77 aaudio_result_t AudioStreamInternalPlay::requestPause_l()
78 {
79 aaudio_result_t result = stopCallback_l();
80 if (result != AAUDIO_OK) {
81 return result;
82 }
83 if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
84 ALOGW("%s() mServiceStreamHandle invalid", __func__);
85 return AAUDIO_ERROR_INVALID_STATE;
86 }
87
88 mClockModel.stop(AudioClock::getNanoseconds());
89 setState(AAUDIO_STREAM_STATE_PAUSING);
90 mAtomicInternalTimestamp.clear();
91 return mServiceInterface.pauseStream(mServiceStreamHandleInfo);
92 }
93
requestFlush_l()94 aaudio_result_t AudioStreamInternalPlay::requestFlush_l() {
95 if (getServiceHandle() == AAUDIO_HANDLE_INVALID) {
96 ALOGW("%s() mServiceStreamHandle invalid", __func__);
97 return AAUDIO_ERROR_INVALID_STATE;
98 }
99
100 setState(AAUDIO_STREAM_STATE_FLUSHING);
101 return mServiceInterface.flushStream(mServiceStreamHandleInfo);
102 }
103
// Called just before the stream starts to guarantee a clean (silent) start.
void AudioStreamInternalPlay::prepareBuffersForStart() {
    // Reset volume ramps to avoid a starting noise.
    // This was called here instead of AudioStreamInternal so that
    // it will be easier to backport.
    mFlowGraph.reset();
    // Prevent stale data from being played.
    mAudioEndpoint->eraseDataMemory();
}
112
// Let the DSP drain all written data, then scrub the FIFO so a later start is
// clean. This implements the stop() contract: everything written to the
// stream should be audible before the hardware actually shuts down.
void AudioStreamInternalPlay::prepareBuffersForStop() {
    // If this is a shared stream and the FIFO is being read by the mixer then
    // we don't have to worry about the DSP reading past the valid data. We can skip all this.
    if(!mAudioEndpoint->isFreeRunning()) {
        return;
    }
    // Sleep until the DSP has read all of the data written.
    int64_t validFramesInBuffer = getFramesWritten() - getFramesRead();
    if (validFramesInBuffer >= 0) {
        int64_t emptyFramesInBuffer = ((int64_t) getBufferCapacity()) - validFramesInBuffer;

        // Prevent stale data from being played if the DSP is still running.
        // Erase some of the FIFO memory in front of the DSP read cursor.
        // Subtract one burst so we do not accidentally erase data that the DSP might be using.
        int64_t framesToErase = std::max((int64_t) 0,
                                         emptyFramesInBuffer - getFramesPerBurst());
        mAudioEndpoint->eraseEmptyDataMemory(framesToErase);

        // Sleep until we are confident the DSP has consumed all of the valid data.
        // Sleep for one extra burst as a safety margin because the IsochronousClockModel
        // is not perfectly accurate.
        int64_t positionInEmptyMemory = getFramesWritten() + getFramesPerBurst();
        int64_t timeAllConsumed = mClockModel.convertPositionToTime(positionInEmptyMemory);
        int64_t durationAllConsumed = timeAllConsumed - AudioClock::getNanoseconds();
        // Prevent sleeping for too long.
        durationAllConsumed = std::min(200 * AAUDIO_NANOS_PER_MILLISECOND, durationAllConsumed);
        AudioClock::sleepForNanos(durationAllConsumed);
    }

    // Erase all of the memory in case the DSP keeps going and wraps around.
    mAudioEndpoint->eraseDataMemory();

    // Wait for the last buffer to reach the DAC.
    // This is because the expected behavior of stop() is that all data written to the stream
    // should be played before the hardware actually shuts down.
    // This is different than pause(), where we just end as soon as possible.
    // This can be important when, for example, playing car navigation and
    // you want the user to hear the complete instruction.
    if (mAtomicInternalTimestamp.isValid()) {
        // Use timestamps to calculate the latency between the DSP reading
        // a frame and when it reaches the DAC.
        // This code assumes that timestamps are accurate.
        Timestamp timestamp = mAtomicInternalTimestamp.read();
        int64_t dacPosition = timestamp.getPosition();
        int64_t hardwareReadTime = mClockModel.convertPositionToTime(dacPosition);
        int64_t hardwareLatencyNanos = timestamp.getNanoseconds() - hardwareReadTime;
        ALOGD("%s() hardwareLatencyNanos = %lld", __func__,
              (long long) hardwareLatencyNanos);
        // Prevent sleeping for too long.
        hardwareLatencyNanos = std::min(30 * AAUDIO_NANOS_PER_MILLISECOND,
                                        hardwareLatencyNanos);
        AudioClock::sleepForNanos(hardwareLatencyNanos);
    }
}
167
advanceClientToMatchServerPosition(int32_t serverMargin)168 void AudioStreamInternalPlay::advanceClientToMatchServerPosition(int32_t serverMargin) {
169 int64_t readCounter = mAudioEndpoint->getDataReadCounter() + serverMargin;
170 int64_t writeCounter = mAudioEndpoint->getDataWriteCounter();
171
172 // Bump offset so caller does not see the retrograde motion in getFramesRead().
173 int64_t offset = writeCounter - readCounter;
174 mFramesOffsetFromService += offset;
175 ALOGV("%s() readN = %lld, writeN = %lld, offset = %lld", __func__,
176 (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
177
178 // Force writeCounter to match readCounter.
179 // This is because we cannot change the read counter in the hardware.
180 mAudioEndpoint->setDataWriteCounter(readCounter);
181 }
182
// Called when the service reports that the stream was flushed; realign the
// client position with no extra margin.
void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition(0 /*serverMargin*/);
}
186
187 // Write the data, block if needed and timeoutMillis > 0
write(const void * buffer,int32_t numFrames,int64_t timeoutNanoseconds)188 aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
189 int64_t timeoutNanoseconds) {
190 return processData((void *)buffer, numFrames, timeoutNanoseconds);
191 }
192
// Write as much data as we can without blocking.
// Returns the number of frames written (possibly 0) or a negative AAudio
// error. May set *wakeTimePtr to an ideal time for the caller to try again.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {
    // Service commands (e.g. disconnect) take priority over writing data.
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGV("%s() wait for valid timestamps", __func__);
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0; // no frames written yet
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint->isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint->setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        // We add a one burst margin in case the DSP advances before we can write the data.
        // This can help prevent the beginning of the stream from being skipped.
        advanceClientToMatchServerPosition(getFramesPerBurst());
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning() && mAudioEndpoint->getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Sleep if there is too much data in the buffer.
    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr
            && (mAudioEndpoint->getFullFramesAvailable() >= getDeviceBufferSize())) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // Calculate when there will be room available to write to the buffer.
                // If the appBufferSize is smaller than the endpointBufferSize then
                // we will have room to write data beyond the appBufferSize.
                // That is a technique used to reduce glitches without adding latency.
                const int64_t appBufferSize = getDeviceBufferSize();
                // The endpoint buffer size is set to the maximum that can be written.
                // If we use it then we must carve out some room to write data when we wake up.
                const int64_t endBufferSize = mAudioEndpoint->getBufferSizeInFrames()
                        - getDeviceFramesPerBurst();
                const int64_t bestBufferSize = std::min(appBufferSize, endBufferSize);
                int64_t targetReadPosition = mAudioEndpoint->getDataWriteCounter() - bestBufferSize;
                wakeTime = mClockModel.convertPositionToTime(targetReadPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesWritten;
}
294
295
writeNowWithConversion(const void * buffer,int32_t numFrames)296 aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
297 int32_t numFrames) {
298 WrappingBuffer wrappingBuffer;
299 uint8_t *byteBuffer = (uint8_t *) buffer;
300 int32_t framesLeftInByteBuffer = numFrames;
301
302 mAudioEndpoint->getEmptyFramesAvailable(&wrappingBuffer);
303
304 // Write data in one or two parts.
305 int partIndex = 0;
306 int framesWrittenToAudioEndpoint = 0;
307 while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
308 int32_t framesAvailableInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
309 uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];
310
311 if (framesAvailableInWrappingBuffer > 0) {
312 // Pull data from the flowgraph in case there is residual data.
313 const int32_t framesActuallyWrittenToWrappingBuffer = mFlowGraph.pull(
314 (void*) currentWrappingBuffer,
315 framesAvailableInWrappingBuffer);
316
317 const int32_t numBytesActuallyWrittenToWrappingBuffer =
318 framesActuallyWrittenToWrappingBuffer * getBytesPerDeviceFrame();
319 currentWrappingBuffer += numBytesActuallyWrittenToWrappingBuffer;
320 framesAvailableInWrappingBuffer -= framesActuallyWrittenToWrappingBuffer;
321 framesWrittenToAudioEndpoint += framesActuallyWrittenToWrappingBuffer;
322 } else {
323 break;
324 }
325
326 // Put data from byteBuffer into the flowgraph one buffer (8 frames) at a time.
327 // Continuously pull as much data as possible from the flowgraph into the wrapping buffer.
328 // The return value of mFlowGraph.process is the number of frames actually pulled.
329 while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
330 int32_t framesToWriteFromByteBuffer = std::min(flowgraph::kDefaultBufferSize,
331 framesLeftInByteBuffer);
332 // If the wrapping buffer is running low, write one frame at a time.
333 if (framesAvailableInWrappingBuffer < flowgraph::kDefaultBufferSize) {
334 framesToWriteFromByteBuffer = 1;
335 }
336
337 const int32_t numBytesToWriteFromByteBuffer = getBytesPerFrame() *
338 framesToWriteFromByteBuffer;
339
340 //ALOGD("%s() framesLeftInByteBuffer %d, framesAvailableInWrappingBuffer %d"
341 // "framesToWriteFromByteBuffer %d, numBytesToWriteFromByteBuffer %d"
342 // , __func__, framesLeftInByteBuffer, framesAvailableInWrappingBuffer,
343 // framesToWriteFromByteBuffer, numBytesToWriteFromByteBuffer);
344
345 const int32_t framesActuallyWrittenToWrappingBuffer = mFlowGraph.process(
346 (void *)byteBuffer,
347 framesToWriteFromByteBuffer,
348 (void *)currentWrappingBuffer,
349 framesAvailableInWrappingBuffer);
350
351 byteBuffer += numBytesToWriteFromByteBuffer;
352 framesLeftInByteBuffer -= framesToWriteFromByteBuffer;
353 const int32_t numBytesActuallyWrittenToWrappingBuffer =
354 framesActuallyWrittenToWrappingBuffer * getBytesPerDeviceFrame();
355 currentWrappingBuffer += numBytesActuallyWrittenToWrappingBuffer;
356 framesAvailableInWrappingBuffer -= framesActuallyWrittenToWrappingBuffer;
357 framesWrittenToAudioEndpoint += framesActuallyWrittenToWrappingBuffer;
358
359 //ALOGD("%s() numBytesActuallyWrittenToWrappingBuffer %d, framesLeftInByteBuffer %d"
360 // "framesActuallyWrittenToWrappingBuffer %d, numBytesToWriteFromByteBuffer %d"
361 // "framesWrittenToAudioEndpoint %d"
362 // , __func__, numBytesActuallyWrittenToWrappingBuffer, framesLeftInByteBuffer,
363 // framesActuallyWrittenToWrappingBuffer, numBytesToWriteFromByteBuffer,
364 // framesWrittenToAudioEndpoint);
365 }
366 partIndex++;
367 }
368 //ALOGD("%s() framesWrittenToAudioEndpoint %d, numFrames %d"
369 // "framesLeftInByteBuffer %d"
370 // , __func__, framesWrittenToAudioEndpoint, numFrames,
371 // framesLeftInByteBuffer);
372
373 // The audio endpoint should reference the number of frames written to the wrapping buffer.
374 mAudioEndpoint->advanceWriteIndex(framesWrittenToAudioEndpoint);
375
376 // The internal code should use the number of frames read from the app.
377 return numFrames - framesLeftInByteBuffer;
378 }
379
getFramesRead()380 int64_t AudioStreamInternalPlay::getFramesRead() {
381 if (mAudioEndpoint) {
382 const int64_t framesReadHardware = isClockModelInControl()
383 ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
384 : mAudioEndpoint->getDataReadCounter();
385 // Add service offset and prevent retrograde motion.
386 mLastFramesRead = std::max(mLastFramesRead, framesReadHardware + mFramesOffsetFromService);
387 }
388 return mLastFramesRead;
389 }
390
getFramesWritten()391 int64_t AudioStreamInternalPlay::getFramesWritten() {
392 if (mAudioEndpoint) {
393 mLastFramesWritten = std::max(
394 mLastFramesWritten,
395 mAudioEndpoint->getDataWriteCounter() + mFramesOffsetFromService);
396 }
397 return mLastFramesWritten;
398 }
399
// Render audio in the application callback and then write the data to the stream.
// Runs on the callback thread until the stream stops, is disconnected,
// or the callback returns STOP. Always returns nullptr (thread exit value).
void *AudioStreamInternalPlay::callbackLoop() {
    ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return nullptr;
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);

        // Write audio data to stream. This is a BLOCKING WRITE!
        // Write data regardless of the callbackResult because we assume the data
        // is valid even when the callback returns AAUDIO_CALLBACK_RESULT_STOP.
        // Imagine a callback that is playing a large sound in memory.
        // When it gets to the end of the sound it can partially fill
        // the last buffer with the end of the sound, then zero pad the buffer, then return STOP.
        // If the callback has no valid data then it should zero-fill the entire buffer.
        result = write(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
        if ((result != mCallbackFrames)) {
            if (result >= 0) {
                // Only wrote some of the frames requested. The stream can be disconnected
                // or timed out.
                processCommands();
                result = isDisconnected() ? AAUDIO_ERROR_DISCONNECTED : AAUDIO_ERROR_TIMEOUT;
            }
            // Report the error to the app, then exit the loop.
            maybeCallErrorCallback(result);
            break;
        }

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            result = systemStopInternal();
            break;
        }
    }

    ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
          __func__, result, (int) isActive());
    return nullptr;
}
443
444 //------------------------------------------------------------------------------
445 // Implementation of PlayerBase
doSetVolume()446 status_t AudioStreamInternalPlay::doSetVolume() {
447 float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
448 ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
449 __func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
450 mFlowGraph.setTargetVolume(combinedVolume);
451 return android::NO_ERROR;
452 }
453