/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
                            : "AudioStreamInternalPlay_Client")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <utils/Trace.h>

#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

AudioStreamInternalPlay::~AudioStreamInternalPlay() {}

constexpr int kRampMSec = 10; // time to apply a change in volume

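// Open the underlying stream, then set the volume ramp length based on the sample rate.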
aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
    aaudio_result_t result = AudioStreamInternal::open(builder);
    if (result == AAUDIO_OK) {
        // Sample rate is constrained to common values by now and should not overflow.
        int32_t numFrames = kRampMSec * getSampleRate() / AAUDIO_MILLIS_PER_SECOND;
        mVolumeRamp.setLengthInFrames(numFrames);
    }
    return result;
}

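// Stop the callback thread, then ask the audio service to pause the stream.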
aaudio_result_t AudioStreamInternalPlay::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("%s() mServiceStreamHandle invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    mAtomicTimestamp.clear();
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}

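// Ask the audio service to flush the stream.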
aaudio_result_t AudioStreamInternalPlay::requestFlush() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("%s() mServiceStreamHandle invalid", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}

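// Snap the client's write counter to the server's read counter, folding the
// difference into mFramesOffsetFromService so the frame positions reported to
// the application do not appear to jump.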
void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = writeCounter - readCounter;
    mFramesOffsetFromService += offset;
    ALOGV("%s() readN = %lld, writeN = %lld, offset = %lld", __func__,
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force writeCounter to match readCounter.
    // This is because we cannot change the read counter in the hardware.
    mAudioEndpoint.setDataWriteCounter(readCounter);
}

void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition();
}

// Write the data, blocking if needed and timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds) {
    return processData((void *)buffer, numFrames, timeoutNanoseconds);
}

// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                                        int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // We have not received any timestamps from the server yet.
        // Keep waiting until we get some valid timestamps, then start writing at the
        // current buffer position.
        ALOGV("%s() wait for valid timestamps", __func__);
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from the server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint.isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch up to an MMAP pointer that is already advancing.
        // This avoids initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint.isFreeRunning() && mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //      numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next read burst to occur?

                // Calculate frame position based off of the writeCounter because
                // the readCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }

    ATRACE_END();
    return framesWritten;
}

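// Convert the application data as needed and copy it into the shared buffer,
// applying the volume ramp. The wrapping buffer may expose the empty region in
// two parts. Returns the number of frames actually written.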
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                                int32_t numFrames) {
    WrappingBuffer wrappingBuffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }

            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            // Data conversion.
            float levelFrom;
            float levelTo;
            mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);

            AAudioDataConverter::FormattedData source(
                    (void *)byteBuffer,
                    getFormat(),
                    getSamplesPerFrame());
            AAudioDataConverter::FormattedData destination(
                    wrappingBuffer.data[partIndex],
                    getDeviceFormat(),
                    getDeviceChannelCount());

            AAudioDataConverter::convert(source, destination, framesToWrite,
                                         levelFrom, levelTo);

            byteBuffer += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    return framesWritten;
}

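// Return the number of frames that have been read (consumed) from the stream.
// While the stream is active this is estimated from the clock model, and it is
// clamped so it never moves backwards.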
int64_t AudioStreamInternalPlay::getFramesRead()
{
    int64_t framesReadHardware;
    if (isActive()) {
        framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesReadHardware = mAudioEndpoint.getDataReadCounter();
    }
    int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    return framesRead;
}

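// Return the number of frames written to the stream, in the same frame of
// reference as getFramesRead().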
int64_t AudioStreamInternalPlay::getFramesWritten()
{
    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
            + mFramesOffsetFromService;
    return framesWritten;
}

// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
    ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return NULL;
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer, mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream. This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                maybeCallErrorCallback(result);
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGV("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            break;
        }
    }

    ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
          __func__, result, (int) isActive());
    return NULL;
}

//------------------------------------------------------------------------------
// Implementation of PlayerBase
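// Combine the stream volume with the duck-and-mute volume and ramp to the new target.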
status_t AudioStreamInternalPlay::doSetVolume() {
    float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
    ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
          __func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
    mVolumeRamp.setTarget(combinedVolume);
    return android::NO_ERROR;
}