/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <audio_utils/format.h>
#include <aaudio/AAudio.h>
#include <media/MediaMetricsItem.h>

#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"

#undef ATRACE_TAG
#define ATRACE_TAG ATRACE_TAG_AUDIO
#include <utils/Trace.h>

// We do this after the #includes because if a header uses ALOG,
// it would fail on the reference to mInService.
#undef LOG_TAG
// This file is used in both client and server processes.
// This is needed to make sense of the logs more easily.
#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
                            : "AudioStreamInternalCapture_Client")

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
                                                       bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

aaudio_result_t AudioStreamInternalCapture::open(const AudioStreamBuilder &builder) {
    aaudio_result_t result = AudioStreamInternal::open(builder);
    if (result == AAUDIO_OK) {
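        // Configure the flowgraph to convert the device-side stream (format, channel
        // count, sample rate) into what the application requested. Mono blend and
        // balance are applied, but volume ramps are not used on the capture path.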
        result = mFlowGraph.configure(getDeviceFormat(),
                                      getDeviceSamplesPerFrame(),
                                      getDeviceSampleRate(),
                                      getFormat(),
                                      getSamplesPerFrame(),
                                      getSampleRate(),
                                      getRequireMonoBlend(),
                                      false /* useVolumeRamps */,
                                      getAudioBalance(),
                                      aaudio::resampler::MultiChannelResampler::Quality::Medium);

        if (result != AAUDIO_OK) {
            safeReleaseClose();
        }
    }
    return result;
}

void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
    int64_t readCounter = mAudioEndpoint->getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = readCounter - writeCounter;
    mFramesOffsetFromService += offset;
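    // For example, if readCounter = 100 and writeCounter = 400 then offset = -300.
    // After the read counter is forced to 400 below, getFramesRead() still reports
    // 400 - 300 = 100, so the position reported to the caller stays continuous.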
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force readCounter to match writeCounter.
    // This is because we cannot change the write counter in the hardware.
    mAudioEndpoint->setDataReadCounter(writeCounter);
}

// Read the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}

// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                           int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from the server.
        // Keep waiting until we get some valid timestamps then start reading from the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from the server.

    if (mAudioEndpoint->isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        // Jitter in the DSP can cause late writes to the FIFO.
        // This might be caused by resampling.
        // We want to read the FIFO after the latest possible time
        // that the DSP could have written the data.
        int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint->setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition(0 /*serverMargin*/);
        mNeedCatchUp.acknowledge();
    }

    // If the capture buffer is full beyond capacity then consider it an overrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning()
            && mAudioEndpoint->getFullFramesAvailable() > mAudioEndpoint->getBufferCapacityInFrames()) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //    numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?

                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
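                // For example, with a read counter of 960 and a device burst of 96
                // frames, we sleep until the clock model says frame 1056 should have
                // been written, so a full burst should be available when we wake.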
                const int64_t nextPosition = mAudioEndpoint->getDataReadCounter() +
                        getDeviceFramesPerBurst();
                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesProcessed;
}

aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                  int32_t numFrames) {
    WrappingBuffer wrappingBuffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeftInByteBuffer = numFrames;

    if (framesLeftInByteBuffer > 0) {
        // Pull data from the flowgraph in case there is residual data.
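        // Residual data can remain in the flowgraph when a previous call filled the app
        // buffer before the flowgraph was fully drained (see the note at the
        // mFlowGraph.process() call below).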
        const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.pull(
                (void *)byteBuffer,
                framesLeftInByteBuffer);

        const int32_t numBytesActuallyWrittenToByteBuffer =
                framesActuallyWrittenToByteBuffer * getBytesPerFrame();
        byteBuffer += numBytesActuallyWrittenToByteBuffer;
        framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
    }

    mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);
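    // The readable data may wrap around the end of the FIFO storage, so the
    // WrappingBuffer describes it as up to two contiguous parts.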

    // Copy the data out of the endpoint in one or two parts.
    int partIndex = 0;
    int framesReadFromAudioEndpoint = 0;
    while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
        const int32_t totalFramesInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
        int32_t framesAvailableInWrappingBuffer = totalFramesInWrappingBuffer;
        uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];

        if (framesAvailableInWrappingBuffer <= 0) break;

        // Push data from the wrapping buffer into the flowgraph in chunks of at most
        // flowgraph::kDefaultBufferSize frames.
        // Continuously pull as much data as possible from the flowgraph into the byte buffer.
        // The return value of mFlowGraph.process is the number of frames actually pulled.
        while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
            const int32_t framesToReadFromWrappingBuffer = std::min(flowgraph::kDefaultBufferSize,
                    framesAvailableInWrappingBuffer);

            const int32_t numBytesToReadFromWrappingBuffer = getBytesPerDeviceFrame() *
                    framesToReadFromWrappingBuffer;

            // If framesActuallyWrittenToByteBuffer < framesLeftInByteBuffer, it is guaranteed
            // that all the data pushed into the flowgraph has been pulled out. If there is no
            // more space in the byteBuffer, the remaining data will be pulled in the next
            // readNowWithConversion().
            const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.process(
                    (void *)currentWrappingBuffer,
                    framesToReadFromWrappingBuffer,
                    (void *)byteBuffer,
                    framesLeftInByteBuffer);

            const int32_t numBytesActuallyWrittenToByteBuffer =
                    framesActuallyWrittenToByteBuffer * getBytesPerFrame();
            byteBuffer += numBytesActuallyWrittenToByteBuffer;
            framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
            currentWrappingBuffer += numBytesToReadFromWrappingBuffer;
            framesAvailableInWrappingBuffer -= framesToReadFromWrappingBuffer;

            //ALOGD("%s() numBytesActuallyWrittenToByteBuffer %d, framesLeftInByteBuffer %d"
            //      "framesAvailableInWrappingBuffer %d, framesReadFromAudioEndpoint %d"
            //      , __func__, numBytesActuallyWrittenToByteBuffer, framesLeftInByteBuffer,
            //      framesAvailableInWrappingBuffer, framesReadFromAudioEndpoint);
        }
        framesReadFromAudioEndpoint += totalFramesInWrappingBuffer -
                framesAvailableInWrappingBuffer;
        partIndex++;
    }

    // Advance the endpoint's read index by the number of frames actually consumed
    // from the wrapping buffer.
    mAudioEndpoint->advanceReadIndex(framesReadFromAudioEndpoint);

    // Return the number of frames delivered to the app's buffer.
    return numFrames - framesLeftInByteBuffer;
}

int64_t AudioStreamInternalCapture::getFramesWritten() {
    if (mAudioEndpoint) {
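        // While the clock model is in control, extrapolate the hardware write position
        // from the current time; otherwise use the endpoint's raw write counter.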
        const int64_t framesWrittenHardware = isClockModelInControl()
                ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
                : mAudioEndpoint->getDataWriteCounter();
        // Add service offset and prevent retrograde motion.
        mLastFramesWritten = std::max(mLastFramesWritten,
                                      framesWrittenHardware + mFramesOffsetFromService);
    }
    return mLastFramesWritten;
}

int64_t AudioStreamInternalCapture::getFramesRead() {
    if (mAudioEndpoint) {
        mLastFramesRead = std::max(mLastFramesRead,
                mAudioEndpoint->getDataReadCounter() + mFramesOffsetFromService);
    }
    return mLastFramesRead;
}

// Read data from the stream and pass it to the callback for processing.
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return nullptr;

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {

        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
        if (result != mCallbackFrames) {
            ALOGE("callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // We only read some of the frames requested. The stream may have been
                // disconnected or timed out.
                processCommands();
                result = isDisconnected() ? AAUDIO_ERROR_DISCONNECTED : AAUDIO_ERROR_TIMEOUT;
            }
            maybeCallErrorCallback(result);
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            result = systemStopInternal();
            break;
        }
    }

    ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return nullptr;
}