/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamRecord"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>

#include <aaudio/AAudio.h>
#include <audio_utils/primitives.h>
#include <media/AudioRecord.h>
#include <utils/String16.h>

#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamRecord.h"
#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"

using namespace android;
using namespace aaudio;

AudioStreamRecord::AudioStreamRecord()
    : AudioStreamLegacy()
    , mFixedBlockWriter(*this)
{
}

AudioStreamRecord::~AudioStreamRecord()
{
    const aaudio_stream_state_t state = getState();
    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
    ALOGE_IF(bad, "stream not closed, in state %d", state);
}

aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
{
    aaudio_result_t result = AAUDIO_OK;

    result = AudioStream::open(builder);
    if (result != AAUDIO_OK) {
        return result;
    }

    // Try to create an AudioRecord

    const aaudio_session_id_t requestedSessionId = builder.getSessionId();
    const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
    int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
            ? 2 : getSamplesPerFrame();
    audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
            audio_channel_in_mask_from_count(samplesPerFrame) :
            audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);

    size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
            : builder.getBufferCapacity();

    audio_input_flags_t flags;
    aaudio_performance_mode_t perfMode = getPerformanceMode();
    switch (perfMode) {
        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
            // If the app asks for a sessionId then it means they want to use effects.
            // So don't use RAW flag.
            flags = (audio_input_flags_t) ((requestedSessionId == AAUDIO_SESSION_ID_NONE)
                    ? (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW)
                    : (AUDIO_INPUT_FLAG_FAST));
            break;

        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
        case AAUDIO_PERFORMANCE_MODE_NONE:
        default:
            flags = AUDIO_INPUT_FLAG_NONE;
            break;
    }

    // Preserve behavior of API 26
    if (getFormat() == AUDIO_FORMAT_DEFAULT) {
        setFormat(AUDIO_FORMAT_PCM_FLOAT);
    }

    // Maybe change device format to get a FAST path.
    // AudioRecord does not support FAST mode for FLOAT data.
    // TODO AudioRecord should allow FLOAT data paths for FAST tracks.
    // So IF the user asks for low latency FLOAT
    // AND the sampleRate is likely to be compatible with FAST
    // THEN request I16 and convert to FLOAT when passing to user.
    // Note that hard coding 48000 Hz is not ideal because the sampleRate
    // for a FAST path might not be 48000 Hz.
    // It normally is but there is a chance that it is not.
    // And there is no reliable way to know that in advance.
    // Luckily the consequences of a wrong guess are minor.
    // We just may not get a FAST track.
    // But we wouldn't have anyway without this hack.
    constexpr int32_t kMostLikelySampleRateForFast = 48000;
    if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
            && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
            && (samplesPerFrame <= 2) // FAST only for mono and stereo
            && (getSampleRate() == kMostLikelySampleRateForFast
                || getSampleRate() == AAUDIO_UNSPECIFIED)) {
        setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
    } else {
        setDeviceFormat(getFormat());
    }

    uint32_t notificationFrames = 0;

    // Setup the callback if there is one.
    AudioRecord::callback_t callback = nullptr;
    void *callbackData = nullptr;
    AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
    if (builder.getDataCallbackProc() != nullptr) {
        streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
        callback = getLegacyCallback();
        callbackData = this;
        notificationFrames = builder.getFramesPerDataCallback();
    }
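    // Remember the requested callback size. AAUDIO_UNSPECIFIED means the app accepts
    // variable callback sizes, so no fixed block size adapter is needed later.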
    mCallbackBufferSize = builder.getFramesPerDataCallback();

    // Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
    audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
            ? AUDIO_PORT_HANDLE_NONE
            : getDeviceId();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(builder.getContentType());
    const audio_source_t source =
            AAudioConvert_inputPresetToAudioSource(builder.getInputPreset());

    const audio_flags_mask_t attrFlags =
            AAudioConvert_privacySensitiveToAudioFlagsMask(builder.isPrivacySensitive());
    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = AUDIO_USAGE_UNKNOWN, // only used for output
            .source = source,
            .flags = attrFlags, // Different than the AUDIO_INPUT_FLAGS
            .tags = ""
    };

    // ----------- open the AudioRecord ---------------------
    // Might retry, but never more than once.
    for (int i = 0; i < 2; i++) {
        const audio_format_t requestedInternalFormat = getDeviceFormat();

        mAudioRecord = new AudioRecord(
                mOpPackageName // const String16& opPackageName TODO does not compile
                );
        mAudioRecord->set(
                AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
                getSampleRate(),
                requestedInternalFormat,
                channelMask,
                frameCount,
                callback,
                callbackData,
                notificationFrames,
                false /*threadCanCallJava*/,
                sessionId,
                streamTransferType,
                flags,
                AUDIO_UID_INVALID, // DEFAULT uid
                -1,                // DEFAULT pid
                &attributes,
                selectedDeviceId
                );

        // Set it here so it can be logged by the destructor if the open failed.
        mAudioRecord->setCallerName(kCallerName);

        // Did we get a valid track?
        status_t status = mAudioRecord->initCheck();
        if (status != OK) {
            releaseCloseFinal();
            ALOGE("open(), initCheck() returned %d", status);
            return AAudioConvert_androidToAAudioResult(status);
        }

        // Check to see if it was worth hacking the deviceFormat.
        bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
                == AUDIO_INPUT_FLAG_FAST;
        if (getFormat() != getDeviceFormat() && !gotFastPath) {
            // We tried to get a FAST path by switching the device format.
            // But it didn't work. So we might as well reopen using the same
            // format for device and for app.
            ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
            mAudioRecord.clear();
            setDeviceFormat(getFormat());
        } else {
            break; // Keep the one we just opened.
        }
    }

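    // Key the mediametrics entry for this stream on the underlying AudioRecord's port ID.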
    mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD)
            + std::to_string(mAudioRecord->getPortId());

    // Get the actual values from the AudioRecord.
    setSamplesPerFrame(mAudioRecord->channelCount());

    int32_t actualSampleRate = mAudioRecord->getSampleRate();
    ALOGW_IF(actualSampleRate != getSampleRate(),
             "open() sampleRate changed from %d to %d",
             getSampleRate(), actualSampleRate);
    setSampleRate(actualSampleRate);

    // We may need to pass the data through a block size adapter to guarantee constant size.
    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
        // The block adapter runs before the format conversion.
        // So we need to use the device frame size.
        mBlockAdapterBytesPerFrame = getBytesPerDeviceFrame();
        int callbackSizeBytes = mBlockAdapterBytesPerFrame * mCallbackBufferSize;
        mFixedBlockWriter.open(callbackSizeBytes);
        mBlockAdapter = &mFixedBlockWriter;
    } else {
        mBlockAdapter = nullptr;
    }

    // Allocate format conversion buffer if needed.
    if (getDeviceFormat() == AUDIO_FORMAT_PCM_16_BIT
            && getFormat() == AUDIO_FORMAT_PCM_FLOAT) {

        if (builder.getDataCallbackProc() != nullptr) {
            // If we have a callback then we need to convert the data into an internal float
            // array and then pass that entire array to the app.
            mFormatConversionBufferSizeInFrames =
                    (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
                    ? mCallbackBufferSize : getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
        } else {
            // If we don't have a callback then we will read into an internal short array
            // and then convert into the app float array in read().
            mFormatConversionBufferSizeInFrames = getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
        }
        ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
              __func__, mFormatConversionBufferSizeInFrames);
    }

    // Update performance mode based on the actual stream.
    // For example, if the sample rate does not match native then you won't get a FAST track.
    audio_input_flags_t actualFlags = mAudioRecord->getFlags();
    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
    // FIXME Some platforms do not advertise RAW mode for low latency inputs.
    if ((actualFlags & (AUDIO_INPUT_FLAG_FAST))
            == (AUDIO_INPUT_FLAG_FAST)) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
    }
    setPerformanceMode(actualPerformanceMode);

    setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy

    // Log warning if we did not get what we asked for.
    ALOGW_IF(actualFlags != flags,
             "open() flags changed from 0x%08X to 0x%08X",
             flags, actualFlags);
    ALOGW_IF(actualPerformanceMode != perfMode,
             "open() perfMode changed from %d to %d",
             perfMode, actualPerformanceMode);

    setState(AAUDIO_STREAM_STATE_OPEN);
    setDeviceId(mAudioRecord->getRoutedDeviceId());

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) mAudioRecord->getSessionId();
    setSessionId(actualSessionId);

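    // Register a device callback so the stream is notified of routing changes.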
    mAudioRecord->addAudioDeviceCallback(mDeviceCallback);

    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::release_l() {
    // TODO add close() or release() to AudioFlinger's AudioRecord API.
    // Then call it from here
    if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
        mAudioRecord->removeAudioDeviceCallback(mDeviceCallback);
        logReleaseBufferState();
        mAudioRecord.clear();
        mFixedBlockWriter.close();
        return AudioStream::release_l();
    } else {
        return AAUDIO_OK; // already released
    }
}

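// If an I16-to-FLOAT conversion buffer was allocated in open(), convert the device data
// into it and return the converted buffer; otherwise return the device data unchanged.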
const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
    if (mFormatConversionBufferFloat.get() != nullptr) {
        LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
                            "%s() conversion size %d too large for buffer %d",
                            __func__, numFrames, mFormatConversionBufferSizeInFrames);

        int32_t numSamples = numFrames * getSamplesPerFrame();
        // Only conversion supported is I16 to FLOAT
        memcpy_to_float_from_i16(
                mFormatConversionBufferFloat.get(),
                (const int16_t *) audioData,
                numSamples);
        return mFormatConversionBufferFloat.get();
    } else {
        return audioData;
    }
}

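// Translate legacy AudioRecord callback events into the common AAudio callback operations.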
void AudioStreamRecord::processCallback(int event, void *info) {
    switch (event) {
        case AudioRecord::EVENT_MORE_DATA:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
            break;

        // Stream got rerouted so we disconnect.
        case AudioRecord::EVENT_NEW_IAUDIORECORD:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
            break;

        default:
            break;
    }
    return;
}

aaudio_result_t AudioStreamRecord::requestStart()
{
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Enable callback before starting AudioRecord to avoid shutting
    // down because of a race condition.
    mCallbackEnabled.store(true);
    aaudio_stream_state_t originalState = getState();
    // Set before starting the callback so that we are in the correct state
    // before updateStateMachine() can be called by the callback.
    setState(AAUDIO_STREAM_STATE_STARTING);
    mFramesWritten.reset32(); // service writes frames
    mTimestampPosition.reset32();
    status_t err = mAudioRecord->start(); // resets position to zero
    if (err != OK) {
        mCallbackEnabled.store(false);
        setState(originalState);
        return AAudioConvert_androidToAAudioResult(err);
    }
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::requestStop() {
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    setState(AAUDIO_STREAM_STATE_STOPPING);
    mFramesWritten.catchUpTo(getFramesRead());
    mTimestampPosition.catchUpTo(getFramesRead());
    mAudioRecord->stop();
    mCallbackEnabled.store(false);
    // Pass false to prevent errorCallback from being called after disconnect
    // when app has already requested a stop().
    return checkForDisconnectRequest(false);
}

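// Poll the underlying AudioRecord to advance the AAudio state machine,
// since the legacy API does not push state changes to us.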
aaudio_result_t AudioStreamRecord::updateStateMachine()
{
    aaudio_result_t result = AAUDIO_OK;
    aaudio_wrapping_frames_t position;
    status_t err;
    switch (getState()) {
    // TODO add better state visibility to AudioRecord
    case AAUDIO_STREAM_STATE_STARTING:
        // When starting, the position will begin at zero and then go positive.
        // The position can wrap but by that time the state will not be STARTING.
        err = mAudioRecord->getPosition(&position);
        if (err != OK) {
            result = AAudioConvert_androidToAAudioResult(err);
        } else if (position > 0) {
            setState(AAUDIO_STREAM_STATE_STARTED);
        }
        break;
    case AAUDIO_STREAM_STATE_STOPPING:
        if (mAudioRecord->stopped()) {
            setState(AAUDIO_STREAM_STATE_STOPPED);
        }
        break;
    default:
        break;
    }
    return result;
}

aaudio_result_t AudioStreamRecord::read(void *buffer,
                                        int32_t numFrames,
                                        int64_t timeoutNanoseconds)
{
    int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
    int32_t numBytes;
    // This will detect out of range values for numFrames.
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }

    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }

    // TODO add timeout to AudioRecord
    bool blocking = (timeoutNanoseconds > 0);

    ssize_t bytesActuallyRead = 0;
    ssize_t totalBytesRead = 0;
    if (mFormatConversionBufferI16.get() != nullptr) {
        // Convert I16 data to float using an intermediate buffer.
        float *floatBuffer = (float *) buffer;
        int32_t framesLeft = numFrames;
        // Perform conversion using multiple read()s if necessary.
        while (framesLeft > 0) {
            // Read into short internal buffer.
            int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
            size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
            bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(), bytesToRead, blocking);
            if (bytesActuallyRead <= 0) {
                break;
            }
            totalBytesRead += bytesActuallyRead;
            int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
            // Convert into app float buffer.
            size_t numSamples = framesToConvert * getSamplesPerFrame();
            memcpy_to_float_from_i16(
                    floatBuffer,
                    mFormatConversionBufferI16.get(),
                    numSamples);
            floatBuffer += numSamples;
            framesLeft -= framesToConvert;
        }
    } else {
        bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
        totalBytesRead = bytesActuallyRead;
    }
    if (bytesActuallyRead == WOULD_BLOCK) {
        return 0;
    } else if (bytesActuallyRead < 0) {
        // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioRecord invalidation.
        if (bytesActuallyRead == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
    }
    int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
    incrementFramesRead(framesRead);

    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }

    return (aaudio_result_t) framesRead;
}

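// Buffer size is not adjustable on the legacy path; the request is ignored
// and the current size is returned.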
aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
{
    return getBufferSize();
}

int32_t AudioStreamRecord::getBufferSize() const
{
    return getBufferCapacity(); // TODO implement in AudioRecord?
}

int32_t AudioStreamRecord::getBufferCapacity() const
{
    return static_cast<int32_t>(mAudioRecord->frameCount());
}

int32_t AudioStreamRecord::getXRunCount() const
{
    return 0; // TODO implement when AudioRecord supports it
}

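// Use the AudioRecord notification period as the burst size on the legacy path.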
int32_t AudioStreamRecord::getFramesPerBurst() const
{
    return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
}

aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
                                                int64_t *framePosition,
                                                int64_t *timeNanoseconds) {
    ExtendedTimestamp extendedTimestamp;
    if (getState() != AAUDIO_STREAM_STATE_STARTED) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
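    // WOULD_BLOCK means no timestamp is available yet, which we report as an invalid state.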
    if (status == WOULD_BLOCK) {
        return AAUDIO_ERROR_INVALID_STATE;
    } else if (status != NO_ERROR) {
        return AAudioConvert_androidToAAudioResult(status);
    }
    return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
}

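// For a recording stream, frames are "written" into the buffer by the device.
// Estimate that count from the AudioRecord position while the stream is running.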
int64_t AudioStreamRecord::getFramesWritten() {
    aaudio_wrapping_frames_t position;
    status_t result;
    switch (getState()) {
    case AAUDIO_STREAM_STATE_STARTING:
    case AAUDIO_STREAM_STATE_STARTED:
        result = mAudioRecord->getPosition(&position);
        if (result == OK) {
            mFramesWritten.update32(position);
        }
        break;
    case AAUDIO_STREAM_STATE_STOPPING:
    default:
        break;
    }
    return AudioStreamLegacy::getFramesWritten();
}