/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AudioStreamRecord"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <stdint.h>

#include <aaudio/AAudio.h>
#include <audio_utils/primitives.h>
#include <media/AudioRecord.h>
#include <utils/String16.h>

#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamRecord.h"
#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"

using namespace android;
using namespace aaudio;

AudioStreamRecord::AudioStreamRecord()
    : AudioStreamLegacy()
    , mFixedBlockWriter(*this)
{
}

AudioStreamRecord::~AudioStreamRecord()
{
    const aaudio_stream_state_t state = getState();
    bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
    ALOGE_IF(bad, "stream not closed, in state %d", state);
}

aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
{
    aaudio_result_t result = AAUDIO_OK;

    result = AudioStream::open(builder);
    if (result != AAUDIO_OK) {
        return result;
    }

    // Try to create an AudioRecord

    // TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
    int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
                              ? 2 : getSamplesPerFrame();
    audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);

    size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
                        : builder.getBufferCapacity();

    audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
    aaudio_performance_mode_t perfMode = getPerformanceMode();
    switch (perfMode) {
        case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
            flags = (audio_input_flags_t) (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW);
            break;

        case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
        case AAUDIO_PERFORMANCE_MODE_NONE:
        default:
            // No flags.
            break;
    }

    // Preserve behavior of API 26
    if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
    }

    // Maybe change device format to get a FAST path.
    // AudioRecord does not support FAST mode for FLOAT data.
    // TODO AudioRecord should allow FLOAT data paths for FAST tracks.
    // So IF the user asks for low latency FLOAT
    // AND the sampleRate is likely to be compatible with FAST
    // THEN request I16 and convert to FLOAT when passing to user.
    // Note that hard coding 48000 Hz is not ideal because the sampleRate
    // for a FAST path might not be 48000 Hz.
    // It normally is but there is a chance that it is not.
    // And there is no reliable way to know that in advance.
    // Luckily the consequences of a wrong guess are minor.
    // We just may not get a FAST track.
    // But we wouldn't have anyway without this hack.
    constexpr int32_t kMostLikelySampleRateForFast = 48000;
    if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
            && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
            && (samplesPerFrame <= 2) // FAST only for mono and stereo
            && (getSampleRate() == kMostLikelySampleRateForFast
                || getSampleRate() == AAUDIO_UNSPECIFIED)) {
        setDeviceFormat(AAUDIO_FORMAT_PCM_I16);
    } else {
        setDeviceFormat(getFormat());
    }

    uint32_t notificationFrames = 0;

    // Setup the callback if there is one.
    AudioRecord::callback_t callback = nullptr;
    void *callbackData = nullptr;
    AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
    if (builder.getDataCallbackProc() != nullptr) {
        streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
        callback = getLegacyCallback();
        callbackData = this;
        notificationFrames = builder.getFramesPerDataCallback();
    }
    mCallbackBufferSize = builder.getFramesPerDataCallback();
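    // If the app did not request a specific callback size, notificationFrames stays at 0
    // (AAUDIO_UNSPECIFIED), in which case AudioRecord appears to choose its own
    // notification period.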

    // Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
    audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
                                           ? AUDIO_PORT_HANDLE_NONE
                                           : getDeviceId();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(builder.getContentType());
    const audio_source_t source =
            AAudioConvert_inputPresetToAudioSource(builder.getInputPreset());

    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = AUDIO_USAGE_UNKNOWN, // only used for output
            .source = source,
            .flags = AUDIO_FLAG_NONE, // different from the AUDIO_INPUT_FLAG_* values above
            .tags = ""
    };

    aaudio_session_id_t requestedSessionId = builder.getSessionId();
    audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    // ----------- open the AudioRecord ---------------------
    // Might retry, but never more than once.
    for (int i = 0; i < 2; i++) {
        audio_format_t requestedInternalFormat =
                AAudioConvert_aaudioToAndroidDataFormat(getDeviceFormat());

        mAudioRecord = new AudioRecord(
                mOpPackageName // const String16& opPackageName TODO does not compile
        );
        mAudioRecord->set(
                AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
                getSampleRate(),
                requestedInternalFormat,
                channelMask,
                frameCount,
                callback,
                callbackData,
                notificationFrames,
                false /*threadCanCallJava*/,
                sessionId,
                streamTransferType,
                flags,
                AUDIO_UID_INVALID, // DEFAULT uid
                -1,                // DEFAULT pid
                &attributes,
                selectedDeviceId
        );

        // Did we get a valid track?
        status_t status = mAudioRecord->initCheck();
        if (status != OK) {
            close();
            ALOGE("open(), initCheck() returned %d", status);
            return AAudioConvert_androidToAAudioResult(status);
        }

        // Check to see if it was worth hacking the deviceFormat.
        bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
                           == AUDIO_INPUT_FLAG_FAST;
        if (getFormat() != getDeviceFormat() && !gotFastPath) {
            // We tried to get a FAST path by switching the device format.
            // But it didn't work. So we might as well reopen using the same
            // format for device and for app.
            ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
            mAudioRecord.clear();
            setDeviceFormat(getFormat());
        } else {
            break; // Keep the one we just opened.
        }
    }

    // Get the actual values from the AudioRecord.
    setSamplesPerFrame(mAudioRecord->channelCount());

    int32_t actualSampleRate = mAudioRecord->getSampleRate();
    ALOGW_IF(actualSampleRate != getSampleRate(),
             "open() sampleRate changed from %d to %d",
             getSampleRate(), actualSampleRate);
    setSampleRate(actualSampleRate);

    // We may need to pass the data through a block size adapter to guarantee constant size.
    if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
        int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
        mFixedBlockWriter.open(callbackSizeBytes);
        mBlockAdapter = &mFixedBlockWriter;
    } else {
        mBlockAdapter = nullptr;
    }

    // Allocate format conversion buffer if needed.
    if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
        && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {

        if (builder.getDataCallbackProc() != nullptr) {
            // If we have a callback then we need to convert the data into an internal float
            // array and then pass that entire array to the app.
            mFormatConversionBufferSizeInFrames =
                    (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
                    ? mCallbackBufferSize : getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
        } else {
            // If we don't have a callback then we will read into an internal short array
            // and then convert into the app float array in read().
            mFormatConversionBufferSizeInFrames = getFramesPerBurst();
            int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
            mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
        }
        ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
              __func__, mFormatConversionBufferSizeInFrames);
    }

    // Update performance mode based on the actual stream.
    // For example, if the sample rate does not match native then you won't get a FAST track.
    audio_input_flags_t actualFlags = mAudioRecord->getFlags();
    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
    // FIXME Some platforms do not advertise RAW mode for low latency inputs.
    if ((actualFlags & AUDIO_INPUT_FLAG_FAST) == AUDIO_INPUT_FLAG_FAST) {
        actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
    }
    setPerformanceMode(actualPerformanceMode);

    setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy

    // Log warning if we did not get what we asked for.
    ALOGW_IF(actualFlags != flags,
             "open() flags changed from 0x%08X to 0x%08X",
             flags, actualFlags);
    ALOGW_IF(actualPerformanceMode != perfMode,
             "open() perfMode changed from %d to %d",
             perfMode, actualPerformanceMode);

    setState(AAUDIO_STREAM_STATE_OPEN);
    setDeviceId(mAudioRecord->getRoutedDeviceId());

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) mAudioRecord->getSessionId();
    setSessionId(actualSessionId);

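    // Register mDeviceCallback so the stream is notified when the routed device changes.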
    mAudioRecord->addAudioDeviceCallback(mDeviceCallback);

    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::close()
{
    // TODO add close() or release() to AudioRecord API then call it from here
    if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
        mAudioRecord->removeAudioDeviceCallback(mDeviceCallback);
        mAudioRecord.clear();
        setState(AAUDIO_STREAM_STATE_CLOSED);
    }
    mFixedBlockWriter.close();
    return AudioStream::close();
}

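// Convert device data to the app's format if needed. The only conversion handled here is
// device I16 to app FLOAT; if no conversion buffer was allocated in open(), the device
// data is returned unchanged.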
const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
    if (mFormatConversionBufferFloat.get() != nullptr) {
        LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
                            "%s() conversion size %d too large for buffer %d",
                            __func__, numFrames, mFormatConversionBufferSizeInFrames);

        int32_t numSamples = numFrames * getSamplesPerFrame();
        // Only conversion supported is I16 to FLOAT
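        // memcpy_to_float_from_i16() scales each 16-bit sample to float, nominally by
        // 1/32768, so full-scale I16 maps to roughly +/-1.0.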
        memcpy_to_float_from_i16(
                    mFormatConversionBufferFloat.get(),
                    (const int16_t *) audioData,
                    numSamples);
        return mFormatConversionBufferFloat.get();
    } else {
        return audioData;
    }
}

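// Map AudioRecord callback events onto the common AAudio callback operations:
// EVENT_MORE_DATA delivers captured audio, while EVENT_NEW_IAUDIORECORD means the track
// was re-created after a reroute, which this legacy path reports as a disconnect.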
void AudioStreamRecord::processCallback(int event, void *info) {
    switch (event) {
        case AudioRecord::EVENT_MORE_DATA:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
            break;

        // Stream got rerouted so we disconnect.
        case AudioRecord::EVENT_NEW_IAUDIORECORD:
            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
            break;

        default:
            break;
    }
}

aaudio_result_t AudioStreamRecord::requestStart()
{
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // Get the current position so we can detect when the track is recording.
    status_t err = mAudioRecord->getPosition(&mPositionWhenStarting);
    if (err != OK) {
        return AAudioConvert_androidToAAudioResult(err);
    }

    // Enable the callback before starting the AudioRecord to avoid shutting
    // down because of a race condition.
    mCallbackEnabled.store(true);
    err = mAudioRecord->start();
    if (err != OK) {
        return AAudioConvert_androidToAAudioResult(err);
    } else {
        setState(AAUDIO_STREAM_STATE_STARTING);
    }
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamRecord::requestStop() {
    if (mAudioRecord.get() == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    setState(AAUDIO_STREAM_STATE_STOPPING);
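    // For capture, frames are "written" by the service/device side; bring the write count
    // up to the read count before stopping.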
    incrementFramesWritten(getFramesRead() - getFramesWritten()); // TODO review
    mTimestampPosition.set(getFramesRead());
    mAudioRecord->stop();
    mCallbackEnabled.store(false);
    mFramesWritten.reset32(); // service writes frames, service position reset on flush
    mTimestampPosition.reset32();
    // Pass false to prevent errorCallback from being called after disconnect
    // when app has already requested a stop().
    return checkForDisconnectRequest(false);
}

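// Advance the AAudio state machine by polling the underlying AudioRecord, since the legacy
// API does not report state transitions directly: STARTING becomes STARTED once the
// position moves past the starting position, and STOPPING becomes STOPPED once stopped().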
aaudio_result_t AudioStreamRecord::updateStateMachine()
{
    aaudio_result_t result = AAUDIO_OK;
    aaudio_wrapping_frames_t position;
    status_t err;
    switch (getState()) {
    // TODO add better state visibility to AudioRecord
    case AAUDIO_STREAM_STATE_STARTING:
        err = mAudioRecord->getPosition(&position);
        if (err != OK) {
            result = AAudioConvert_androidToAAudioResult(err);
        } else if (position != mPositionWhenStarting) {
            setState(AAUDIO_STREAM_STATE_STARTED);
        }
        break;
    case AAUDIO_STREAM_STATE_STOPPING:
        if (mAudioRecord->stopped()) {
            setState(AAUDIO_STREAM_STATE_STOPPED);
        }
        break;
    default:
        break;
    }
    return result;
}

aaudio_result_t AudioStreamRecord::read(void *buffer,
                                        int32_t numFrames,
                                        int64_t timeoutNanoseconds)
{
    int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
    int32_t numBytes;
    // This will detect out of range values for numFrames.
    aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
    if (result != AAUDIO_OK) {
        return result;
    }

    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
        return AAUDIO_ERROR_DISCONNECTED;
    }

    // TODO add timeout to AudioRecord
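    // A positive timeout selects a blocking read; the timeout duration itself cannot be
    // honored here because AudioRecord::read() takes only a blocking flag.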
    bool blocking = (timeoutNanoseconds > 0);

    ssize_t bytesActuallyRead = 0;
    ssize_t totalBytesRead = 0;
    if (mFormatConversionBufferI16.get() != nullptr) {
        // Convert I16 data to float using an intermediate buffer.
        float *floatBuffer = (float *) buffer;
        int32_t framesLeft = numFrames;
        // Perform conversion using multiple read()s if necessary.
        while (framesLeft > 0) {
            // Read into short internal buffer.
            int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
            size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
            bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(),
                                                   bytesToRead, blocking);
            if (bytesActuallyRead <= 0) {
                break;
            }
            totalBytesRead += bytesActuallyRead;
            int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
            // Convert into app float buffer.
            size_t numSamples = framesToConvert * getSamplesPerFrame();
            memcpy_to_float_from_i16(
                    floatBuffer,
                    mFormatConversionBufferI16.get(),
                    numSamples);
            floatBuffer += numSamples;
            framesLeft -= framesToConvert;
        }
    } else {
        bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
        totalBytesRead = bytesActuallyRead;
    }
    if (bytesActuallyRead == WOULD_BLOCK) {
        return 0;
    } else if (bytesActuallyRead < 0) {
        // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
        // AudioRecord invalidation.
        if (bytesActuallyRead == DEAD_OBJECT) {
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            return AAUDIO_ERROR_DISCONNECTED;
        }
        return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
    }
    int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
    incrementFramesRead(framesRead);

    result = updateStateMachine();
    if (result != AAUDIO_OK) {
        return result;
    }

    return (aaudio_result_t) framesRead;
}

aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
{
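    // The legacy capture path does not appear to support changing the buffer size after
    // open, so the request is ignored and the current size is returned.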
    return getBufferSize();
}

int32_t AudioStreamRecord::getBufferSize() const
{
    return getBufferCapacity(); // TODO implement in AudioRecord?
}

int32_t AudioStreamRecord::getBufferCapacity() const
{
    return static_cast<int32_t>(mAudioRecord->frameCount());
}

int32_t AudioStreamRecord::getXRunCount() const
{
    return 0; // TODO implement when AudioRecord supports it
}

int32_t AudioStreamRecord::getFramesPerBurst() const
{
    return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
}

aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
                                                int64_t *framePosition,
                                                int64_t *timeNanoseconds) {
    ExtendedTimestamp extendedTimestamp;
    status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
    if (status == WOULD_BLOCK) {
        return AAUDIO_ERROR_INVALID_STATE;
    } else if (status != NO_ERROR) {
        return AAudioConvert_androidToAAudioResult(status);
    }
    return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
}

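// For a capture stream, frames are "written" by the device/service side. While the stream
// is active, refresh the wrapping frame counter from the AudioRecord position before
// delegating to the base class.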
int64_t AudioStreamRecord::getFramesWritten() {
    aaudio_wrapping_frames_t position;
    status_t result;
    switch (getState()) {
        case AAUDIO_STREAM_STATE_STARTING:
        case AAUDIO_STREAM_STATE_STARTED:
        case AAUDIO_STREAM_STATE_STOPPING:
            result = mAudioRecord->getPosition(&position);
            if (result == OK) {
                mFramesWritten.update32(position);
            }
            break;
        default:
            break;
    }
    return AudioStreamLegacy::getFramesWritten();
}