/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>

#include "audio_device_ios.h"

#include <cmath>

#include "api/array_view.h"
#include "helpers.h"
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/atomic_ops.h"
#include "rtc_base/bind.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/thread.h"
#include "rtc_base/thread_annotations.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"

#import "base/RTCLogging.h"
#import "components/audio/RTCAudioSession+Private.h"
#import "components/audio/RTCAudioSession.h"
#import "components/audio/RTCAudioSessionConfiguration.h"
#import "components/audio/RTCNativeAudioSessionDelegateAdapter.h"

namespace webrtc {
namespace ios_adm {

#define LOGI() RTC_LOG(LS_INFO) << "AudioDeviceIOS::"

#define LOG_AND_RETURN_IF_ERROR(error, message)    \
  do {                                             \
    OSStatus err = error;                          \
    if (err) {                                     \
      RTC_LOG(LS_ERROR) << message << ": " << err; \
      return false;                                \
    }                                              \
  } while (0)

#define LOG_IF_ERROR(error, message)               \
  do {                                             \
    OSStatus err = error;                          \
    if (err) {                                     \
      RTC_LOG(LS_ERROR) << message << ": " << err; \
    }                                              \
  } while (0)
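
// Example usage of the error macros above (an illustrative sketch only; the
// DisableInputOnIOUnit helper and its |unit| argument are hypothetical and
// not part of this file):
//
//   static bool DisableInputOnIOUnit(AudioUnit unit) {
//     UInt32 zero = 0;
//     LOG_AND_RETURN_IF_ERROR(
//         AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO,
//                              kAudioUnitScope_Input, 1, &zero, sizeof(zero)),
//         "Failed to disable input on the I/O unit");
//     return true;
//   }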

// Hardcoded delay estimates based on real measurements.
// TODO(henrika): these values are not used in combination with built-in AEC.
// They can most likely be removed.
const UInt16 kFixedPlayoutDelayEstimate = 30;
const UInt16 kFixedRecordDelayEstimate = 30;

enum AudioDeviceMessageType : uint32_t {
  kMessageTypeInterruptionBegin,
  kMessageTypeInterruptionEnd,
  kMessageTypeValidRouteChange,
  kMessageTypeCanPlayOrRecordChange,
  kMessageTypePlayoutGlitchDetected,
  kMessageOutputVolumeChange,
};
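
// These message types are posted to |thread_| by the audio session observer
// callbacks, which may fire on arbitrary threads, and are handled on the ADM
// thread in OnMessage(). A minimal sketch of the pattern used in this file
// (illustrative only):
//
//   // Producer side (any thread):
//   thread_->Post(RTC_FROM_HERE, this, kMessageTypeValidRouteChange);
//   // Consumer side (ADM thread), see OnMessage():
//   //   case kMessageTypeValidRouteChange: HandleValidRouteChange(); break;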

using ios::CheckAndLogError;

#if !defined(NDEBUG)
// Returns true when the code runs on a device simulator.
static bool DeviceIsSimulator() {
  return ios::GetDeviceName() == "x86_64";
}

// Helper method that logs essential device information strings.
static void LogDeviceInfo() {
  RTC_LOG(LS_INFO) << "LogDeviceInfo";
  @autoreleasepool {
    RTC_LOG(LS_INFO) << " system name: " << ios::GetSystemName();
    RTC_LOG(LS_INFO) << " system version: " << ios::GetSystemVersionAsString();
    RTC_LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
    RTC_LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
    RTC_LOG(LS_INFO) << " process name: " << ios::GetProcessName();
    RTC_LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
    RTC_LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
    RTC_LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
    RTC_LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
#if TARGET_IPHONE_SIMULATOR
    RTC_LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined";
#endif
    RTC_LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator();
  }
}
#endif  // !defined(NDEBUG)

AudioDeviceIOS::AudioDeviceIOS()
    : audio_device_buffer_(nullptr),
      audio_unit_(nullptr),
      recording_(0),
      playing_(0),
      initialized_(false),
      audio_is_initialized_(false),
      is_interrupted_(false),
      has_configured_session_(false),
      num_detected_playout_glitches_(0),
      last_playout_time_(0),
      num_playout_callbacks_(0),
      last_output_volume_change_time_(0) {
  LOGI() << "ctor" << ios::GetCurrentThreadDescription();
  io_thread_checker_.Detach();
  thread_checker_.Detach();
  thread_ = rtc::Thread::Current();

  audio_session_observer_ = [[RTCNativeAudioSessionDelegateAdapter alloc] initWithObserver:this];
}

AudioDeviceIOS::~AudioDeviceIOS() {
  RTC_DCHECK(thread_checker_.IsCurrent());
  LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
  Terminate();
  audio_session_observer_ = nil;
}

void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  LOGI() << "AttachAudioBuffer";
  RTC_DCHECK(audioBuffer);
  RTC_DCHECK(thread_checker_.IsCurrent());
  audio_device_buffer_ = audioBuffer;
}

AudioDeviceGeneric::InitStatus AudioDeviceIOS::Init() {
  LOGI() << "Init";
  io_thread_checker_.Detach();
  thread_checker_.Detach();

  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (initialized_) {
    return InitStatus::OK;
  }
#if !defined(NDEBUG)
  LogDeviceInfo();
#endif
  // Store the preferred sample rate and preferred number of channels at this
  // early stage. They have not been set and confirmed yet since
  // configureForWebRTC is not called until audio is about to start. However,
  // it makes sense to store the parameters now and verify them at a later
  // stage.
  RTC_OBJC_TYPE(RTCAudioSessionConfiguration)* config =
      [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration];
  playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels);
  record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels);
  // Ensure that the audio device buffer (ADB) knows about the internal audio
  // parameters. Note that, even if we are unable to get a mono audio session,
  // we will always tell the I/O audio unit to do a channel format conversion
  // to guarantee mono on the "input side" of the audio unit.
  UpdateAudioDeviceBuffer();
  initialized_ = true;
  return InitStatus::OK;
}
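
// A hedged sketch of the typical call order used by the owning audio device
// module; the exact sequence is decided by the caller, not by this file, and
// the names below are only illustrative:
//
//   AudioDeviceIOS adm;
//   adm.AttachAudioBuffer(audio_device_buffer);  // done by the main ADM class
//   adm.Init();
//   adm.InitPlayout();
//   adm.StartPlayout();
//   ...
//   adm.StopPlayout();
//   adm.Terminate();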

int32_t AudioDeviceIOS::Terminate() {
  LOGI() << "Terminate";
  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (!initialized_) {
    return 0;
  }
  StopPlayout();
  StopRecording();
  initialized_ = false;
  return 0;
}

bool AudioDeviceIOS::Initialized() const {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return initialized_;
}

int32_t AudioDeviceIOS::InitPlayout() {
  LOGI() << "InitPlayout";
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!audio_is_initialized_);
  RTC_DCHECK(!playing_);
  if (!audio_is_initialized_) {
    if (!InitPlayOrRecord()) {
      RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
      return -1;
    }
  }
  audio_is_initialized_ = true;
  return 0;
}

bool AudioDeviceIOS::PlayoutIsInitialized() const {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return audio_is_initialized_;
}

bool AudioDeviceIOS::RecordingIsInitialized() const {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  return audio_is_initialized_;
}

int32_t AudioDeviceIOS::InitRecording() {
  LOGI() << "InitRecording";
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!audio_is_initialized_);
  RTC_DCHECK(!recording_);
  if (!audio_is_initialized_) {
    if (!InitPlayOrRecord()) {
      RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
      return -1;
    }
  }
  audio_is_initialized_ = true;
  return 0;
}

int32_t AudioDeviceIOS::StartPlayout() {
  LOGI() << "StartPlayout";
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(audio_is_initialized_);
  RTC_DCHECK(!playing_);
  RTC_DCHECK(audio_unit_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetPlayout();
  }
  if (!recording_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    if (!audio_unit_->Start()) {
      RTCLogError(@"StartPlayout failed to start audio unit.");
      return -1;
    }
    RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  rtc::AtomicOps::ReleaseStore(&playing_, 1);
  num_playout_callbacks_ = 0;
  num_detected_playout_glitches_ = 0;
  return 0;
}

int32_t AudioDeviceIOS::StopPlayout() {
  LOGI() << "StopPlayout";
  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (!audio_is_initialized_ || !playing_) {
    return 0;
  }
  if (!recording_) {
    ShutdownPlayOrRecord();
    audio_is_initialized_ = false;
  }
  rtc::AtomicOps::ReleaseStore(&playing_, 0);

  // Derive average number of calls to OnGetPlayoutData() between detected
  // audio glitches and add the result to a histogram.
  int average_number_of_playout_callbacks_between_glitches = 100000;
  RTC_DCHECK_GE(num_playout_callbacks_, num_detected_playout_glitches_);
  if (num_detected_playout_glitches_ > 0) {
    average_number_of_playout_callbacks_between_glitches =
        num_playout_callbacks_ / num_detected_playout_glitches_;
  }
  RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
                              average_number_of_playout_callbacks_between_glitches);
  RTCLog(@"Average number of playout callbacks between glitches: %d",
         average_number_of_playout_callbacks_between_glitches);
  return 0;
}

bool AudioDeviceIOS::Playing() const {
  return playing_;
}

int32_t AudioDeviceIOS::StartRecording() {
  LOGI() << "StartRecording";
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_DCHECK(audio_is_initialized_);
  RTC_DCHECK(!recording_);
  RTC_DCHECK(audio_unit_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetRecord();
  }
  if (!playing_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    if (!audio_unit_->Start()) {
      RTCLogError(@"StartRecording failed to start audio unit.");
      return -1;
    }
    RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  rtc::AtomicOps::ReleaseStore(&recording_, 1);
  return 0;
}

int32_t AudioDeviceIOS::StopRecording() {
  LOGI() << "StopRecording";
  RTC_DCHECK_RUN_ON(&thread_checker_);
  if (!audio_is_initialized_ || !recording_) {
    return 0;
  }
  if (!playing_) {
    ShutdownPlayOrRecord();
    audio_is_initialized_ = false;
  }
  rtc::AtomicOps::ReleaseStore(&recording_, 0);
  return 0;
}

bool AudioDeviceIOS::Recording() const {
  return recording_;
}

int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
  delayMS = kFixedPlayoutDelayEstimate;
  return 0;
}

int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
  LOGI() << "GetPlayoutAudioParameters";
  RTC_DCHECK(playout_parameters_.is_valid());
  RTC_DCHECK(thread_checker_.IsCurrent());
  *params = playout_parameters_;
  return 0;
}

int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
  LOGI() << "GetRecordAudioParameters";
  RTC_DCHECK(record_parameters_.is_valid());
  RTC_DCHECK(thread_checker_.IsCurrent());
  *params = record_parameters_;
  return 0;
}

void AudioDeviceIOS::OnInterruptionBegin() {
  RTC_DCHECK(thread_);
  LOGI() << "OnInterruptionBegin";
  thread_->Post(RTC_FROM_HERE, this, kMessageTypeInterruptionBegin);
}

void AudioDeviceIOS::OnInterruptionEnd() {
  RTC_DCHECK(thread_);
  LOGI() << "OnInterruptionEnd";
  thread_->Post(RTC_FROM_HERE, this, kMessageTypeInterruptionEnd);
}

void AudioDeviceIOS::OnValidRouteChange() {
  RTC_DCHECK(thread_);
  thread_->Post(RTC_FROM_HERE, this, kMessageTypeValidRouteChange);
}

void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
  RTC_DCHECK(thread_);
  thread_->Post(RTC_FROM_HERE,
                this,
                kMessageTypeCanPlayOrRecordChange,
                new rtc::TypedMessageData<bool>(can_play_or_record));
}

void AudioDeviceIOS::OnChangedOutputVolume() {
  RTC_DCHECK(thread_);
  thread_->Post(RTC_FROM_HERE, this, kMessageOutputVolumeChange);
}

OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                               const AudioTimeStamp* time_stamp,
                                               UInt32 bus_number,
                                               UInt32 num_frames,
                                               AudioBufferList* /* io_data */) {
  RTC_DCHECK_RUN_ON(&io_thread_checker_);
  OSStatus result = noErr;
  // Simply return if recording is not enabled.
  if (!rtc::AtomicOps::AcquireLoad(&recording_)) return result;

  // Set the size of our own audio buffer and clear it first to avoid copying
  // in combination with potential reallocations.
  // On real iOS devices, the size will only be set once (at first callback).
  record_audio_buffer_.Clear();
  record_audio_buffer_.SetSize(num_frames);

  // Allocate AudioBuffers to be used as storage for the received audio.
  // The AudioBufferList structure works as a placeholder for the
  // AudioBuffer structure, which holds a pointer to the actual data buffer
  // in |record_audio_buffer_|. Recorded audio will be rendered into this memory
  // at each input callback when calling AudioUnitRender().
  AudioBufferList audio_buffer_list;
  audio_buffer_list.mNumberBuffers = 1;
  AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
  audio_buffer->mNumberChannels = record_parameters_.channels();
  audio_buffer->mDataByteSize =
      record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample;
  audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());

  // Obtain the recorded audio samples by initiating a rendering cycle.
  // Since it happens on the input bus, the |io_data| parameter is a reference
  // to the preallocated audio buffer list that the audio unit renders into.
  // We could let the audio unit provide a buffer in |io_data| instead, but we
  // currently just use our own.
  // TODO(henrika): should error handling be improved?
  result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
  if (result != noErr) {
    RTCLogError(@"Failed to render audio.");
    return result;
  }

  // Get a pointer to the recorded audio and send it to the WebRTC ADB.
  // Use the FineAudioBuffer instance to convert between native buffer size
  // and the 10ms buffer size used by WebRTC.
  fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate);
  return noErr;
}

OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
                                          const AudioTimeStamp* time_stamp,
                                          UInt32 bus_number,
                                          UInt32 num_frames,
                                          AudioBufferList* io_data) {
  RTC_DCHECK_RUN_ON(&io_thread_checker_);
  // Verify 16-bit, noninterleaved mono PCM signal format.
  RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
  AudioBuffer* audio_buffer = &io_data->mBuffers[0];
  RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);

  // Produce silence and give audio unit a hint about it if playout is not
  // activated.
  if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
    const size_t size_in_bytes = audio_buffer->mDataByteSize;
    RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
    *flags |= kAudioUnitRenderAction_OutputIsSilence;
    memset(static_cast<int8_t*>(audio_buffer->mData), 0, size_in_bytes);
    return noErr;
  }

  // Measure the time since the last call to OnGetPlayoutData() and see if it
  // is larger than a well-defined threshold that depends on the current IO
  // buffer size. If so, we have an indication of a glitch in the output audio
  // since the core audio layer will most likely run dry in this state.
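  // Worked example (numbers are illustrative only): with a 20 ms I/O buffer
  // the threshold below is 1.6 * 20 = 32 ms, so a 45 ms gap between two render
  // callbacks is reported as a glitch, while gaps above 120 ms are treated as
  // probable device switches and ignored (see the check further down).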
  ++num_playout_callbacks_;
  const int64_t now_time = rtc::TimeMillis();
  if (time_stamp->mSampleTime != num_frames) {
    const int64_t delta_time = now_time - last_playout_time_;
    const int glitch_threshold = 1.6 * playout_parameters_.GetBufferSizeInMilliseconds();
    if (delta_time > glitch_threshold) {
      RTCLogWarning(@"Possible playout audio glitch detected.\n"
                     "  Time since last OnGetPlayoutData was %lld ms.\n",
                    delta_time);
      // Exclude extreme delta values since they most likely do not correspond
      // to a real glitch. Instead, the most probable cause is that a headset
      // has been plugged in or out. There are more direct ways to detect
      // audio device changes (see HandleValidRouteChange()) but experiments
      // show that using them leads to more complex implementations.
      // TODO(henrika): more tests might be needed to come up with an even
      // better upper limit.
      if (glitch_threshold < 120 && delta_time > 120) {
        RTCLog(@"Glitch warning is ignored. Probably caused by device switch.");
      } else {
        thread_->Post(RTC_FROM_HERE, this, kMessageTypePlayoutGlitchDetected);
      }
    }
  }
  last_playout_time_ = now_time;

  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
  // the native I/O audio unit) and copy the result to the audio buffer in the
  // |io_data| destination.
  fine_audio_buffer_->GetPlayoutData(
      rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
      kFixedPlayoutDelayEstimate);
  return noErr;
}

void AudioDeviceIOS::OnMessage(rtc::Message* msg) {
  switch (msg->message_id) {
    case kMessageTypeInterruptionBegin:
      HandleInterruptionBegin();
      break;
    case kMessageTypeInterruptionEnd:
      HandleInterruptionEnd();
      break;
    case kMessageTypeValidRouteChange:
      HandleValidRouteChange();
      break;
    case kMessageTypeCanPlayOrRecordChange: {
      rtc::TypedMessageData<bool>* data = static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
      HandleCanPlayOrRecordChange(data->data());
      delete data;
      break;
    }
    case kMessageTypePlayoutGlitchDetected:
      HandlePlayoutGlitchDetected();
      break;
    case kMessageOutputVolumeChange:
      HandleOutputVolumeChange();
      break;
  }
}

void AudioDeviceIOS::HandleInterruptionBegin() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
  if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
    RTCLog(@"Stopping the audio unit due to interruption begin.");
    if (!audio_unit_->Stop()) {
      RTCLogError(@"Failed to stop the audio unit for interruption begin.");
    } else {
      PrepareForNewStart();
    }
  }
  is_interrupted_ = true;
}

void AudioDeviceIOS::HandleInterruptionEnd() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
          "Updating audio unit state.",
         is_interrupted_);
  is_interrupted_ = false;
  if (!audio_unit_) return;
  if (webrtc::field_trial::IsEnabled("WebRTC-Audio-iOS-Holding")) {
    // Work around an issue where audio does not restart properly after an interruption
    // by restarting the audio unit when the interruption ends.
    if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
      audio_unit_->Stop();
      PrepareForNewStart();
    }
    if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
      audio_unit_->Uninitialize();
    }
    // Allocate new buffers given the potentially new stream format.
    SetupAudioBuffersForActiveAudioSession();
  }
  UpdateAudioUnit([RTC_OBJC_TYPE(RTCAudioSession) sharedInstance].canPlayOrRecord);
}

void AudioDeviceIOS::HandleValidRouteChange() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  RTCLog(@"%@", session);
  HandleSampleRateChange(session.sampleRate);
}

void AudioDeviceIOS::HandleCanPlayOrRecordChange(bool can_play_or_record) {
  RTCLog(@"Handling CanPlayOrRecord change to: %d", can_play_or_record);
  UpdateAudioUnit(can_play_or_record);
}

void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTCLog(@"Handling sample rate change to %f.", sample_rate);

  // Don't do anything if we're interrupted.
  if (is_interrupted_) {
    RTCLog(@"Ignoring sample rate change to %f due to interruption.", sample_rate);
    return;
  }

  // If we don't have an audio unit yet, or the audio unit is uninitialized,
  // there is no work to do.
  if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
    return;
  }

  // The audio unit is already initialized or started.
  // Check to see if the sample rate or buffer size has changed.
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  const double session_sample_rate = session.sampleRate;
  const NSTimeInterval session_buffer_duration = session.IOBufferDuration;
  const size_t session_frames_per_buffer =
      static_cast<size_t>(session_sample_rate * session_buffer_duration + .5);
  const double current_sample_rate = playout_parameters_.sample_rate();
  const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer();
  RTCLog(@"Handling playout sample rate change to: %f\n"
          "  Session sample rate: %f frames_per_buffer: %lu\n"
          "  ADM sample rate: %f frames_per_buffer: %lu",
         sample_rate,
         session_sample_rate,
         (unsigned long)session_frames_per_buffer,
         current_sample_rate,
         (unsigned long)current_frames_per_buffer);

  // Sample rate and buffer size are the same, no work to do.
  if (std::abs(current_sample_rate - session_sample_rate) <= DBL_EPSILON &&
      current_frames_per_buffer == session_frames_per_buffer) {
    RTCLog(@"Ignoring sample rate change since audio parameters are intact.");
    return;
  }

  // Extra sanity check to ensure that the new sample rate is valid.
  if (session_sample_rate <= 0.0) {
    RTCLogError(@"Sample rate is invalid: %f", session_sample_rate);
    return;
  }

  // We need to adjust our format and buffer sizes.
  // The stream format is about to be changed and it requires that we first
  // stop and uninitialize the audio unit to deallocate its resources.
  RTCLog(@"Stopping and uninitializing audio unit to adjust buffers.");
  bool restart_audio_unit = false;
  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
    audio_unit_->Stop();
    restart_audio_unit = true;
    PrepareForNewStart();
  }
  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    audio_unit_->Uninitialize();
  }

  // Allocate new buffers given the new stream format.
  SetupAudioBuffersForActiveAudioSession();

  // Initialize the audio unit again with the new sample rate.
  RTC_DCHECK_EQ(playout_parameters_.sample_rate(), session_sample_rate);
  if (!audio_unit_->Initialize(session_sample_rate)) {
    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f", session_sample_rate);
    return;
  }

  // Restart the audio unit if it was already running.
  if (restart_audio_unit && !audio_unit_->Start()) {
    RTCLogError(@"Failed to start audio unit with sample rate: %f", session_sample_rate);
    return;
  }
  RTCLog(@"Successfully handled sample rate change.");
}

void AudioDeviceIOS::HandlePlayoutGlitchDetected() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  // Don't update metrics if we're interrupted since a "glitch" is expected
  // in this state.
  if (is_interrupted_) {
    RTCLog(@"Ignoring audio glitch due to interruption.");
    return;
  }
  // Avoid doing glitch detection for two seconds after a volume change
  // has been detected to reduce the risk of false alarm.
  if (last_output_volume_change_time_ > 0 &&
      rtc::TimeSince(last_output_volume_change_time_) < 2000) {
    RTCLog(@"Ignoring audio glitch due to recent output volume change.");
    return;
  }
  num_detected_playout_glitches_++;
  RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);

  int64_t glitch_count = num_detected_playout_glitches_;
  dispatch_async(dispatch_get_main_queue(), ^{
    RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
    [session notifyDidDetectPlayoutGlitch:glitch_count];
  });
}

void AudioDeviceIOS::HandleOutputVolumeChange() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTCLog(@"Output volume change detected.");
  // Store time of this detection so it can be used to defer detection of
  // glitches too close in time to this event.
  last_output_volume_change_time_ = rtc::TimeMillis();
}

void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
  LOGI() << "UpdateAudioDeviceBuffer";
  // AttachAudioBuffer() is called at construction by the main class but check
  // just in case.
  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
  RTC_DCHECK_GT(playout_parameters_.sample_rate(), 0);
  RTC_DCHECK_GT(record_parameters_.sample_rate(), 0);
  RTC_DCHECK_EQ(playout_parameters_.channels(), 1);
  RTC_DCHECK_EQ(record_parameters_.channels(), 1);
  // Inform the audio device buffer (ADB) about the new audio format.
  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
  audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
}

void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
  LOGI() << "SetupAudioBuffersForActiveAudioSession";
  // Verify the current values once the audio session has been activated.
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  double sample_rate = session.sampleRate;
  NSTimeInterval io_buffer_duration = session.IOBufferDuration;
  RTCLog(@"%@", session);

  // Log a warning message for the case when we are unable to set the preferred
  // hardware sample rate but continue and use the non-ideal sample rate after
  // reinitializing the audio parameters. Most BT headsets only support 8kHz or
  // 16kHz.
  RTC_OBJC_TYPE(RTCAudioSessionConfiguration)* webRTCConfig =
      [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration];
  if (sample_rate != webRTCConfig.sampleRate) {
    RTC_LOG(LS_WARNING) << "Unable to set the preferred sample rate";
  }

  // Crash reports indicate that in rare cases the reported sample rate can be
  // less than or equal to zero. If that happens and if a valid sample rate has
  // already been set during initialization, the best guess we can do is to
  // reuse the current sample rate.
  if (sample_rate <= DBL_EPSILON && playout_parameters_.sample_rate() > 0) {
    RTCLogError(@"Reported rate is invalid: %f. "
                 "Using %d as sample rate instead.",
                sample_rate, playout_parameters_.sample_rate());
    sample_rate = playout_parameters_.sample_rate();
  }

  // At this stage, we also know the exact IO buffer duration and can add
  // that info to the existing audio parameters where it is converted into
  // number of audio frames.
  // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
  // Hence, 128 is the size we expect to see in upcoming render callbacks.
  playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration);
  RTC_DCHECK(playout_parameters_.is_complete());
  record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration);
  RTC_DCHECK(record_parameters_.is_complete());
  RTC_LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer();
  RTC_LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer();
  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer());

  // Update the ADB parameters since the sample rate might have changed.
  UpdateAudioDeviceBuffer();

  // Create a modified audio buffer class which allows us to ask for,
  // or deliver, any number of samples (and not only multiple of 10ms) to match
  // the native audio unit buffer size.
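  // For example (illustrative numbers, not measured here): at 48 kHz with a
  // 20 ms I/O buffer the audio unit asks for 960 frames per render callback,
  // while WebRTC produces and consumes fixed 10 ms (480-frame) blocks; the
  // FineAudioBuffer buffers the remainder in both directions so the two block
  // sizes can interoperate.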
  RTC_DCHECK(audio_device_buffer_);
  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_));
}

bool AudioDeviceIOS::CreateAudioUnit() {
  RTC_DCHECK(!audio_unit_);

  audio_unit_.reset(new VoiceProcessingAudioUnit(this));
  if (!audio_unit_->Init()) {
    audio_unit_.reset();
    return false;
  }

  return true;
}

void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
         can_play_or_record,
         is_interrupted_);

  if (is_interrupted_) {
    RTCLog(@"Ignoring audio unit update due to interruption.");
    return;
  }

  // If we're not initialized we don't need to do anything; the audio unit
  // will be created and initialized later in InitPlayOrRecord().
  if (!audio_is_initialized_) return;

  // If we're initialized, we must have an audio unit.
  RTC_DCHECK(audio_unit_);

  bool should_initialize_audio_unit = false;
  bool should_uninitialize_audio_unit = false;
  bool should_start_audio_unit = false;
  bool should_stop_audio_unit = false;

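  // Summary of the decisions made in the switch below (a descriptive sketch of
  // the existing logic, not an independent specification):
  //   kUninitialized: initialize if |can_play_or_record|; also start if
  //                   playout or recording is active.
  //   kInitialized:   start if |can_play_or_record| and playout or recording
  //                   is active; uninitialize if !|can_play_or_record|.
  //   kStarted:       stop and uninitialize if !|can_play_or_record|.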
  switch (audio_unit_->GetState()) {
    case VoiceProcessingAudioUnit::kInitRequired:
      RTCLog(@"VPAU state: InitRequired");
      RTC_NOTREACHED();
      break;
    case VoiceProcessingAudioUnit::kUninitialized:
      RTCLog(@"VPAU state: Uninitialized");
      should_initialize_audio_unit = can_play_or_record;
      should_start_audio_unit = should_initialize_audio_unit && (playing_ || recording_);
      break;
    case VoiceProcessingAudioUnit::kInitialized:
      RTCLog(@"VPAU state: Initialized");
      should_start_audio_unit = can_play_or_record && (playing_ || recording_);
      should_uninitialize_audio_unit = !can_play_or_record;
      break;
    case VoiceProcessingAudioUnit::kStarted:
      RTCLog(@"VPAU state: Started");
      RTC_DCHECK(playing_ || recording_);
      should_stop_audio_unit = !can_play_or_record;
      should_uninitialize_audio_unit = should_stop_audio_unit;
      break;
  }

  if (should_initialize_audio_unit) {
    RTCLog(@"Initializing audio unit for UpdateAudioUnit");
    ConfigureAudioSession();
    SetupAudioBuffersForActiveAudioSession();
    if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
      RTCLogError(@"Failed to initialize audio unit.");
      return;
    }
  }

  if (should_start_audio_unit) {
    RTCLog(@"Starting audio unit for UpdateAudioUnit");
    // Log session settings before trying to start audio streaming.
    RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
    RTCLog(@"%@", session);
    if (!audio_unit_->Start()) {
      RTCLogError(@"Failed to start audio unit.");
      return;
    }
  }

  if (should_stop_audio_unit) {
    RTCLog(@"Stopping audio unit for UpdateAudioUnit");
    if (!audio_unit_->Stop()) {
      RTCLogError(@"Failed to stop audio unit.");
      return;
    }
  }

  if (should_uninitialize_audio_unit) {
    RTCLog(@"Uninitializing audio unit for UpdateAudioUnit");
    audio_unit_->Uninitialize();
    UnconfigureAudioSession();
  }
}

bool AudioDeviceIOS::ConfigureAudioSession() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTCLog(@"Configuring audio session.");
  if (has_configured_session_) {
    RTCLogWarning(@"Audio session already configured.");
    return false;
  }
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  [session lockForConfiguration];
  bool success = [session configureWebRTCSession:nil];
  [session unlockForConfiguration];
  if (success) {
    has_configured_session_ = true;
    RTCLog(@"Configured audio session.");
  } else {
    RTCLog(@"Failed to configure audio session.");
  }
  return success;
}

void AudioDeviceIOS::UnconfigureAudioSession() {
  RTC_DCHECK_RUN_ON(&thread_checker_);
  RTCLog(@"Unconfiguring audio session.");
  if (!has_configured_session_) {
    RTCLogWarning(@"Audio session already unconfigured.");
    return;
  }
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  [session lockForConfiguration];
  [session unconfigureWebRTCSession:nil];
  [session endWebRTCSession:nil];
  [session unlockForConfiguration];
  has_configured_session_ = false;
  RTCLog(@"Unconfigured audio session.");
}

bool AudioDeviceIOS::InitPlayOrRecord() {
  LOGI() << "InitPlayOrRecord";
  RTC_DCHECK_RUN_ON(&thread_checker_);

  // There should be no audio unit at this point.
  if (!CreateAudioUnit()) {
    return false;
  }

  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  // Subscribe to audio session events.
  [session pushDelegate:audio_session_observer_];
  is_interrupted_ = session.isInterrupted ? true : false;

  // Lock the session to make configuration changes.
  [session lockForConfiguration];
  NSError* error = nil;
  if (![session beginWebRTCSession:&error]) {
    [session unlockForConfiguration];
    RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription);
    audio_unit_.reset();
    return false;
  }

  // If we are ready to play or record, and if the audio session can be
  // configured, then initialize the audio unit.
  if (session.canPlayOrRecord) {
    if (!ConfigureAudioSession()) {
      // One possible reason for failure is if an attempt was made to use the
      // audio session during or after a Media Services failure.
      // See AVAudioSessionErrorCodeMediaServicesFailed for details.
      [session unlockForConfiguration];
      audio_unit_.reset();
      return false;
    }
    SetupAudioBuffersForActiveAudioSession();
    audio_unit_->Initialize(playout_parameters_.sample_rate());
  }

  // Release the lock.
  [session unlockForConfiguration];
  return true;
}

void AudioDeviceIOS::ShutdownPlayOrRecord() {
  LOGI() << "ShutdownPlayOrRecord";
  RTC_DCHECK_RUN_ON(&thread_checker_);

  // Stop the audio unit to prevent any additional audio callbacks.
  audio_unit_->Stop();

  // Close and delete the voice-processing I/O unit.
  audio_unit_.reset();

  // Detach thread checker for the AURemoteIO::IOThread to ensure that the
  // next session uses a fresh thread id.
  io_thread_checker_.Detach();

  // Remove audio session notification observers.
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  [session removeDelegate:audio_session_observer_];

  // All I/O should be stopped or paused prior to deactivating the audio
  // session, hence we deactivate it as the last action.
  UnconfigureAudioSession();
}

void AudioDeviceIOS::PrepareForNewStart() {
  LOGI() << "PrepareForNewStart";
  // The audio unit has been stopped and preparations are needed for an upcoming
  // restart. It will result in audio callbacks from a new native I/O thread
  // which means that we must detach thread checkers here to be prepared for an
  // upcoming new audio stream.
  io_thread_checker_.Detach();
}

bool AudioDeviceIOS::IsInterrupted() {
  return is_interrupted_;
}

#pragma mark - Not Implemented

int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const {
  audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
  return 0;
}

int16_t AudioDeviceIOS::PlayoutDevices() {
  // TODO(henrika): improve.
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return (int16_t)1;
}

int16_t AudioDeviceIOS::RecordingDevices() {
  // TODO(henrika): improve.
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return (int16_t)1;
}

int32_t AudioDeviceIOS::InitSpeaker() {
  return 0;
}

bool AudioDeviceIOS::SpeakerIsInitialized() const {
  return true;
}

int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return 0;
}

int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::InitMicrophone() {
  return 0;
}

bool AudioDeviceIOS::MicrophoneIsInitialized() const {
  return true;
}

int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
  enabled = false;
  return 0;
}

int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
  enabled = false;
  return 0;
}

int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
                                          char name[kAdmMaxDeviceNameSize],
                                          char guid[kAdmMaxGuidSize]) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return 0;
}

int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
  RTC_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
  available = true;
  return 0;
}

int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
  available = true;
  return 0;
}

}  // namespace ios_adm
}  // namespace webrtc