1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include "modules/audio_device/mac/audio_device_mac.h"
12 
13 #include <ApplicationServices/ApplicationServices.h>
14 #include <libkern/OSAtomic.h>  // OSAtomicCompareAndSwap()
15 #include <mach/mach.h>         // mach_task_self()
16 #include <sys/sysctl.h>        // sysctlbyname()
17 
18 #include <memory>
19 
20 #include "modules/audio_device/audio_device_config.h"
21 #include "modules/third_party/portaudio/pa_ringbuffer.h"
22 #include "rtc_base/arraysize.h"
23 #include "rtc_base/checks.h"
24 #include "rtc_base/platform_thread.h"
25 #include "rtc_base/system/arch.h"
26 
27 namespace webrtc {
28 
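// Helper macros for CoreAudio calls. Each evaluates `expr` into a local
// OSStatus variable named `err` (which must exist in the calling scope) and
// logs the four-character error code on failure; WEBRTC_CA_RETURN_ON_ERR
// additionally makes the calling function return -1.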
29 #define WEBRTC_CA_RETURN_ON_ERR(expr)                                \
30   do {                                                               \
31     err = expr;                                                      \
32     if (err != noErr) {                                              \
33       logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
34       return -1;                                                     \
35     }                                                                \
36   } while (0)
37 
38 #define WEBRTC_CA_LOG_ERR(expr)                                      \
39   do {                                                               \
40     err = expr;                                                      \
41     if (err != noErr) {                                              \
42       logCAMsg(rtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
43     }                                                                \
44   } while (0)
45 
46 #define WEBRTC_CA_LOG_WARN(expr)                                       \
47   do {                                                                 \
48     err = expr;                                                        \
49     if (err != noErr) {                                                \
50       logCAMsg(rtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
51     }                                                                  \
52   } while (0)
53 
54 enum { MaxNumberDevices = 64 };
55 
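// Lock-free 32-bit set/get built on a compare-and-swap loop with a full
// memory barrier. Used for flags and delay values shared between the
// CoreAudio IOProc callbacks and the worker/API threads.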
void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) {
57   while (1) {
58     int32_t oldValue = *theValue;
59     if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) == true) {
60       return;
61     }
62   }
63 }
64 
int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) {
66   while (1) {
67     int32_t value = *theValue;
68     if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) {
69       return value;
70     }
71   }
72 }
73 
74 // CoreAudio errors are best interpreted as four character strings.
void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev,
76                               const char* msg,
77                               const char* err) {
78   RTC_DCHECK(msg != NULL);
79   RTC_DCHECK(err != NULL);
80 
81 #ifdef WEBRTC_ARCH_BIG_ENDIAN
82   switch (sev) {
83     case rtc::LS_ERROR:
84       RTC_LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3];
85       break;
86     case rtc::LS_WARNING:
87       RTC_LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2]
88                           << err[3];
89       break;
90     case rtc::LS_VERBOSE:
91       RTC_LOG(LS_VERBOSE) << msg << ": " << err[0] << err[1] << err[2]
92                           << err[3];
93       break;
94     default:
95       break;
96   }
97 #else
98   // We need to flip the characters in this case.
99   switch (sev) {
100     case rtc::LS_ERROR:
101       RTC_LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
102       break;
103     case rtc::LS_WARNING:
104       RTC_LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1]
105                           << err[0];
106       break;
107     case rtc::LS_VERBOSE:
108       RTC_LOG(LS_VERBOSE) << msg << ": " << err[3] << err[2] << err[1]
109                           << err[0];
110       break;
111     default:
112       break;
113   }
114 #endif
115 }
116 
AudioDeviceMac::AudioDeviceMac()
118     : _ptrAudioBuffer(NULL),
119       _mixerManager(),
120       _inputDeviceIndex(0),
121       _outputDeviceIndex(0),
122       _inputDeviceID(kAudioObjectUnknown),
123       _outputDeviceID(kAudioObjectUnknown),
124       _inputDeviceIsSpecified(false),
125       _outputDeviceIsSpecified(false),
126       _recChannels(N_REC_CHANNELS),
127       _playChannels(N_PLAY_CHANNELS),
128       _captureBufData(NULL),
129       _renderBufData(NULL),
130       _initialized(false),
131       _isShutDown(false),
132       _recording(false),
133       _playing(false),
134       _recIsInitialized(false),
135       _playIsInitialized(false),
136       _renderDeviceIsAlive(1),
137       _captureDeviceIsAlive(1),
138       _twoDevices(true),
139       _doStop(false),
140       _doStopRec(false),
141       _macBookPro(false),
142       _macBookProPanRight(false),
143       _captureLatencyUs(0),
144       _renderLatencyUs(0),
145       _captureDelayUs(0),
146       _renderDelayUs(0),
147       _renderDelayOffsetSamples(0),
148       _paCaptureBuffer(NULL),
149       _paRenderBuffer(NULL),
150       _captureBufSizeSamples(0),
151       _renderBufSizeSamples(0),
152       prev_key_state_() {
153   RTC_LOG(LS_INFO) << __FUNCTION__ << " created";
154 
155   memset(_renderConvertData, 0, sizeof(_renderConvertData));
156   memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
157   memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
158   memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
159   memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
160 }
161 
AudioDeviceMac::~AudioDeviceMac() {
163   RTC_LOG(LS_INFO) << __FUNCTION__ << " destroyed";
164 
165   if (!_isShutDown) {
166     Terminate();
167   }
168 
169   RTC_DCHECK(!capture_worker_thread_.get());
170   RTC_DCHECK(!render_worker_thread_.get());
171 
172   if (_paRenderBuffer) {
173     delete _paRenderBuffer;
174     _paRenderBuffer = NULL;
175   }
176 
177   if (_paCaptureBuffer) {
178     delete _paCaptureBuffer;
179     _paCaptureBuffer = NULL;
180   }
181 
182   if (_renderBufData) {
183     delete[] _renderBufData;
184     _renderBufData = NULL;
185   }
186 
187   if (_captureBufData) {
188     delete[] _captureBufData;
189     _captureBufData = NULL;
190   }
191 
192   kern_return_t kernErr = KERN_SUCCESS;
193   kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
194   if (kernErr != KERN_SUCCESS) {
195     RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
196   }
197 
198   kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
199   if (kernErr != KERN_SUCCESS) {
200     RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
201   }
202 }
203 
204 // ============================================================================
205 //                                     API
206 // ============================================================================
207 
void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
209   MutexLock lock(&mutex_);
210 
211   _ptrAudioBuffer = audioBuffer;
212 
213   // inform the AudioBuffer about default settings for this implementation
214   _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
215   _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
216   _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
217   _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
218 }
219 
int32_t AudioDeviceMac::ActiveAudioLayer(
221     AudioDeviceModule::AudioLayer& audioLayer) const {
222   audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
223   return 0;
224 }
225 
AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() {
227   MutexLock lock(&mutex_);
228 
229   if (_initialized) {
230     return InitStatus::OK;
231   }
232 
233   OSStatus err = noErr;
234 
235   _isShutDown = false;
236 
237   // PortAudio ring buffers require an elementCount which is a power of two.
238   if (_renderBufData == NULL) {
239     UInt32 powerOfTwo = 1;
240     while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) {
241       powerOfTwo <<= 1;
242     }
243     _renderBufSizeSamples = powerOfTwo;
244     _renderBufData = new SInt16[_renderBufSizeSamples];
245   }
246 
247   if (_paRenderBuffer == NULL) {
248     _paRenderBuffer = new PaUtilRingBuffer;
249     PaRingBufferSize bufSize = -1;
250     bufSize = PaUtil_InitializeRingBuffer(
251         _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData);
252     if (bufSize == -1) {
253       RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
254       return InitStatus::PLAYOUT_ERROR;
255     }
256   }
257 
258   if (_captureBufData == NULL) {
259     UInt32 powerOfTwo = 1;
260     while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) {
261       powerOfTwo <<= 1;
262     }
263     _captureBufSizeSamples = powerOfTwo;
264     _captureBufData = new Float32[_captureBufSizeSamples];
265   }
266 
267   if (_paCaptureBuffer == NULL) {
268     _paCaptureBuffer = new PaUtilRingBuffer;
269     PaRingBufferSize bufSize = -1;
270     bufSize =
271         PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32),
272                                     _captureBufSizeSamples, _captureBufData);
273     if (bufSize == -1) {
274       RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
275       return InitStatus::RECORDING_ERROR;
276     }
277   }
278 
279   kern_return_t kernErr = KERN_SUCCESS;
280   kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
281                              SYNC_POLICY_FIFO, 0);
282   if (kernErr != KERN_SUCCESS) {
283     RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
284     return InitStatus::OTHER_ERROR;
285   }
286 
287   kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
288                              SYNC_POLICY_FIFO, 0);
289   if (kernErr != KERN_SUCCESS) {
290     RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
291     return InitStatus::OTHER_ERROR;
292   }
293 
294   // Setting RunLoop to NULL here instructs HAL to manage its own thread for
295   // notifications. This was the default behaviour on OS X 10.5 and earlier,
296   // but now must be explicitly specified. HAL would otherwise try to use the
297   // main thread to issue notifications.
298   AudioObjectPropertyAddress propertyAddress = {
299       kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal,
300       kAudioObjectPropertyElementMaster};
301   CFRunLoopRef runLoop = NULL;
302   UInt32 size = sizeof(CFRunLoopRef);
303   int aoerr = AudioObjectSetPropertyData(
304       kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop);
305   if (aoerr != noErr) {
306     RTC_LOG(LS_ERROR) << "Error in AudioObjectSetPropertyData: "
307                       << (const char*)&aoerr;
308     return InitStatus::OTHER_ERROR;
309   }
310 
311   // Listen for any device changes.
312   propertyAddress.mSelector = kAudioHardwarePropertyDevices;
313   WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(
314       kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
315 
316   // Determine if this is a MacBook Pro
317   _macBookPro = false;
318   _macBookProPanRight = false;
319   char buf[128];
320   size_t length = sizeof(buf);
321   memset(buf, 0, length);
322 
323   int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
324   if (intErr != 0) {
    RTC_LOG(LS_ERROR) << "Error in sysctlbyname(): " << intErr;
326   } else {
327     RTC_LOG(LS_VERBOSE) << "Hardware model: " << buf;
328     if (strncmp(buf, "MacBookPro", 10) == 0) {
329       _macBookPro = true;
330     }
331   }
332 
333   _initialized = true;
334 
335   return InitStatus::OK;
336 }
337 
int32_t AudioDeviceMac::Terminate() {
339   if (!_initialized) {
340     return 0;
341   }
342 
343   if (_recording) {
344     RTC_LOG(LS_ERROR) << "Recording must be stopped";
345     return -1;
346   }
347 
348   if (_playing) {
349     RTC_LOG(LS_ERROR) << "Playback must be stopped";
350     return -1;
351   }
352 
353   MutexLock lock(&mutex_);
354   _mixerManager.Close();
355 
356   OSStatus err = noErr;
357   int retVal = 0;
358 
359   AudioObjectPropertyAddress propertyAddress = {
360       kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
361       kAudioObjectPropertyElementMaster};
362   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
363       kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
364 
365   err = AudioHardwareUnload();
366   if (err != noErr) {
367     logCAMsg(rtc::LS_ERROR, "Error in AudioHardwareUnload()",
368              (const char*)&err);
369     retVal = -1;
370   }
371 
372   _isShutDown = true;
373   _initialized = false;
374   _outputDeviceIsSpecified = false;
375   _inputDeviceIsSpecified = false;
376 
377   return retVal;
378 }
379 
bool AudioDeviceMac::Initialized() const {
381   return (_initialized);
382 }
383 
int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) {
385   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
386 
387   // Make an attempt to open up the
388   // output mixer corresponding to the currently selected output device.
389   //
390   if (!wasInitialized && InitSpeaker() == -1) {
391     available = false;
392     return 0;
393   }
394 
395   // Given that InitSpeaker was successful, we know that a valid speaker
396   // exists.
397   available = true;
398 
399   // Close the initialized output mixer
400   //
401   if (!wasInitialized) {
402     _mixerManager.CloseSpeaker();
403   }
404 
405   return 0;
406 }
407 
int32_t AudioDeviceMac::InitSpeaker() {
409   MutexLock lock(&mutex_);
410   return InitSpeakerLocked();
411 }
412 
int32_t AudioDeviceMac::InitSpeakerLocked() {
414   if (_playing) {
415     return -1;
416   }
417 
418   if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) {
419     return -1;
420   }
421 
422   if (_inputDeviceID == _outputDeviceID) {
423     _twoDevices = false;
424   } else {
425     _twoDevices = true;
426   }
427 
428   if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) {
429     return -1;
430   }
431 
432   return 0;
433 }
434 
int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) {
436   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
437 
438   // Make an attempt to open up the
  // input mixer corresponding to the currently selected input device.
440   //
441   if (!wasInitialized && InitMicrophone() == -1) {
442     available = false;
443     return 0;
444   }
445 
446   // Given that InitMicrophone was successful, we know that a valid microphone
447   // exists.
448   available = true;
449 
450   // Close the initialized input mixer
451   //
452   if (!wasInitialized) {
453     _mixerManager.CloseMicrophone();
454   }
455 
456   return 0;
457 }
458 
int32_t AudioDeviceMac::InitMicrophone() {
460   MutexLock lock(&mutex_);
461   return InitMicrophoneLocked();
462 }
463 
int32_t AudioDeviceMac::InitMicrophoneLocked() {
465   if (_recording) {
466     return -1;
467   }
468 
469   if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) {
470     return -1;
471   }
472 
473   if (_inputDeviceID == _outputDeviceID) {
474     _twoDevices = false;
475   } else {
476     _twoDevices = true;
477   }
478 
479   if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) {
480     return -1;
481   }
482 
483   return 0;
484 }
485 
bool AudioDeviceMac::SpeakerIsInitialized() const {
487   return (_mixerManager.SpeakerIsInitialized());
488 }
489 
bool AudioDeviceMac::MicrophoneIsInitialized() const {
491   return (_mixerManager.MicrophoneIsInitialized());
492 }
493 
int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) {
495   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
496 
497   // Make an attempt to open up the
498   // output mixer corresponding to the currently selected output device.
499   //
500   if (!wasInitialized && InitSpeaker() == -1) {
501     // If we end up here it means that the selected speaker has no volume
502     // control.
503     available = false;
504     return 0;
505   }
506 
507   // Given that InitSpeaker was successful, we know that a volume control exists
508   //
509   available = true;
510 
511   // Close the initialized output mixer
512   //
513   if (!wasInitialized) {
514     _mixerManager.CloseSpeaker();
515   }
516 
517   return 0;
518 }
519 
int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) {
521   return (_mixerManager.SetSpeakerVolume(volume));
522 }
523 
int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const {
525   uint32_t level(0);
526 
527   if (_mixerManager.SpeakerVolume(level) == -1) {
528     return -1;
529   }
530 
531   volume = level;
532   return 0;
533 }
534 
int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
536   uint32_t maxVol(0);
537 
538   if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
539     return -1;
540   }
541 
542   maxVolume = maxVol;
543   return 0;
544 }
545 
int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const {
547   uint32_t minVol(0);
548 
549   if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
550     return -1;
551   }
552 
553   minVolume = minVol;
554   return 0;
555 }
556 
int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) {
558   bool isAvailable(false);
559   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
560 
561   // Make an attempt to open up the
562   // output mixer corresponding to the currently selected output device.
563   //
564   if (!wasInitialized && InitSpeaker() == -1) {
565     // If we end up here it means that the selected speaker has no volume
566     // control, hence it is safe to state that there is no mute control
567     // already at this stage.
568     available = false;
569     return 0;
570   }
571 
572   // Check if the selected speaker has a mute control
573   //
574   _mixerManager.SpeakerMuteIsAvailable(isAvailable);
575 
576   available = isAvailable;
577 
578   // Close the initialized output mixer
579   //
580   if (!wasInitialized) {
581     _mixerManager.CloseSpeaker();
582   }
583 
584   return 0;
585 }
586 
int32_t AudioDeviceMac::SetSpeakerMute(bool enable) {
588   return (_mixerManager.SetSpeakerMute(enable));
589 }
590 
int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const {
592   bool muted(0);
593 
594   if (_mixerManager.SpeakerMute(muted) == -1) {
595     return -1;
596   }
597 
598   enabled = muted;
599   return 0;
600 }
601 
int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) {
603   bool isAvailable(false);
604   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
605 
606   // Make an attempt to open up the
607   // input mixer corresponding to the currently selected input device.
608   //
609   if (!wasInitialized && InitMicrophone() == -1) {
610     // If we end up here it means that the selected microphone has no volume
    // control, hence it is safe to state that there is no mute control
612     // already at this stage.
613     available = false;
614     return 0;
615   }
616 
617   // Check if the selected microphone has a mute control
618   //
619   _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
620   available = isAvailable;
621 
622   // Close the initialized input mixer
623   //
624   if (!wasInitialized) {
625     _mixerManager.CloseMicrophone();
626   }
627 
628   return 0;
629 }
630 
int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) {
632   return (_mixerManager.SetMicrophoneMute(enable));
633 }
634 
int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const {
636   bool muted(0);
637 
638   if (_mixerManager.MicrophoneMute(muted) == -1) {
639     return -1;
640   }
641 
642   enabled = muted;
643   return 0;
644 }
645 
int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) {
647   bool isAvailable(false);
648   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
649 
650   if (!wasInitialized && InitMicrophone() == -1) {
651     // Cannot open the specified device
652     available = false;
653     return 0;
654   }
655 
656   // Check if the selected microphone can record stereo
657   //
658   _mixerManager.StereoRecordingIsAvailable(isAvailable);
659   available = isAvailable;
660 
661   // Close the initialized input mixer
662   //
663   if (!wasInitialized) {
664     _mixerManager.CloseMicrophone();
665   }
666 
667   return 0;
668 }
669 
int32_t AudioDeviceMac::SetStereoRecording(bool enable) {
671   if (enable)
672     _recChannels = 2;
673   else
674     _recChannels = 1;
675 
676   return 0;
677 }
678 
int32_t AudioDeviceMac::StereoRecording(bool& enabled) const {
680   if (_recChannels == 2)
681     enabled = true;
682   else
683     enabled = false;
684 
685   return 0;
686 }
687 
int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) {
689   bool isAvailable(false);
690   bool wasInitialized = _mixerManager.SpeakerIsInitialized();
691 
692   if (!wasInitialized && InitSpeaker() == -1) {
693     // Cannot open the specified device
694     available = false;
695     return 0;
696   }
697 
  // Check if the selected speaker can play out in stereo
699   //
700   _mixerManager.StereoPlayoutIsAvailable(isAvailable);
701   available = isAvailable;
702 
  // Close the initialized output mixer
704   //
705   if (!wasInitialized) {
706     _mixerManager.CloseSpeaker();
707   }
708 
709   return 0;
710 }
711 
int32_t AudioDeviceMac::SetStereoPlayout(bool enable) {
713   if (enable)
714     _playChannels = 2;
715   else
716     _playChannels = 1;
717 
718   return 0;
719 }
720 
int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const {
722   if (_playChannels == 2)
723     enabled = true;
724   else
725     enabled = false;
726 
727   return 0;
728 }
729 
int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) {
731   bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
732 
733   // Make an attempt to open up the
  // input mixer corresponding to the currently selected input device.
735   //
736   if (!wasInitialized && InitMicrophone() == -1) {
737     // If we end up here it means that the selected microphone has no volume
738     // control.
739     available = false;
740     return 0;
741   }
742 
743   // Given that InitMicrophone was successful, we know that a volume control
744   // exists
745   //
746   available = true;
747 
748   // Close the initialized input mixer
749   //
750   if (!wasInitialized) {
751     _mixerManager.CloseMicrophone();
752   }
753 
754   return 0;
755 }
756 
int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) {
758   return (_mixerManager.SetMicrophoneVolume(volume));
759 }
760 
int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const {
762   uint32_t level(0);
763 
764   if (_mixerManager.MicrophoneVolume(level) == -1) {
765     RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level";
766     return -1;
767   }
768 
769   volume = level;
770   return 0;
771 }
772 
int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
774   uint32_t maxVol(0);
775 
776   if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
777     return -1;
778   }
779 
780   maxVolume = maxVol;
781   return 0;
782 }
783 
int32_t AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const {
785   uint32_t minVol(0);
786 
787   if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
788     return -1;
789   }
790 
791   minVolume = minVol;
792   return 0;
793 }
794 
int16_t AudioDeviceMac::PlayoutDevices() {
796   AudioDeviceID playDevices[MaxNumberDevices];
797   return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
798                           MaxNumberDevices);
799 }
800 
int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
802   MutexLock lock(&mutex_);
803 
804   if (_playIsInitialized) {
805     return -1;
806   }
807 
808   AudioDeviceID playDevices[MaxNumberDevices];
809   uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
810                                        playDevices, MaxNumberDevices);
811   RTC_LOG(LS_VERBOSE) << "number of available waveform-audio output devices is "
812                       << nDevices;
813 
814   if (index > (nDevices - 1)) {
815     RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
816                       << "]";
817     return -1;
818   }
819 
820   _outputDeviceIndex = index;
821   _outputDeviceIsSpecified = true;
822 
823   return 0;
824 }
825 
int32_t AudioDeviceMac::SetPlayoutDevice(
827     AudioDeviceModule::WindowsDeviceType /*device*/) {
828   RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
829   return -1;
830 }
831 
int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index,
833                                           char name[kAdmMaxDeviceNameSize],
834                                           char guid[kAdmMaxGuidSize]) {
835   const uint16_t nDevices(PlayoutDevices());
836 
837   if ((index > (nDevices - 1)) || (name == NULL)) {
838     return -1;
839   }
840 
841   memset(name, 0, kAdmMaxDeviceNameSize);
842 
843   if (guid != NULL) {
844     memset(guid, 0, kAdmMaxGuidSize);
845   }
846 
847   return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name);
848 }
849 
int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index,
851                                             char name[kAdmMaxDeviceNameSize],
852                                             char guid[kAdmMaxGuidSize]) {
853   const uint16_t nDevices(RecordingDevices());
854 
855   if ((index > (nDevices - 1)) || (name == NULL)) {
856     return -1;
857   }
858 
859   memset(name, 0, kAdmMaxDeviceNameSize);
860 
861   if (guid != NULL) {
862     memset(guid, 0, kAdmMaxGuidSize);
863   }
864 
865   return GetDeviceName(kAudioDevicePropertyScopeInput, index, name);
866 }
867 
int16_t AudioDeviceMac::RecordingDevices() {
869   AudioDeviceID recDevices[MaxNumberDevices];
870   return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
871                           MaxNumberDevices);
872 }
873 
int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
875   if (_recIsInitialized) {
876     return -1;
877   }
878 
879   AudioDeviceID recDevices[MaxNumberDevices];
880   uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
881                                        recDevices, MaxNumberDevices);
882   RTC_LOG(LS_VERBOSE) << "number of available waveform-audio input devices is "
883                       << nDevices;
884 
885   if (index > (nDevices - 1)) {
886     RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
887                       << "]";
888     return -1;
889   }
890 
891   _inputDeviceIndex = index;
892   _inputDeviceIsSpecified = true;
893 
894   return 0;
895 }
896 
int32_t AudioDeviceMac::SetRecordingDevice(
898     AudioDeviceModule::WindowsDeviceType /*device*/) {
899   RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported";
900   return -1;
901 }
902 
int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) {
904   available = true;
905 
906   // Try to initialize the playout side
907   if (InitPlayout() == -1) {
908     available = false;
909   }
910 
911   // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
912   // We must actually start playout here in order to have the IOProc
913   // deleted by calling StopPlayout().
914   if (StartPlayout() == -1) {
915     available = false;
916   }
917 
918   // Cancel effect of initialization
919   if (StopPlayout() == -1) {
920     available = false;
921   }
922 
923   return 0;
924 }
925 
int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) {
927   available = true;
928 
929   // Try to initialize the recording side
930   if (InitRecording() == -1) {
931     available = false;
932   }
933 
934   // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
935   // We must actually start recording here in order to have the IOProc
936   // deleted by calling StopRecording().
937   if (StartRecording() == -1) {
938     available = false;
939   }
940 
941   // Cancel effect of initialization
942   if (StopRecording() == -1) {
943     available = false;
944   }
945 
946   return 0;
947 }
948 
int32_t AudioDeviceMac::InitPlayout() {
950   RTC_LOG(LS_INFO) << "InitPlayout";
951   MutexLock lock(&mutex_);
952 
953   if (_playing) {
954     return -1;
955   }
956 
957   if (!_outputDeviceIsSpecified) {
958     return -1;
959   }
960 
961   if (_playIsInitialized) {
962     return 0;
963   }
964 
965   // Initialize the speaker (devices might have been added or removed)
966   if (InitSpeakerLocked() == -1) {
967     RTC_LOG(LS_WARNING) << "InitSpeaker() failed";
968   }
969 
970   if (!MicrophoneIsInitialized()) {
971     // Make this call to check if we are using
972     // one or two devices (_twoDevices)
973     bool available = false;
974     if (MicrophoneIsAvailable(available) == -1) {
975       RTC_LOG(LS_WARNING) << "MicrophoneIsAvailable() failed";
976     }
977   }
978 
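  // Discard any samples left in the render ring buffer from a previous
  // playout session.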
979   PaUtil_FlushRingBuffer(_paRenderBuffer);
980 
981   OSStatus err = noErr;
982   UInt32 size = 0;
983   _renderDelayOffsetSamples = 0;
984   _renderDelayUs = 0;
985   _renderLatencyUs = 0;
986   _renderDeviceIsAlive = 1;
987   _doStop = false;
988 
989   // The internal microphone of a MacBook Pro is located under the left speaker
990   // grille. When the internal speakers are in use, we want to fully stereo
991   // pan to the right.
992   AudioObjectPropertyAddress propertyAddress = {
993       kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
994   if (_macBookPro) {
995     _macBookProPanRight = false;
996     Boolean hasProperty =
997         AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
998     if (hasProperty) {
999       UInt32 dataSource = 0;
1000       size = sizeof(dataSource);
1001       WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(
1002           _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource));
1003 
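      // 'ispk' is the four-character data source ID reported for the
      // built-in (internal) speakers.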
1004       if (dataSource == 'ispk') {
1005         _macBookProPanRight = true;
1006         RTC_LOG(LS_VERBOSE)
1007             << "MacBook Pro using internal speakers; stereo panning right";
1008       } else {
1009         RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
1010       }
1011 
1012       // Add a listener to determine if the status changes.
1013       WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1014           _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1015     }
1016   }
1017 
1018   // Get current stream description
1019   propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1020   memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
1021   size = sizeof(_outStreamFormat);
1022   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1023       _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
1024 
1025   if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
1026     logCAMsg(rtc::LS_ERROR, "Unacceptable output stream format -> mFormatID",
1027              (const char*)&_outStreamFormat.mFormatID);
1028     return -1;
1029   }
1030 
1031   if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
1032     RTC_LOG(LS_ERROR)
1033         << "Too many channels on output device (mChannelsPerFrame = "
1034         << _outStreamFormat.mChannelsPerFrame << ")";
1035     return -1;
1036   }
1037 
1038   if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
    RTC_LOG(LS_ERROR) << "Non-interleaved audio data is not supported. "
1040                          "AudioHardware streams should not have this format.";
1041     return -1;
1042   }
1043 
  RTC_LOG(LS_VERBOSE) << "Output stream format:";
1045   RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _outStreamFormat.mSampleRate
1046                       << ", mChannelsPerFrame = "
1047                       << _outStreamFormat.mChannelsPerFrame;
1048   RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = "
1049                       << _outStreamFormat.mBytesPerPacket
1050                       << ", mFramesPerPacket = "
1051                       << _outStreamFormat.mFramesPerPacket;
1052   RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _outStreamFormat.mBytesPerFrame
1053                       << ", mBitsPerChannel = "
1054                       << _outStreamFormat.mBitsPerChannel;
1055   RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _outStreamFormat.mFormatFlags;
1056   logCAMsg(rtc::LS_VERBOSE, "mFormatID",
1057            (const char*)&_outStreamFormat.mFormatID);
1058 
1059   // Our preferred format to work with.
1060   if (_outStreamFormat.mChannelsPerFrame < 2) {
1061     // Disable stereo playout when we only have one channel on the device.
1062     _playChannels = 1;
1063     RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
1064   }
1065   WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
1066 
1067   // Listen for format changes.
1068   propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1069   WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1070       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1071 
1072   // Listen for processor overloads.
1073   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1074   WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1075       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1076 
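  // Create an IOProc on the output device unless recording has already
  // created a shared IOProc for a single shared device.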
1077   if (_twoDevices || !_recIsInitialized) {
1078     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1079         _outputDeviceID, deviceIOProc, this, &_deviceIOProcID));
1080   }
1081 
1082   _playIsInitialized = true;
1083 
1084   return 0;
1085 }
1086 
int32_t AudioDeviceMac::InitRecording() {
1088   RTC_LOG(LS_INFO) << "InitRecording";
1089   MutexLock lock(&mutex_);
1090 
1091   if (_recording) {
1092     return -1;
1093   }
1094 
1095   if (!_inputDeviceIsSpecified) {
1096     return -1;
1097   }
1098 
1099   if (_recIsInitialized) {
1100     return 0;
1101   }
1102 
1103   // Initialize the microphone (devices might have been added or removed)
1104   if (InitMicrophoneLocked() == -1) {
1105     RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
1106   }
1107 
1108   if (!SpeakerIsInitialized()) {
1109     // Make this call to check if we are using
1110     // one or two devices (_twoDevices)
1111     bool available = false;
1112     if (SpeakerIsAvailable(available) == -1) {
1113       RTC_LOG(LS_WARNING) << "SpeakerIsAvailable() failed";
1114     }
1115   }
1116 
1117   OSStatus err = noErr;
1118   UInt32 size = 0;
1119 
1120   PaUtil_FlushRingBuffer(_paCaptureBuffer);
1121 
1122   _captureDelayUs = 0;
1123   _captureLatencyUs = 0;
1124   _captureDeviceIsAlive = 1;
1125   _doStopRec = false;
1126 
1127   // Get current stream description
1128   AudioObjectPropertyAddress propertyAddress = {
1129       kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
1130   memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
1131   size = sizeof(_inStreamFormat);
1132   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1133       _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
1134 
1135   if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
1136     logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
1137              (const char*)&_inStreamFormat.mFormatID);
1138     return -1;
1139   }
1140 
1141   if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
1142     RTC_LOG(LS_ERROR)
1143         << "Too many channels on input device (mChannelsPerFrame = "
1144         << _inStreamFormat.mChannelsPerFrame << ")";
1145     return -1;
1146   }
1147 
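  // Samples delivered per IO cycle (N_BLOCKS_IO blocks of 10 ms each); this
  // must fit in the capture ring buffer allocated in Init().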
1148   const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
1149                                     _inStreamFormat.mSampleRate / 100 *
1150                                     N_BLOCKS_IO;
1151   if (io_block_size_samples > _captureBufSizeSamples) {
1152     RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
1153                       << ") is larger than ring buffer ("
1154                       << _captureBufSizeSamples << ")";
1155     return -1;
1156   }
1157 
1158   RTC_LOG(LS_VERBOSE) << "Input stream format:";
1159   RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _inStreamFormat.mSampleRate
1160                       << ", mChannelsPerFrame = "
1161                       << _inStreamFormat.mChannelsPerFrame;
1162   RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << _inStreamFormat.mBytesPerPacket
1163                       << ", mFramesPerPacket = "
1164                       << _inStreamFormat.mFramesPerPacket;
1165   RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _inStreamFormat.mBytesPerFrame
1166                       << ", mBitsPerChannel = "
1167                       << _inStreamFormat.mBitsPerChannel;
1168   RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _inStreamFormat.mFormatFlags;
1169   logCAMsg(rtc::LS_VERBOSE, "mFormatID",
1170            (const char*)&_inStreamFormat.mFormatID);
1171 
1172   // Our preferred format to work with
1173   if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
1174     _inDesiredFormat.mChannelsPerFrame = 2;
1175   } else {
1176     // Disable stereo recording when we only have one channel on the device.
1177     _inDesiredFormat.mChannelsPerFrame = 1;
1178     _recChannels = 1;
1179     RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
1180   }
1181 
1182   if (_ptrAudioBuffer) {
1183     // Update audio buffer with the selected parameters
1184     _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
1185     _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
1186   }
1187 
1188   _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
1189   _inDesiredFormat.mBytesPerPacket =
1190       _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1191   _inDesiredFormat.mFramesPerPacket = 1;
1192   _inDesiredFormat.mBytesPerFrame =
1193       _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
1194   _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
1195 
1196   _inDesiredFormat.mFormatFlags =
1197       kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
1198 #ifdef WEBRTC_ARCH_BIG_ENDIAN
1199   _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
1200 #endif
1201   _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
1202 
1203   WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
1204                                             &_captureConverter));
1205 
1206   // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
1207   // TODO(xians): investigate this block.
1208   UInt32 bufByteCount =
1209       (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO *
1210                _inStreamFormat.mChannelsPerFrame * sizeof(Float32));
1211   if (_inStreamFormat.mFramesPerPacket != 0) {
1212     if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) {
1213       bufByteCount =
1214           ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) *
1215           _inStreamFormat.mFramesPerPacket;
1216     }
1217   }
1218 
1219   // Ensure the buffer size is within the acceptable range provided by the
1220   // device.
1221   propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
1222   AudioValueRange range;
1223   size = sizeof(range);
1224   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1225       _inputDeviceID, &propertyAddress, 0, NULL, &size, &range));
1226   if (range.mMinimum > bufByteCount) {
1227     bufByteCount = range.mMinimum;
1228   } else if (range.mMaximum < bufByteCount) {
1229     bufByteCount = range.mMaximum;
1230   }
1231 
1232   propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
1233   size = sizeof(bufByteCount);
1234   WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
1235       _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
1236 
1237   // Get capture device latency
1238   propertyAddress.mSelector = kAudioDevicePropertyLatency;
1239   UInt32 latency = 0;
1240   size = sizeof(UInt32);
1241   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1242       _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1243   _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
1244 
1245   // Get capture stream latency
1246   propertyAddress.mSelector = kAudioDevicePropertyStreams;
1247   AudioStreamID stream = 0;
1248   size = sizeof(AudioStreamID);
1249   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1250       _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
1251   propertyAddress.mSelector = kAudioStreamPropertyLatency;
1252   size = sizeof(UInt32);
1253   latency = 0;
1254   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
1255       _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
1256   _captureLatencyUs +=
1257       (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
1258 
1259   // Listen for format changes
1260   // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
1261   propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1262   WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1263       _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1264 
1265   // Listen for processor overloads
1266   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1267   WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
1268       _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1269 
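  // With separate capture and render devices the recording side gets its own
  // IOProc; with a shared device, reuse the IOProc created by InitPlayout()
  // if playout is already initialized.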
1270   if (_twoDevices) {
1271     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1272         _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID));
1273   } else if (!_playIsInitialized) {
1274     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
1275         _inputDeviceID, deviceIOProc, this, &_deviceIOProcID));
1276   }
1277 
1278   // Mark recording side as initialized
1279   _recIsInitialized = true;
1280 
1281   return 0;
1282 }
1283 
int32_t AudioDeviceMac::StartRecording() {
1285   RTC_LOG(LS_INFO) << "StartRecording";
1286   MutexLock lock(&mutex_);
1287 
1288   if (!_recIsInitialized) {
1289     return -1;
1290   }
1291 
1292   if (_recording) {
1293     return 0;
1294   }
1295 
1296   if (!_initialized) {
1297     RTC_LOG(LS_ERROR) << "Recording worker thread has not been started";
1298     return -1;
1299   }
1300 
1301   RTC_DCHECK(!capture_worker_thread_.get());
1302   capture_worker_thread_.reset(new rtc::PlatformThread(
1303       RunCapture, this, "CaptureWorkerThread", rtc::kRealtimePriority));
1304   RTC_DCHECK(capture_worker_thread_.get());
1305   capture_worker_thread_->Start();
1306 
1307   OSStatus err = noErr;
1308   if (_twoDevices) {
1309     WEBRTC_CA_RETURN_ON_ERR(
1310         AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
1311   } else if (!_playing) {
1312     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
1313   }
1314 
1315   _recording = true;
1316 
1317   return 0;
1318 }
1319 
int32_t AudioDeviceMac::StopRecording() {
1321   RTC_LOG(LS_INFO) << "StopRecording";
1322   MutexLock lock(&mutex_);
1323 
1324   if (!_recIsInitialized) {
1325     return 0;
1326   }
1327 
1328   OSStatus err = noErr;
1329   int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive);
1330   if (_twoDevices && captureDeviceIsAlive == 1) {
1331     // Recording side uses its own dedicated device and IOProc.
1332     if (_recording) {
1333       _recording = false;
1334       _doStopRec = true;  // Signal to io proc to stop audio device
1335       mutex_.Unlock();    // Cannot be under lock, risk of deadlock
1336       if (!_stopEventRec.Wait(2000)) {
1337         MutexLock lockScoped(&mutex_);
        RTC_LOG(LS_WARNING) << "Timed out stopping the capture IOProc. "
1339                                "We may have failed to detect a device removal.";
1340         WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
1341         WEBRTC_CA_LOG_WARN(
1342             AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
1343       }
1344       mutex_.Lock();
1345       _doStopRec = false;
1346       RTC_LOG(LS_INFO) << "Recording stopped (input device)";
1347     } else if (_recIsInitialized) {
1348       WEBRTC_CA_LOG_WARN(
1349           AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
1350       RTC_LOG(LS_INFO) << "Recording uninitialized (input device)";
1351     }
1352   } else {
1353     // We signal a stop for a shared device even when rendering has
1354     // not yet ended. This is to ensure the IOProc will return early as
1355     // intended (by checking |_recording|) before accessing
1356     // resources we free below (e.g. the capture converter).
1357     //
    // In the case of a shared device, the IOProc will verify
1359     // rendering has ended before stopping itself.
1360     if (_recording && captureDeviceIsAlive == 1) {
1361       _recording = false;
1362       _doStop = true;     // Signal to io proc to stop audio device
1363       mutex_.Unlock();    // Cannot be under lock, risk of deadlock
1364       if (!_stopEvent.Wait(2000)) {
1365         MutexLock lockScoped(&mutex_);
        RTC_LOG(LS_WARNING) << "Timed out stopping the shared IOProc. "
1367                                "We may have failed to detect a device removal.";
1368         // We assume rendering on a shared device has stopped as well if
1369         // the IOProc times out.
1370         WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
1371         WEBRTC_CA_LOG_WARN(
1372             AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1373       }
1374       mutex_.Lock();
1375       _doStop = false;
1376       RTC_LOG(LS_INFO) << "Recording stopped (shared device)";
1377     } else if (_recIsInitialized && !_playing && !_playIsInitialized) {
1378       WEBRTC_CA_LOG_WARN(
1379           AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1380       RTC_LOG(LS_INFO) << "Recording uninitialized (shared device)";
1381     }
1382   }
1383 
1384   // Setting this signal will allow the worker thread to be stopped.
1385   AtomicSet32(&_captureDeviceIsAlive, 0);
1386 
1387   if (capture_worker_thread_.get()) {
1388     mutex_.Unlock();
1389     capture_worker_thread_->Stop();
1390     capture_worker_thread_.reset();
1391     mutex_.Lock();
1392   }
1393 
1394   WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
1395 
1396   // Remove listeners.
1397   AudioObjectPropertyAddress propertyAddress = {
1398       kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
1399   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1400       _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1401 
1402   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1403   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1404       _inputDeviceID, &propertyAddress, &objectListenerProc, this));
1405 
1406   _recIsInitialized = false;
1407   _recording = false;
1408 
1409   return 0;
1410 }
1411 
bool AudioDeviceMac::RecordingIsInitialized() const {
1413   return (_recIsInitialized);
1414 }
1415 
bool AudioDeviceMac::Recording() const {
1417   return (_recording);
1418 }
1419 
bool AudioDeviceMac::PlayoutIsInitialized() const {
1421   return (_playIsInitialized);
1422 }
1423 
int32_t AudioDeviceMac::StartPlayout() {
1425   RTC_LOG(LS_INFO) << "StartPlayout";
1426   MutexLock lock(&mutex_);
1427 
1428   if (!_playIsInitialized) {
1429     return -1;
1430   }
1431 
1432   if (_playing) {
1433     return 0;
1434   }
1435 
1436   RTC_DCHECK(!render_worker_thread_.get());
1437   render_worker_thread_.reset(new rtc::PlatformThread(
1438       RunRender, this, "RenderWorkerThread", rtc::kRealtimePriority));
1439   render_worker_thread_->Start();
1440 
1441   if (_twoDevices || !_recording) {
1442     OSStatus err = noErr;
1443     WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
1444   }
1445   _playing = true;
1446 
1447   return 0;
1448 }
1449 
int32_t AudioDeviceMac::StopPlayout() {
1451   RTC_LOG(LS_INFO) << "StopPlayout";
1452   MutexLock lock(&mutex_);
1453 
1454   if (!_playIsInitialized) {
1455     return 0;
1456   }
1457 
1458   OSStatus err = noErr;
1459   int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive);
1460   if (_playing && renderDeviceIsAlive == 1) {
1461     // We signal a stop for a shared device even when capturing has not
1462     // yet ended. This is to ensure the IOProc will return early as
1463     // intended (by checking |_playing|) before accessing resources we
1464     // free below (e.g. the render converter).
1465     //
1466     // In the case of a shared device, the IOProc will verify capturing
1467     // has ended before stopping itself.
1468     _playing = false;
1469     _doStop = true;     // Signal to io proc to stop audio device
1470     mutex_.Unlock();    // Cannot be under lock, risk of deadlock
1471     if (!_stopEvent.Wait(2000)) {
1472       MutexLock lockScoped(&mutex_);
      RTC_LOG(LS_WARNING) << "Timed out stopping the render IOProc. "
1474                              "We may have failed to detect a device removal.";
1475 
1476       // We assume capturing on a shared device has stopped as well if the
1477       // IOProc times out.
1478       WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
1479       WEBRTC_CA_LOG_WARN(
1480           AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1481     }
1482     mutex_.Lock();
1483     _doStop = false;
1484     RTC_LOG(LS_INFO) << "Playout stopped";
1485   } else if (_twoDevices && _playIsInitialized) {
1486     WEBRTC_CA_LOG_WARN(
1487         AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1488     RTC_LOG(LS_INFO) << "Playout uninitialized (output device)";
1489   } else if (!_twoDevices && _playIsInitialized && !_recIsInitialized) {
1490     WEBRTC_CA_LOG_WARN(
1491         AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
1492     RTC_LOG(LS_INFO) << "Playout uninitialized (shared device)";
1493   }
1494 
1495   // Setting this signal will allow the worker thread to be stopped.
1496   AtomicSet32(&_renderDeviceIsAlive, 0);
1497   if (render_worker_thread_.get()) {
1498     mutex_.Unlock();
1499     render_worker_thread_->Stop();
1500     render_worker_thread_.reset();
1501     mutex_.Lock();
1502   }
1503 
1504   WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
1505 
1506   // Remove listeners.
1507   AudioObjectPropertyAddress propertyAddress = {
1508       kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0};
1509   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1510       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1511 
1512   propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1513   WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
1514       _outputDeviceID, &propertyAddress, &objectListenerProc, this));
1515 
  if (_macBookPro) {
    propertyAddress.mSelector = kAudioDevicePropertyDataSource;
    Boolean hasProperty =
        AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
    if (hasProperty) {
      WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
          _outputDeviceID, &propertyAddress, &objectListenerProc, this));
    }
1524   }
1525 
1526   _playIsInitialized = false;
1527   _playing = false;
1528 
1529   return 0;
1530 }
1531 
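// Reported playout delay is the current render buffer delay plus the render
// latency, converted from microseconds to milliseconds and rounded.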
int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const {
1533   int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
1534   delayMS =
1535       static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
1536   return 0;
1537 }
1538 
bool AudioDeviceMac::Playing() const {
1540   return (_playing);
1541 }
1542 
1543 // ============================================================================
1544 //                                 Private Methods
1545 // ============================================================================
1546 
int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
1548                                          AudioDeviceID scopedDeviceIds[],
1549                                          const uint32_t deviceListLength) {
1550   OSStatus err = noErr;
1551 
1552   AudioObjectPropertyAddress propertyAddress = {
1553       kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
1554       kAudioObjectPropertyElementMaster};
1555   UInt32 size = 0;
1556   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(
1557       kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size));
1558   if (size == 0) {
1559     RTC_LOG(LS_WARNING) << "No devices";
1560     return 0;
1561   }
1562 
1563   UInt32 numberDevices = size / sizeof(AudioDeviceID);
1564   const auto deviceIds = std::make_unique<AudioDeviceID[]>(numberDevices);
1565   AudioBufferList* bufferList = NULL;
1566   UInt32 numberScopedDevices = 0;
1567 
1568   // First check if there is a default device and list it
1569   UInt32 hardwareProperty = 0;
1570   if (scope == kAudioDevicePropertyScopeOutput) {
1571     hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
1572   } else {
1573     hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
1574   }
1575 
1576   AudioObjectPropertyAddress propertyAddressDefault = {
1577       hardwareProperty, kAudioObjectPropertyScopeGlobal,
1578       kAudioObjectPropertyElementMaster};
1579 
1580   AudioDeviceID usedID;
1581   UInt32 uintSize = sizeof(UInt32);
1582   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
1583                                                      &propertyAddressDefault, 0,
1584                                                      NULL, &uintSize, &usedID));
1585   if (usedID != kAudioDeviceUnknown) {
1586     scopedDeviceIds[numberScopedDevices] = usedID;
1587     numberScopedDevices++;
1588   } else {
1589     RTC_LOG(LS_WARNING) << "GetNumberDevices(): Default device unknown";
1590   }
1591 
1592   // Then list the rest of the devices
1593   bool listOK = true;
1594 
1595   WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
1596                                                &propertyAddress, 0, NULL, &size,
1597                                                deviceIds.get()));
1598   if (err != noErr) {
1599     listOK = false;
1600   } else {
1601     propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
1602     propertyAddress.mScope = scope;
1603     propertyAddress.mElement = 0;
1604     for (UInt32 i = 0; i < numberDevices; i++) {
1605       // Check for input channels
1606       WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(
1607           deviceIds[i], &propertyAddress, 0, NULL, &size));
1608       if (err == kAudioHardwareBadDeviceError) {
1609         // This device doesn't actually exist; continue iterating.
1610         continue;
1611       } else if (err != noErr) {
1612         listOK = false;
1613         break;
1614       }
1615 
1616       bufferList = (AudioBufferList*)malloc(size);
1617       WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
1618           deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList));
1619       if (err != noErr) {
1620         listOK = false;
1621         break;
1622       }
1623 
1624       if (bufferList->mNumberBuffers > 0) {
1625         if (numberScopedDevices >= deviceListLength) {
1626           RTC_LOG(LS_ERROR) << "Device list is not long enough";
1627           listOK = false;
1628           break;
1629         }
1630 
1631         scopedDeviceIds[numberScopedDevices] = deviceIds[i];
1632         numberScopedDevices++;
1633       }
1634 
1635       free(bufferList);
1636       bufferList = NULL;
1637     }  // for
1638   }
1639 
1640   if (!listOK) {
1641     if (bufferList) {
1642       free(bufferList);
1643       bufferList = NULL;
1644     }
1645     return -1;
1646   }
1647 
1648   return numberScopedDevices;
1649 }
1650 
int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
                                      const uint16_t index,
                                      char* name) {
  OSStatus err = noErr;
  UInt32 len = kAdmMaxDeviceNameSize;
  AudioDeviceID deviceIds[MaxNumberDevices];

  int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
  if (numberDevices < 0) {
    return -1;
  } else if (numberDevices == 0) {
    RTC_LOG(LS_ERROR) << "No devices";
    return -1;
  }

  // If the index is below the number of devices, assume it's a "WebRTC ID";
  // otherwise assume it's a CoreAudio ID.
  AudioDeviceID usedID;

  // Check if there is a default device.
  bool isDefaultDevice = false;
  if (index == 0) {
    UInt32 hardwareProperty = 0;
    if (scope == kAudioDevicePropertyScopeOutput) {
      hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
    } else {
      hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
    }
    AudioObjectPropertyAddress propertyAddress = {
        hardwareProperty, kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster};
    UInt32 size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
    if (usedID == kAudioDeviceUnknown) {
      RTC_LOG(LS_WARNING) << "GetDeviceName(): Default device unknown";
    } else {
      isDefaultDevice = true;
    }
  }

  AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName,
                                                scope, 0};

  if (isDefaultDevice) {
    char devName[len];

    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress,
                                                       0, NULL, &len, devName));

    sprintf(name, "default (%s)", devName);
  } else {
    if (index < numberDevices) {
      usedID = deviceIds[index];
    } else {
      usedID = index;
    }

    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress,
                                                       0, NULL, &len, name));
  }

  return 0;
}

int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
                                   AudioDeviceID& deviceId,
                                   const bool isInput) {
  OSStatus err = noErr;
  UInt32 size = 0;
  AudioObjectPropertyScope deviceScope;
  AudioObjectPropertySelector defaultDeviceSelector;
  AudioDeviceID deviceIds[MaxNumberDevices];

  if (isInput) {
    deviceScope = kAudioDevicePropertyScopeInput;
    defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
  } else {
    deviceScope = kAudioDevicePropertyScopeOutput;
    defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
  }

  AudioObjectPropertyAddress propertyAddress = {
      defaultDeviceSelector, kAudioObjectPropertyScopeGlobal,
      kAudioObjectPropertyElementMaster};

  // Get the actual device IDs
  int numberDevices =
      GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices);
  if (numberDevices < 0) {
    return -1;
  } else if (numberDevices == 0) {
    RTC_LOG(LS_ERROR) << "InitDevice(): No devices";
    return -1;
  }

  bool isDefaultDevice = false;
  deviceId = kAudioDeviceUnknown;
  if (userDeviceIndex == 0) {
    // Try to use default system device
    size = sizeof(AudioDeviceID);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId));
    if (deviceId == kAudioDeviceUnknown) {
      RTC_LOG(LS_WARNING) << "No default device exists";
    } else {
      isDefaultDevice = true;
    }
  }

  if (!isDefaultDevice) {
    deviceId = deviceIds[userDeviceIndex];
  }

  // Obtain device name and manufacturer for logging.
  // Also use this as a test to ensure a user-set device ID is valid.
  char devName[128];
  char devManf[128];
  memset(devName, 0, sizeof(devName));
  memset(devManf, 0, sizeof(devManf));

  propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
  propertyAddress.mScope = deviceScope;
  propertyAddress.mElement = 0;
  size = sizeof(devName);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
                                                     0, NULL, &size, devName));

  propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
  size = sizeof(devManf);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
                                                     0, NULL, &size, devManf));

  if (isInput) {
    RTC_LOG(LS_INFO) << "Input device: " << devManf << " " << devName;
  } else {
    RTC_LOG(LS_INFO) << "Output device: " << devManf << " " << devName;
  }

  return 0;
}

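// Configures the playout side to the format the rest of WebRTC expects:
// 16-bit signed, interleaved PCM at N_PLAY_SAMPLES_PER_SEC with _playChannels
// channels. Creates the AudioConverter from this desired format to the
// device's stream format, clamps the device buffer size to roughly 20 ms
// (within the range the device allows) and caches the combined device and
// stream latency in _renderLatencyUs.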
OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
  // Our preferred format to work with.
  _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
  _outDesiredFormat.mChannelsPerFrame = _playChannels;

  if (_ptrAudioBuffer) {
    // Update audio buffer with the selected parameters.
    _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
    _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
  }

  _renderDelayOffsetSamples =
      _renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
                                  _outDesiredFormat.mChannelsPerFrame;

  _outDesiredFormat.mBytesPerPacket =
      _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  // In uncompressed audio, a packet is one frame.
  _outDesiredFormat.mFramesPerPacket = 1;
  _outDesiredFormat.mBytesPerFrame =
      _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;

  _outDesiredFormat.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
  _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
  _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;

  OSStatus err = noErr;
  WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(
      &_outDesiredFormat, &_outStreamFormat, &_renderConverter));

  // Try to set the device buffer size to the desired value of 20 ms.
  const uint16_t kPlayBufDelayFixed = 20;
  UInt32 bufByteCount = static_cast<UInt32>(
      (_outStreamFormat.mSampleRate / 1000.0) * kPlayBufDelayFixed *
      _outStreamFormat.mChannelsPerFrame * sizeof(Float32));
  if (_outStreamFormat.mFramesPerPacket != 0) {
    if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) {
      bufByteCount = (static_cast<UInt32>(bufByteCount /
                                          _outStreamFormat.mFramesPerPacket) +
                      1) *
                     _outStreamFormat.mFramesPerPacket;
    }
  }

  // Ensure the buffer size is within the range provided by the device.
  AudioObjectPropertyAddress propertyAddress = {
      kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
  propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
  AudioValueRange range;
  UInt32 size = sizeof(range);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &range));
  if (range.mMinimum > bufByteCount) {
    bufByteCount = range.mMinimum;
  } else if (range.mMaximum < bufByteCount) {
    bufByteCount = range.mMaximum;
  }

  propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
  size = sizeof(bufByteCount);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));

  // Get render device latency.
  propertyAddress.mSelector = kAudioDevicePropertyLatency;
  UInt32 latency = 0;
  size = sizeof(UInt32);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  _renderLatencyUs =
      static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);

  // Get render stream latency.
  propertyAddress.mSelector = kAudioDevicePropertyStreams;
  AudioStreamID stream = 0;
  size = sizeof(AudioStreamID);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
  propertyAddress.mSelector = kAudioStreamPropertyLatency;
  size = sizeof(UInt32);
  latency = 0;
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  _renderLatencyUs +=
      static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);

  RTC_LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
                      << _renderDelayOffsetSamples
                      << ", _renderDelayUs=" << _renderDelayUs
                      << ", _renderLatencyUs=" << _renderLatencyUs;
  return 0;
}

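// Static trampoline registered as the property listener for the devices we
// use; CoreAudio invokes it on property changes with `clientData` carrying
// the AudioDeviceMac instance, which then dispatches to
// implObjectListenerProc().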
OSStatus AudioDeviceMac::objectListenerProc(
    AudioObjectID objectId,
    UInt32 numberAddresses,
    const AudioObjectPropertyAddress addresses[],
    void* clientData) {
  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
  RTC_DCHECK(ptrThis != NULL);

  ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);

  // AudioObjectPropertyListenerProc functions are supposed to return 0
  return 0;
}

OSStatus AudioDeviceMac::implObjectListenerProc(
    const AudioObjectID objectId,
    const UInt32 numberAddresses,
    const AudioObjectPropertyAddress addresses[]) {
  RTC_LOG(LS_VERBOSE) << "AudioDeviceMac::implObjectListenerProc()";

  for (UInt32 i = 0; i < numberAddresses; i++) {
    if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
      HandleDeviceChange();
    } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) {
      HandleStreamFormatChange(objectId, addresses[i]);
    } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) {
      HandleDataSourceChange(objectId, addresses[i]);
    } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) {
      HandleProcessorOverload(addresses[i]);
    }
  }

  return 0;
}

int32_t AudioDeviceMac::HandleDeviceChange() {
  OSStatus err = noErr;

  RTC_LOG(LS_VERBOSE) << "kAudioHardwarePropertyDevices";

  // A device has changed. Check if our registered devices have been removed.
  // Ensure the devices have been initialized, meaning the IDs are valid.
  if (MicrophoneIsInitialized()) {
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0};
    UInt32 deviceIsAlive = 1;
    UInt32 size = sizeof(UInt32);
    err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL,
                                     &size, &deviceIsAlive);

    if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
      RTC_LOG(LS_WARNING) << "Capture device is not alive (probably removed)";
      AtomicSet32(&_captureDeviceIsAlive, 0);
      _mixerManager.CloseMicrophone();
    } else if (err != noErr) {
      logCAMsg(rtc::LS_ERROR, "Error in AudioObjectGetPropertyData()",
               (const char*)&err);
      return -1;
    }
  }

  if (SpeakerIsInitialized()) {
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0};
    UInt32 deviceIsAlive = 1;
    UInt32 size = sizeof(UInt32);
    err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL,
                                     &size, &deviceIsAlive);

    if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
      RTC_LOG(LS_WARNING) << "Render device is not alive (probably removed)";
      AtomicSet32(&_renderDeviceIsAlive, 0);
      _mixerManager.CloseSpeaker();
    } else if (err != noErr) {
      logCAMsg(rtc::LS_ERROR, "Error in AudioObjectGetPropertyData()",
               (const char*)&err);
      return -1;
    }
  }

  return 0;
}

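// Called from the property listener when the stream format of the input or
// output device changes. Rejects non-PCM formats and unsupported channel
// counts, then rebuilds the capture converter (input scope) or reapplies the
// desired playout format (output scope) to match the new device format.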
int32_t AudioDeviceMac::HandleStreamFormatChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress) {
  OSStatus err = noErr;

  RTC_LOG(LS_VERBOSE) << "Stream format changed";

  if (objectId != _inputDeviceID && objectId != _outputDeviceID) {
    return 0;
  }

  // Get the new device format
  AudioStreamBasicDescription streamFormat;
  UInt32 size = sizeof(streamFormat);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      objectId, &propertyAddress, 0, NULL, &size, &streamFormat));

  if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
    logCAMsg(rtc::LS_ERROR, "Unacceptable input stream format -> mFormatID",
             (const char*)&streamFormat.mFormatID);
    return -1;
  }

  if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
    RTC_LOG(LS_ERROR) << "Too many channels on device (mChannelsPerFrame = "
                      << streamFormat.mChannelsPerFrame << ")";
    return -1;
  }

  if (_ptrAudioBuffer && streamFormat.mChannelsPerFrame != _recChannels) {
    RTC_LOG(LS_ERROR) << "Changing channels not supported (mChannelsPerFrame = "
                      << streamFormat.mChannelsPerFrame << ")";
    return -1;
  }

  RTC_LOG(LS_VERBOSE) << "Stream format:";
  RTC_LOG(LS_VERBOSE) << "mSampleRate = " << streamFormat.mSampleRate
                      << ", mChannelsPerFrame = "
                      << streamFormat.mChannelsPerFrame;
  RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << streamFormat.mBytesPerPacket
                      << ", mFramesPerPacket = "
                      << streamFormat.mFramesPerPacket;
  RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
                      << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
  RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
  logCAMsg(rtc::LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID);

  if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
    const int io_block_size_samples = streamFormat.mChannelsPerFrame *
                                      streamFormat.mSampleRate / 100 *
                                      N_BLOCKS_IO;
    if (io_block_size_samples > _captureBufSizeSamples) {
      RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
                        << ") is larger than ring buffer ("
                        << _captureBufSizeSamples << ")";
      return -1;
    }

    memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));

    if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
      _inDesiredFormat.mChannelsPerFrame = 2;
    } else {
      // Disable stereo recording when we only have one channel on the device.
      _inDesiredFormat.mChannelsPerFrame = 1;
      _recChannels = 1;
      RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
    }

    // Recreate the converter with the new format
    // TODO(xians): make this thread safe
    WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));

    WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
                                              &_captureConverter));
  } else {
    memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));

    // Our preferred format to work with
    if (_outStreamFormat.mChannelsPerFrame < 2) {
      _playChannels = 1;
      RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
    }
    WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
  }
  return 0;
}

int32_t AudioDeviceMac::HandleDataSourceChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress) {
  OSStatus err = noErr;

  if (_macBookPro &&
      propertyAddress.mScope == kAudioDevicePropertyScopeOutput) {
    RTC_LOG(LS_VERBOSE) << "Data source changed";

    _macBookProPanRight = false;
    UInt32 dataSource = 0;
    UInt32 size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        objectId, &propertyAddress, 0, NULL, &size, &dataSource));
    if (dataSource == 'ispk') {
      _macBookProPanRight = true;
      RTC_LOG(LS_VERBOSE)
          << "MacBook Pro using internal speakers; stereo panning right";
    } else {
      RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
    }
  }

  return 0;
}

int32_t AudioDeviceMac::HandleProcessorOverload(
    const AudioObjectPropertyAddress propertyAddress) {
  // TODO(xians): we probably want to notify the user in some way of the
  // overload. However, the Windows interpretations of these errors seem to
  // be more severe than what ProcessorOverload is thrown for.
  //
  // We don't log the notification, as it's sent from the HAL's IO thread. We
  // don't want to slow it down even further.
  if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
    // RTC_LOG(LS_WARNING) << "Capture processor overload";
    //_callback->ProblemIsReported(
    // SndCardStreamObserver::ERecordingProblem);
  } else {
    // RTC_LOG(LS_WARNING) << "Render processor overload";
    //_callback->ProblemIsReported(
    // SndCardStreamObserver::EPlaybackProblem);
  }

  return 0;
}

// ============================================================================
//                                  Thread Methods
// ============================================================================

OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID,
                                      const AudioTimeStamp*,
                                      const AudioBufferList* inputData,
                                      const AudioTimeStamp* inputTime,
                                      AudioBufferList* outputData,
                                      const AudioTimeStamp* outputTime,
                                      void* clientData) {
  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
  RTC_DCHECK(ptrThis != NULL);

  ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);

  // AudioDeviceIOProc functions are supposed to return 0
  return 0;
}

OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
                                          UInt32* numberDataPackets,
                                          AudioBufferList* data,
                                          AudioStreamPacketDescription**,
                                          void* userData) {
  AudioDeviceMac* ptrThis = (AudioDeviceMac*)userData;
  RTC_DCHECK(ptrThis != NULL);

  return ptrThis->implOutConverterProc(numberDataPackets, data);
}

OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID,
                                        const AudioTimeStamp*,
                                        const AudioBufferList* inputData,
                                        const AudioTimeStamp* inputTime,
                                        AudioBufferList*,
                                        const AudioTimeStamp*,
                                        void* clientData) {
  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
  RTC_DCHECK(ptrThis != NULL);

  ptrThis->implInDeviceIOProc(inputData, inputTime);

  // AudioDeviceIOProc functions are supposed to return 0
  return 0;
}

OSStatus AudioDeviceMac::inConverterProc(
    AudioConverterRef,
    UInt32* numberDataPackets,
    AudioBufferList* data,
    AudioStreamPacketDescription** /*dataPacketDescription*/,
    void* userData) {
  AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData);
  RTC_DCHECK(ptrThis != NULL);

  return ptrThis->implInConverterProc(numberDataPackets, data);
}

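// Render-side IO callback, run on the HAL's IO thread. For a shared device it
// also drives capture. Output data is pulled through _renderConverter, whose
// input callback (implOutConverterProc) reads from the PortAudio ring buffer,
// and the measured playout delay is published atomically in _renderDelayUs.
// Shutdown uses double-checked locking on _doStop so the common path stays
// lock-free.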
OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
                                          const AudioTimeStamp* inputTime,
                                          AudioBufferList* outputData,
                                          const AudioTimeStamp* outputTime) {
  OSStatus err = noErr;
  UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
  UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

  if (!_twoDevices && _recording) {
    implInDeviceIOProc(inputData, inputTime);
  }

  // Check if we should close down audio device
  // Double-checked locking optimization to remove locking overhead
  if (_doStop) {
    MutexLock lock(&mutex_);
    if (_doStop) {
      if (_twoDevices || (!_recording && !_playing)) {
        // In the case of a shared device, the single driving ioProc
        // is stopped here
        WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
        WEBRTC_CA_LOG_WARN(
            AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
        if (err == noErr) {
          RTC_LOG(LS_VERBOSE) << "Playout or shared device stopped";
        }
      }

      _doStop = false;
      _stopEvent.Set();
      return 0;
    }
  }

  if (!_playing) {
    // This can be the case when a shared device is capturing but not
    // rendering. We allow the checks above before returning to avoid a
    // timeout when capturing is stopped.
    return 0;
  }

  RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
  UInt32 size =
      outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame;

  // TODO(xians): signal an error somehow?
  err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
                                        this, &size, outputData, NULL);
  if (err != noErr) {
    if (err == 1) {
      // This is our own error.
      RTC_LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
      return 1;
    } else {
      logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
               (const char*)&err);
      return 1;
    }
  }

  PaRingBufferSize bufSizeSamples =
      PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);

  int32_t renderDelayUs =
      static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5);
  renderDelayUs += static_cast<int32_t>(
      (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame /
          _outDesiredFormat.mSampleRate +
      0.5);

  AtomicSet32(&_renderDelayUs, renderDelayUs);

  return 0;
}

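// Input callback for _renderConverter: hands the converter a zero-initialized
// block of _renderConvertData, fills it with whatever is available in the
// render ring buffer (leaving zero padding on underrun) and signals the render
// worker thread so it can top the buffer up again.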
OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
                                              AudioBufferList* data) {
  RTC_DCHECK(data->mNumberBuffers == 1);
  PaRingBufferSize numSamples =
      *numberDataPackets * _outDesiredFormat.mChannelsPerFrame;

  data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
  // Always give the converter as much as it wants, zero padding as required.
  data->mBuffers->mDataByteSize =
      *numberDataPackets * _outDesiredFormat.mBytesPerPacket;
  data->mBuffers->mData = _renderConvertData;
  memset(_renderConvertData, 0, sizeof(_renderConvertData));

  PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);

  kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
  if (kernErr != KERN_SUCCESS) {
    RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
    return 1;
  }

  return 0;
}

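// Capture-side IO callback, run on the HAL's IO thread. Writes the device's
// input samples into the capture ring buffer, publishes the measured capture
// delay in _captureDelayUs and signals the capture worker thread. Shutdown
// mirrors the render path: double-checked locking on _doStopRec.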
OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
                                            const AudioTimeStamp* inputTime) {
  OSStatus err = noErr;
  UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
  UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

  // Check if we should close down audio device
  // Double-checked locking optimization to remove locking overhead
  if (_doStopRec) {
    MutexLock lock(&mutex_);
    if (_doStopRec) {
      // This will be signalled only when a shared device is not in use.
      WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
      WEBRTC_CA_LOG_WARN(
          AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
      if (err == noErr) {
        RTC_LOG(LS_VERBOSE) << "Recording device stopped";
      }

      _doStopRec = false;
      _stopEventRec.Set();
      return 0;
    }
  }

  if (!_recording) {
    // Allow above checks to avoid a timeout on stopping capture.
    return 0;
  }

  PaRingBufferSize bufSizeSamples =
      PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);

  int32_t captureDelayUs =
      static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5);
  captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) /
                                             _inStreamFormat.mChannelsPerFrame /
                                             _inStreamFormat.mSampleRate +
                                         0.5);

  AtomicSet32(&_captureDelayUs, captureDelayUs);

  RTC_DCHECK(inputData->mNumberBuffers == 1);
  PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize *
                                _inStreamFormat.mChannelsPerFrame /
                                _inStreamFormat.mBytesPerPacket;
  PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
                         numSamples);

  kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
  if (kernErr != KERN_SUCCESS) {
    RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
  }

  return err;
}

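// Input callback for _captureConverter: blocks (with a timeout) on the capture
// semaphore until the ring buffer holds the requested number of samples, then
// hands the converter a pointer directly into the ring buffer to avoid a copy.
// Returns 1, our private error code, if the capture device has died.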
OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets,
                                             AudioBufferList* data) {
  RTC_DCHECK(data->mNumberBuffers == 1);
  PaRingBufferSize numSamples =
      *numberDataPackets * _inStreamFormat.mChannelsPerFrame;

  while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) {
    mach_timespec_t timeout;
    timeout.tv_sec = 0;
    timeout.tv_nsec = TIMER_PERIOD_MS;

    kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
    if (kernErr == KERN_OPERATION_TIMED_OUT) {
      int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
      if (signal == 0) {
        // The capture device is no longer alive; stop the worker thread.
        *numberDataPackets = 0;
        return 1;
      }
    } else if (kernErr != KERN_SUCCESS) {
      RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
    }
  }

  // Pass the read pointer directly to the converter to avoid a memcpy.
  void* dummyPtr;
  PaRingBufferSize dummySize;
  PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
                                  &data->mBuffers->mData, &numSamples,
                                  &dummyPtr, &dummySize);
  PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);

  data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
  *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
  data->mBuffers->mDataByteSize =
      *numberDataPackets * _inStreamFormat.mBytesPerPacket;

  return 0;
}

void AudioDeviceMac::RunRender(void* ptrThis) {
  AudioDeviceMac* device = static_cast<AudioDeviceMac*>(ptrThis);
  while (device->RenderWorkerThread()) {
  }
}

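// One iteration of the render worker thread: waits on _renderSemaphore until
// the render ring buffer has room for another ENGINE_PLAY_BUF_SIZE_IN_SAMPLES
// block, pulls that block of PCM from the AudioDeviceBuffer, applies the
// MacBook Pro internal-speaker workaround (mix into the right channel) when
// needed, and writes the samples into the ring buffer for the IO callback to
// consume. Returns false to stop the thread when the render device is gone.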
bool AudioDeviceMac::RenderWorkerThread() {
  PaRingBufferSize numSamples =
      ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
  while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) -
             _renderDelayOffsetSamples <
         numSamples) {
    mach_timespec_t timeout;
    timeout.tv_sec = 0;
    timeout.tv_nsec = TIMER_PERIOD_MS;

    kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
    if (kernErr == KERN_OPERATION_TIMED_OUT) {
      int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
      if (signal == 0) {
        // The render device is no longer alive; stop the worker thread.
        return false;
      }
    } else if (kernErr != KERN_SUCCESS) {
      RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
    }
  }

  int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];

  if (!_ptrAudioBuffer) {
    RTC_LOG(LS_ERROR) << "render AudioBuffer is invalid";
    return false;
  }

  // Ask for new PCM data to be played out using the AudioDeviceBuffer.
  uint32_t nSamples =
      _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);

  nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
  if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
    RTC_LOG(LS_ERROR) << "invalid number of output samples (" << nSamples
                      << ")";
  }

  uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;

  SInt16* pPlayBuffer = (SInt16*)&playBuffer;
  if (_macBookProPanRight && (_playChannels == 2)) {
    // Mix entirely into the right channel and zero the left channel.
    SInt32 sampleInt32 = 0;
    for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) {
      sampleInt32 = pPlayBuffer[sampleIdx];
      sampleInt32 += pPlayBuffer[sampleIdx + 1];
      sampleInt32 /= 2;

      if (sampleInt32 > 32767) {
        sampleInt32 = 32767;
      } else if (sampleInt32 < -32768) {
        sampleInt32 = -32768;
      }

      pPlayBuffer[sampleIdx] = 0;
      pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32);
    }
  }

  PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);

  return true;
}

void AudioDeviceMac::RunCapture(void* ptrThis) {
  AudioDeviceMac* device = static_cast<AudioDeviceMac*>(ptrThis);
  while (device->CaptureWorkerThread()) {
  }
}

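// One iteration of the capture worker thread: pulls one
// ENGINE_REC_BUF_SIZE_IN_SAMPLES block through _captureConverter (which is fed
// from the capture ring buffer by implInConverterProc), attaches the current
// play/record delay estimates and typing status, and delivers the block to the
// AudioDeviceBuffer. Returns false to stop the thread on a converter error.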
bool AudioDeviceMac::CaptureWorkerThread() {
  OSStatus err = noErr;
  UInt32 noRecSamples =
      ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame;
  SInt16 recordBuffer[noRecSamples];
  UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;

  AudioBufferList engineBuffer;
  engineBuffer.mNumberBuffers = 1;  // Interleaved channels.
  engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
  engineBuffer.mBuffers->mDataByteSize =
      _inDesiredFormat.mBytesPerPacket * noRecSamples;
  engineBuffer.mBuffers->mData = recordBuffer;

  err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
                                        this, &size, &engineBuffer, NULL);
  if (err != noErr) {
    if (err == 1) {
      // This is our own error.
      return false;
    } else {
      logCAMsg(rtc::LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
               (const char*)&err);
      return false;
    }
  }

  // TODO(xians): what if the returned size is incorrect?
  if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
    int32_t msecOnPlaySide;
    int32_t msecOnRecordSide;

    int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
    int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);

    msecOnPlaySide =
        static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
    msecOnRecordSide =
        static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);

    if (!_ptrAudioBuffer) {
      RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
      return false;
    }

    // Store the recorded buffer (no action is taken unless the number of
    // recorded samples makes up a full buffer).
    _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size);
    _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide);
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());

    // Deliver the recorded samples at the specified sample rate, mic level,
    // etc. to the observer using the callback.
    _ptrAudioBuffer->DeliverRecordedData();
  }

  return true;
}

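// Typing detection helper: polls CGEventSourceKeyState() for every virtual key
// code tracked in prev_key_state_ and reports true if any key transitioned
// from up to down since the previous poll.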
bool AudioDeviceMac::KeyPressed() {
  bool key_down = false;
  // Loop through all Mac virtual key constant values.
  for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_);
       ++key_index) {
    bool keyState =
        CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index);
    // A false -> true change in keymap means a key is pressed.
    key_down |= (keyState && !prev_key_state_[key_index]);
    // Save current state.
    prev_key_state_[key_index] = keyState;
  }
  return key_down;
}

}  // namespace webrtc