1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include <assert.h>
12 
13 #include "webrtc/base/checks.h"
14 
15 #include "webrtc/modules/audio_device/audio_device_config.h"
16 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h"
17 
18 #include "webrtc/system_wrappers/include/event_wrapper.h"
19 #include "webrtc/system_wrappers/include/trace.h"
20 
21 webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;
22 
23 // Accesses Pulse functions through our late-binding symbol table instead of
24 // directly. This way we don't have to link to libpulse, which means our binary
25 // will work on systems that don't have it.
26 #define LATE(sym) \
27   LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym)
28 
29 namespace webrtc
30 {
31 
// Constructs the PulseAudio audio device module. Only trivial state setup
// happens here; the actual PulseAudio connection is made in Init().
// The critical section and the four event objects are heap-allocated and
// held by reference, which is why the destructor frees them via
// `delete &member`.
AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) :
    _ptrAudioBuffer(NULL),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _timeEventRec(*EventWrapper::Create()),
    _timeEventPlay(*EventWrapper::Create()),
    _recStartEvent(*EventWrapper::Create()),
    _playStartEvent(*EventWrapper::Create()),
    _id(id),
    _mixerManager(id),
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _inputDeviceIsSpecified(false),
    _outputDeviceIsSpecified(false),
    sample_rate_hz_(0),
    _recChannels(1),  // mono capture by default (see SetStereoRecording)
    _playChannels(1),  // mono playout by default (see SetStereoPlayout)
    _playBufType(AudioDeviceModule::kFixedBufferSize),
    _initialized(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _startRec(false),
    _stopRec(false),
    _startPlay(false),
    _stopPlay(false),
    _AGC(false),
    update_speaker_volume_at_startup_(false),
    _playBufDelayFixed(20),
    _sndCardPlayDelay(0),
    _sndCardRecDelay(0),
    _writeErrors(0),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    // -1 appears to mean "no device lookup in progress"; the enumeration
    // callbacks use _deviceIndex as a request slot (see PlayoutDevices()).
    _deviceIndex(-1),
    _numPlayDevices(0),
    _numRecDevices(0),
    _playDeviceName(NULL),
    _recDeviceName(NULL),
    _playDisplayDeviceName(NULL),
    _recDisplayDeviceName(NULL),
    _playBuffer(NULL),
    _playbackBufferSize(0),
    _playbackBufferUnused(0),
    _tempBufferSpace(0),
    _recBuffer(NULL),
    _recordBufferSize(0),
    _recordBufferUsed(0),
    _tempSampleData(NULL),
    _tempSampleDataSize(0),
    _configuredLatencyPlay(0),
    _configuredLatencyRec(0),
    _paDeviceIndex(-1),
    _paStateChanged(false),
    _paMainloop(NULL),
    _paMainloopApi(NULL),
    _paContext(NULL),
    _recStream(NULL),
    _playStream(NULL),
    _recStreamFlags(0),
    _playStreamFlags(0)
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
                 "%s created", __FUNCTION__);

    // Arrays and structs cannot be zeroed in the initializer list above,
    // so clear them here.
    memset(_paServerVersion, 0, sizeof(_paServerVersion));
    memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
    memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
    memset(_oldKeyState, 0, sizeof(_oldKeyState));
}
104 
~AudioDeviceLinuxPulse()105 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
106 {
107     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
108                  "%s destroyed", __FUNCTION__);
109     RTC_DCHECK(thread_checker_.CalledOnValidThread());
110     Terminate();
111 
112     if (_recBuffer)
113     {
114         delete [] _recBuffer;
115         _recBuffer = NULL;
116     }
117     if (_playBuffer)
118     {
119         delete [] _playBuffer;
120         _playBuffer = NULL;
121     }
122     if (_playDeviceName)
123     {
124         delete [] _playDeviceName;
125         _playDeviceName = NULL;
126     }
127     if (_recDeviceName)
128     {
129         delete [] _recDeviceName;
130         _recDeviceName = NULL;
131     }
132 
133     delete &_recStartEvent;
134     delete &_playStartEvent;
135     delete &_timeEventRec;
136     delete &_timeEventPlay;
137     delete &_critSect;
138 }
139 
AttachAudioBuffer(AudioDeviceBuffer * audioBuffer)140 void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
141 {
142     RTC_DCHECK(thread_checker_.CalledOnValidThread());
143 
144     _ptrAudioBuffer = audioBuffer;
145 
146     // Inform the AudioBuffer about default settings for this implementation.
147     // Set all values to zero here since the actual settings will be done by
148     // InitPlayout and InitRecording later.
149     _ptrAudioBuffer->SetRecordingSampleRate(0);
150     _ptrAudioBuffer->SetPlayoutSampleRate(0);
151     _ptrAudioBuffer->SetRecordingChannels(0);
152     _ptrAudioBuffer->SetPlayoutChannels(0);
153 }
154 
155 // ----------------------------------------------------------------------------
156 //  ActiveAudioLayer
157 // ----------------------------------------------------------------------------
158 
ActiveAudioLayer(AudioDeviceModule::AudioLayer & audioLayer) const159 int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
160     AudioDeviceModule::AudioLayer& audioLayer) const
161 {
162     audioLayer = AudioDeviceModule::kLinuxPulseAudio;
163     return 0;
164 }
165 
// Initializes the module: connects to PulseAudio, opens the X display used
// for typing detection, and starts the recording and playout worker
// threads at real-time priority. Idempotent: returns 0 immediately when
// already initialized. Returns -1 if PulseAudio cannot be initialized.
int32_t AudioDeviceLinuxPulse::Init()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    if (_initialized)
    {
        return 0;
    }

    // Initialize PulseAudio
    if (InitPulseAudio() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to initialize PulseAudio");

        // Best-effort cleanup of any partially constructed PA state.
        if (TerminatePulseAudio() < 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to terminate PulseAudio");
        }

        return -1;
    }

    _playWarning = 0;
    _playError = 0;
    _recWarning = 0;
    _recError = 0;

    //Get X display handle for typing detection
    _XDisplay = XOpenDisplay(NULL);
    if (!_XDisplay)
    {
        // Non-fatal: audio still works, only key-press detection is lost.
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
          "  failed to open X display, typing detection will not work");
    }

    // RECORDING
    _ptrThreadRec.reset(new rtc::PlatformThread(
        RecThreadFunc, this, "webrtc_audio_module_rec_thread"));

    _ptrThreadRec->Start();
    _ptrThreadRec->SetPriority(rtc::kRealtimePriority);

    // PLAYOUT
    _ptrThreadPlay.reset(new rtc::PlatformThread(
        PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
    _ptrThreadPlay->Start();
    _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);

    _initialized = true;

    return 0;
}
219 
// Reverses Init(): stops both worker threads, tears down the PulseAudio
// connection and closes the X display. Idempotent: returns 0 immediately
// when not initialized. Returns -1 only if PulseAudio teardown fails.
int32_t AudioDeviceLinuxPulse::Terminate()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    if (!_initialized)
    {
        return 0;
    }

    _mixerManager.Close();

    // RECORDING
    if (_ptrThreadRec)
    {
        rtc::PlatformThread* tmpThread = _ptrThreadRec.release();

        // Wake the thread from its timed wait so Stop() can join it.
        _timeEventRec.Set();
        tmpThread->Stop();
        delete tmpThread;
    }

    // PLAYOUT
    if (_ptrThreadPlay)
    {
        rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();

        // Same wake-then-join pattern as for the recording thread.
        _timeEventPlay.Set();
        tmpThread->Stop();
        delete tmpThread;
    }

    // Terminate PulseAudio
    if (TerminatePulseAudio() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to terminate PulseAudio");
        return -1;
    }

    if (_XDisplay)
    {
      XCloseDisplay(_XDisplay);
      _XDisplay = NULL;
    }

    _initialized = false;
    _outputDeviceIsSpecified = false;
    _inputDeviceIsSpecified = false;

    return 0;
}
270 
Initialized() const271 bool AudioDeviceLinuxPulse::Initialized() const
272 {
273     RTC_DCHECK(thread_checker_.CalledOnValidThread());
274     return (_initialized);
275 }
276 
// Opens the mixer for the currently selected output device. Must not be
// called while playout is active, and requires SetPlayoutDevice() to have
// been called first. Returns 0 on success, -1 otherwise.
int32_t AudioDeviceLinuxPulse::InitSpeaker()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    if (_playing)
    {
        return -1;
    }

    if (!_outputDeviceIsSpecified)
    {
        return -1;
    }

    // check if default device
    if (_outputDeviceIndex == 0)
    {
        uint16_t deviceIndex = 0;
        GetDefaultDeviceInfo(false, NULL, deviceIndex);
        _paDeviceIndex = deviceIndex;
    } else
    {
        // get the PA device index from
        // the callback
        // (_deviceIndex acts as the request slot read by the sink-info
        // callback during the enumeration below)
        _deviceIndex = _outputDeviceIndex;

        // get playout devices
        PlayoutDevices();
    }

    // the callback has now set the _paDeviceIndex to
    // the PulseAudio index of the device
    if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1)
    {
        return -1;
    }

    // clear _deviceIndex
    _deviceIndex = -1;
    _paDeviceIndex = -1;

    return 0;
}
320 
// Opens the mixer for the currently selected input device. Must not be
// called while recording is active, and requires SetRecordingDevice() to
// have been called first. Returns 0 on success, -1 otherwise.
int32_t AudioDeviceLinuxPulse::InitMicrophone()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    if (_recording)
    {
        return -1;
    }

    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }

    // Check if default device
    if (_inputDeviceIndex == 0)
    {
        uint16_t deviceIndex = 0;
        GetDefaultDeviceInfo(true, NULL, deviceIndex);
        _paDeviceIndex = deviceIndex;
    } else
    {
        // Get the PA device index from
        // the callback
        // (_deviceIndex acts as the request slot read by the source-info
        // callback during the enumeration below)
        _deviceIndex = _inputDeviceIndex;

        // get recording devices
        RecordingDevices();
    }

    // The callback has now set the _paDeviceIndex to
    // the PulseAudio index of the device
    if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1)
    {
        return -1;
    }

    // Clear _deviceIndex
    _deviceIndex = -1;
    _paDeviceIndex = -1;

    return 0;
}
363 
SpeakerIsInitialized() const364 bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const
365 {
366     RTC_DCHECK(thread_checker_.CalledOnValidThread());
367     return (_mixerManager.SpeakerIsInitialized());
368 }
369 
MicrophoneIsInitialized() const370 bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const
371 {
372     RTC_DCHECK(thread_checker_.CalledOnValidThread());
373     return (_mixerManager.MicrophoneIsInitialized());
374 }
375 
SpeakerVolumeIsAvailable(bool & available)376 int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
377 {
378     RTC_DCHECK(thread_checker_.CalledOnValidThread());
379     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
380 
381     // Make an attempt to open up the
382     // output mixer corresponding to the currently selected output device.
383     if (!wasInitialized && InitSpeaker() == -1)
384     {
385         // If we end up here it means that the selected speaker has no volume
386         // control.
387         available = false;
388         return 0;
389     }
390 
391     // Given that InitSpeaker was successful, we know volume control exists.
392     available = true;
393 
394     // Close the initialized output mixer
395     if (!wasInitialized)
396     {
397         _mixerManager.CloseSpeaker();
398     }
399 
400     return 0;
401 }
402 
SetSpeakerVolume(uint32_t volume)403 int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
404 {
405     RTC_DCHECK(thread_checker_.CalledOnValidThread());
406     if (!_playing) {
407       // Only update the volume if it's been set while we weren't playing.
408       update_speaker_volume_at_startup_ = true;
409     }
410     return (_mixerManager.SetSpeakerVolume(volume));
411 }
412 
SpeakerVolume(uint32_t & volume) const413 int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const
414 {
415     RTC_DCHECK(thread_checker_.CalledOnValidThread());
416     uint32_t level(0);
417 
418     if (_mixerManager.SpeakerVolume(level) == -1)
419     {
420         return -1;
421     }
422 
423     volume = level;
424 
425     return 0;
426 }
427 
SetWaveOutVolume(uint16_t volumeLeft,uint16_t volumeRight)428 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(
429     uint16_t volumeLeft,
430     uint16_t volumeRight)
431 {
432 
433     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
434                  "  API call not supported on this platform");
435     return -1;
436 }
437 
WaveOutVolume(uint16_t &,uint16_t &) const438 int32_t AudioDeviceLinuxPulse::WaveOutVolume(
439     uint16_t& /*volumeLeft*/,
440     uint16_t& /*volumeRight*/) const
441 {
442 
443     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
444                  "  API call not supported on this platform");
445     return -1;
446 }
447 
MaxSpeakerVolume(uint32_t & maxVolume) const448 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
449     uint32_t& maxVolume) const
450 {
451     RTC_DCHECK(thread_checker_.CalledOnValidThread());
452     uint32_t maxVol(0);
453 
454     if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
455     {
456         return -1;
457     }
458 
459     maxVolume = maxVol;
460 
461     return 0;
462 }
463 
MinSpeakerVolume(uint32_t & minVolume) const464 int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
465     uint32_t& minVolume) const
466 {
467     RTC_DCHECK(thread_checker_.CalledOnValidThread());
468     uint32_t minVol(0);
469 
470     if (_mixerManager.MinSpeakerVolume(minVol) == -1)
471     {
472         return -1;
473     }
474 
475     minVolume = minVol;
476 
477     return 0;
478 }
479 
SpeakerVolumeStepSize(uint16_t & stepSize) const480 int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
481     uint16_t& stepSize) const
482 {
483     RTC_DCHECK(thread_checker_.CalledOnValidThread());
484     uint16_t delta(0);
485 
486     if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
487     {
488         return -1;
489     }
490 
491     stepSize = delta;
492 
493     return 0;
494 }
495 
SpeakerMuteIsAvailable(bool & available)496 int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
497 {
498     RTC_DCHECK(thread_checker_.CalledOnValidThread());
499     bool isAvailable(false);
500     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
501 
502     // Make an attempt to open up the
503     // output mixer corresponding to the currently selected output device.
504     //
505     if (!wasInitialized && InitSpeaker() == -1)
506     {
507         // If we end up here it means that the selected speaker has no volume
508         // control, hence it is safe to state that there is no mute control
509         // already at this stage.
510         available = false;
511         return 0;
512     }
513 
514     // Check if the selected speaker has a mute control
515     _mixerManager.SpeakerMuteIsAvailable(isAvailable);
516 
517     available = isAvailable;
518 
519     // Close the initialized output mixer
520     if (!wasInitialized)
521     {
522         _mixerManager.CloseSpeaker();
523     }
524 
525     return 0;
526 }
527 
SetSpeakerMute(bool enable)528 int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
529 {
530     RTC_DCHECK(thread_checker_.CalledOnValidThread());
531     return (_mixerManager.SetSpeakerMute(enable));
532 }
533 
SpeakerMute(bool & enabled) const534 int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
535 {
536     RTC_DCHECK(thread_checker_.CalledOnValidThread());
537     bool muted(0);
538     if (_mixerManager.SpeakerMute(muted) == -1)
539     {
540         return -1;
541     }
542 
543     enabled = muted;
544     return 0;
545 }
546 
MicrophoneMuteIsAvailable(bool & available)547 int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
548 {
549     RTC_DCHECK(thread_checker_.CalledOnValidThread());
550     bool isAvailable(false);
551     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
552 
553     // Make an attempt to open up the
554     // input mixer corresponding to the currently selected input device.
555     //
556     if (!wasInitialized && InitMicrophone() == -1)
557     {
558         // If we end up here it means that the selected microphone has no
559         // volume control, hence it is safe to state that there is no
560         // boost control already at this stage.
561         available = false;
562         return 0;
563     }
564 
565     // Check if the selected microphone has a mute control
566     //
567     _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
568     available = isAvailable;
569 
570     // Close the initialized input mixer
571     //
572     if (!wasInitialized)
573     {
574         _mixerManager.CloseMicrophone();
575     }
576 
577     return 0;
578 }
579 
SetMicrophoneMute(bool enable)580 int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
581 {
582     RTC_DCHECK(thread_checker_.CalledOnValidThread());
583     return (_mixerManager.SetMicrophoneMute(enable));
584 }
585 
MicrophoneMute(bool & enabled) const586 int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
587 {
588     RTC_DCHECK(thread_checker_.CalledOnValidThread());
589     bool muted(0);
590     if (_mixerManager.MicrophoneMute(muted) == -1)
591     {
592         return -1;
593     }
594 
595     enabled = muted;
596     return 0;
597 }
598 
MicrophoneBoostIsAvailable(bool & available)599 int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
600 {
601     RTC_DCHECK(thread_checker_.CalledOnValidThread());
602     bool isAvailable(false);
603     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
604 
605     // Enumerate all avaliable microphone and make an attempt to open up the
606     // input mixer corresponding to the currently selected input device.
607     //
608     if (!wasInitialized && InitMicrophone() == -1)
609     {
610         // If we end up here it means that the selected microphone has no
611         // volume control, hence it is safe to state that there is no
612         // boost control already at this stage.
613         available = false;
614         return 0;
615     }
616 
617     // Check if the selected microphone has a boost control
618     _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
619     available = isAvailable;
620 
621     // Close the initialized input mixer
622     if (!wasInitialized)
623     {
624         _mixerManager.CloseMicrophone();
625     }
626 
627     return 0;
628 }
629 
SetMicrophoneBoost(bool enable)630 int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
631 {
632     RTC_DCHECK(thread_checker_.CalledOnValidThread());
633     return (_mixerManager.SetMicrophoneBoost(enable));
634 }
635 
MicrophoneBoost(bool & enabled) const636 int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
637 {
638     RTC_DCHECK(thread_checker_.CalledOnValidThread());
639     bool onOff(0);
640 
641     if (_mixerManager.MicrophoneBoost(onOff) == -1)
642     {
643         return -1;
644     }
645 
646     enabled = onOff;
647 
648     return 0;
649 }
650 
StereoRecordingIsAvailable(bool & available)651 int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
652 {
653     RTC_DCHECK(thread_checker_.CalledOnValidThread());
654     if (_recChannels == 2 && _recording) {
655       available = true;
656       return 0;
657     }
658 
659     available = false;
660     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
661     int error = 0;
662 
663     if (!wasInitialized && InitMicrophone() == -1)
664     {
665         // Cannot open the specified device
666         available = false;
667         return 0;
668     }
669 
670     // Check if the selected microphone can record stereo.
671     bool isAvailable(false);
672     error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
673     if (!error)
674       available = isAvailable;
675 
676     // Close the initialized input mixer
677     if (!wasInitialized)
678     {
679         _mixerManager.CloseMicrophone();
680     }
681 
682     return error;
683 }
684 
SetStereoRecording(bool enable)685 int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
686 {
687     RTC_DCHECK(thread_checker_.CalledOnValidThread());
688     if (enable)
689         _recChannels = 2;
690     else
691         _recChannels = 1;
692 
693     return 0;
694 }
695 
StereoRecording(bool & enabled) const696 int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
697 {
698     RTC_DCHECK(thread_checker_.CalledOnValidThread());
699     if (_recChannels == 2)
700         enabled = true;
701     else
702         enabled = false;
703 
704     return 0;
705 }
706 
StereoPlayoutIsAvailable(bool & available)707 int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
708 {
709     RTC_DCHECK(thread_checker_.CalledOnValidThread());
710     if (_playChannels == 2 && _playing) {
711       available = true;
712       return 0;
713     }
714 
715     available = false;
716     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
717     int error = 0;
718 
719     if (!wasInitialized && InitSpeaker() == -1)
720     {
721         // Cannot open the specified device.
722         return -1;
723     }
724 
725     // Check if the selected speaker can play stereo.
726     bool isAvailable(false);
727     error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
728     if (!error)
729       available = isAvailable;
730 
731     // Close the initialized input mixer
732     if (!wasInitialized)
733     {
734         _mixerManager.CloseSpeaker();
735     }
736 
737     return error;
738 }
739 
SetStereoPlayout(bool enable)740 int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
741 {
742     RTC_DCHECK(thread_checker_.CalledOnValidThread());
743     if (enable)
744         _playChannels = 2;
745     else
746         _playChannels = 1;
747 
748     return 0;
749 }
750 
StereoPlayout(bool & enabled) const751 int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
752 {
753     RTC_DCHECK(thread_checker_.CalledOnValidThread());
754     if (_playChannels == 2)
755         enabled = true;
756     else
757         enabled = false;
758 
759     return 0;
760 }
761 
SetAGC(bool enable)762 int32_t AudioDeviceLinuxPulse::SetAGC(bool enable)
763 {
764     CriticalSectionScoped lock(&_critSect);
765     _AGC = enable;
766 
767     return 0;
768 }
769 
AGC() const770 bool AudioDeviceLinuxPulse::AGC() const
771 {
772     CriticalSectionScoped lock(&_critSect);
773     return _AGC;
774 }
775 
MicrophoneVolumeIsAvailable(bool & available)776 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
777     bool& available)
778 {
779     RTC_DCHECK(thread_checker_.CalledOnValidThread());
780     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
781 
782     // Make an attempt to open up the
783     // input mixer corresponding to the currently selected output device.
784     if (!wasInitialized && InitMicrophone() == -1)
785     {
786         // If we end up here it means that the selected microphone has no
787         // volume control.
788         available = false;
789         return 0;
790     }
791 
792     // Given that InitMicrophone was successful, we know that a volume control
793     // exists.
794     available = true;
795 
796     // Close the initialized input mixer
797     if (!wasInitialized)
798     {
799         _mixerManager.CloseMicrophone();
800     }
801 
802     return 0;
803 }
804 
SetMicrophoneVolume(uint32_t volume)805 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume)
806 {
807     return (_mixerManager.SetMicrophoneVolume(volume));
808 }
809 
MicrophoneVolume(uint32_t & volume) const810 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(
811     uint32_t& volume) const
812 {
813 
814     uint32_t level(0);
815 
816     if (_mixerManager.MicrophoneVolume(level) == -1)
817     {
818         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
819                      "  failed to retrive current microphone level");
820         return -1;
821     }
822 
823     volume = level;
824 
825     return 0;
826 }
827 
MaxMicrophoneVolume(uint32_t & maxVolume) const828 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(
829     uint32_t& maxVolume) const
830 {
831 
832     uint32_t maxVol(0);
833 
834     if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
835     {
836         return -1;
837     }
838 
839     maxVolume = maxVol;
840 
841     return 0;
842 }
843 
MinMicrophoneVolume(uint32_t & minVolume) const844 int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(
845     uint32_t& minVolume) const
846 {
847 
848     uint32_t minVol(0);
849 
850     if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
851     {
852         return -1;
853     }
854 
855     minVolume = minVol;
856 
857     return 0;
858 }
859 
MicrophoneVolumeStepSize(uint16_t & stepSize) const860 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
861     uint16_t& stepSize) const
862 {
863     RTC_DCHECK(thread_checker_.CalledOnValidThread());
864     uint16_t delta(0);
865 
866     if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
867     {
868         return -1;
869     }
870 
871     stepSize = delta;
872 
873     return 0;
874 }
875 
// Enumerates PulseAudio sinks and returns the number of playout devices,
// counting the synthetic "default" device as index 0. Blocks until the
// asynchronous sink enumeration completes. The PaSinkInfoCallback updates
// _numPlayDevices (and services any pending _deviceIndex lookup) — see
// the callback implementation for details.
int16_t AudioDeviceLinuxPulse::PlayoutDevices()
{
    // The PA mainloop lock must be held while issuing the request and
    // waiting for the operation to finish.
    PaLock();

    pa_operation* paOperation = NULL;
    _numPlayDevices = 1; // init to 1 to account for "default"

    // get the whole list of devices and update _numPlayDevices
    paOperation = LATE(pa_context_get_sink_info_list)(_paContext,
                                                      PaSinkInfoCallback,
                                                      this);

    WaitForOperationCompletion(paOperation);

    PaUnLock();

    return _numPlayDevices;
}
894 
SetPlayoutDevice(uint16_t index)895 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index)
896 {
897     RTC_DCHECK(thread_checker_.CalledOnValidThread());
898     if (_playIsInitialized)
899     {
900         return -1;
901     }
902 
903     const uint16_t nDevices = PlayoutDevices();
904 
905     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
906                  "  number of availiable output devices is %u", nDevices);
907 
908     if (index > (nDevices - 1))
909     {
910         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
911                      "  device index is out of range [0,%u]", (nDevices - 1));
912         return -1;
913     }
914 
915     _outputDeviceIndex = index;
916     _outputDeviceIsSpecified = true;
917 
918     return 0;
919 }
920 
SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType)921 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
922     AudioDeviceModule::WindowsDeviceType /*device*/)
923 {
924     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
925                  "WindowsDeviceType not supported");
926     return -1;
927 }
928 
// Fetches the display name (and clears the GUID, which PulseAudio does not
// provide) of the playout device at |index|. Index 0 is the default device.
// The name is delivered through a member-variable side channel: the sink
// enumeration callback writes into _playDisplayDeviceName when _deviceIndex
// matches. Returns 0 on success, -1 on bad index or null |name|.
int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    const uint16_t nDevices = PlayoutDevices();

    if ((index > (nDevices - 1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    // Check if default device
    if (index == 0)
    {
        uint16_t deviceIndex = 0;
        return GetDefaultDeviceInfo(false, name, deviceIndex);
    }

    // Tell the callback that we want
    // The name for this device
    _playDisplayDeviceName = name;
    _deviceIndex = index;

    // get playout devices
    PlayoutDevices();

    // clear device name and index
    _playDisplayDeviceName = NULL;
    _deviceIndex = -1;

    return 0;
}
970 
// Fetches the display name (and clears the GUID, which PulseAudio does not
// provide) of the recording device at |index|. Index 0 is the default
// device. The name is delivered through a member-variable side channel:
// the source enumeration callback writes into _recDisplayDeviceName when
// _deviceIndex matches. Returns 0 on success, -1 on bad index or null
// |name|.
int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    const uint16_t nDevices(RecordingDevices());

    if ((index > (nDevices - 1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    // Check if default device
    if (index == 0)
    {
        uint16_t deviceIndex = 0;
        return GetDefaultDeviceInfo(true, name, deviceIndex);
    }

    // Tell the callback that we want
    // the name for this device
    _recDisplayDeviceName = name;
    _deviceIndex = index;

    // Get recording devices
    RecordingDevices();

    // Clear device name and index
    _recDisplayDeviceName = NULL;
    _deviceIndex = -1;

    return 0;
}
1012 
// Enumerates PulseAudio sources and returns the number of recording
// devices, counting the synthetic "default" device as index 0. Blocks
// until the asynchronous source enumeration completes. The
// PaSourceInfoCallback updates _numRecDevices (and services any pending
// _deviceIndex lookup) — see the callback implementation for details.
int16_t AudioDeviceLinuxPulse::RecordingDevices()
{
    // The PA mainloop lock must be held while issuing the request and
    // waiting for the operation to finish.
    PaLock();

    pa_operation* paOperation = NULL;
    _numRecDevices = 1; // Init to 1 to account for "default"

    // Get the whole list of devices and update _numRecDevices
    paOperation = LATE(pa_context_get_source_info_list)(_paContext,
                                                        PaSourceInfoCallback,
                                                        this);

    WaitForOperationCompletion(paOperation);

    PaUnLock();

    return _numRecDevices;
}
1031 
SetRecordingDevice(uint16_t index)1032 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index)
1033 {
1034     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1035     if (_recIsInitialized)
1036     {
1037         return -1;
1038     }
1039 
1040     const uint16_t nDevices(RecordingDevices());
1041 
1042     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1043                  "  number of availiable input devices is %u", nDevices);
1044 
1045     if (index > (nDevices - 1))
1046     {
1047         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1048                      "  device index is out of range [0,%u]", (nDevices - 1));
1049         return -1;
1050     }
1051 
1052     _inputDeviceIndex = index;
1053     _inputDeviceIsSpecified = true;
1054 
1055     return 0;
1056 }
1057 
SetRecordingDevice(AudioDeviceModule::WindowsDeviceType)1058 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
1059     AudioDeviceModule::WindowsDeviceType /*device*/)
1060 {
1061     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1062                  "WindowsDeviceType not supported");
1063     return -1;
1064 }
1065 
PlayoutIsAvailable(bool & available)1066 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
1067 {
1068     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1069     available = false;
1070 
1071     // Try to initialize the playout side
1072     int32_t res = InitPlayout();
1073 
1074     // Cancel effect of initialization
1075     StopPlayout();
1076 
1077     if (res != -1)
1078     {
1079         available = true;
1080     }
1081 
1082     return res;
1083 }
1084 
// Probes recording availability by attempting a full InitRecording() and
// then rolling it back with StopRecording(). |available| reports whether
// initialization succeeded; the return value is InitRecording()'s result.
int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    available = false;

    // Try to initialize the recording side
    int32_t res = InitRecording();

    // Cancel effect of initialization
    StopRecording();

    if (res != -1)
    {
        available = true;
    }

    return res;
}
1103 
// Creates and configures the PulseAudio playback stream (no connection yet;
// the stream is connected later when playout actually starts). Returns 0 on
// success or if already initialized, -1 on failure. Requires that an output
// device has been selected and that playout is not running.
int32_t AudioDeviceLinuxPulse::InitPlayout()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    // Cannot (re)initialize while playout is active.
    if (_playing)
    {
        return -1;
    }

    // SetPlayoutDevice() must have been called first.
    if (!_outputDeviceIsSpecified)
    {
        return -1;
    }

    // Idempotent: a second call is a no-op.
    if (_playIsInitialized)
    {
        return 0;
    }

    // Initialize the speaker (devices might have been added or removed)
    // Failure here is only a warning; stream creation may still succeed.
    if (InitSpeaker() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  InitSpeaker() failed");
    }

    // Set the play sample specification: 16-bit little-endian PCM at the
    // server's native rate (sample_rate_hz_) with _playChannels channels.
    pa_sample_spec playSampleSpec;
    playSampleSpec.channels = _playChannels;
    playSampleSpec.format = PA_SAMPLE_S16LE;
    playSampleSpec.rate = sample_rate_hz_;

    // Create a new play stream
    _playStream = LATE(pa_stream_new)(_paContext, "playStream",
                                      &playSampleSpec, NULL);

    if (!_playStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to create play stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the playStream to the mixer
    _mixerManager.SetPlayStream(_playStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stream state %d\n",
                 LATE(pa_stream_get_state)(_playStream));

    // Set stream flags: let PA interpolate timing between server updates so
    // latency queries are cheap.
    _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
        | PA_STREAM_INTERPOLATE_TIMING);

    if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag
        // doesn't exist in Ubuntu 8.04 and many people still use that,
        // so we have to check the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_playStream);
        if (!spec)
        {
            // NOTE(review): _playStream is left allocated on this error
            // path; a later InitPlayout() would overwrite the pointer.
            // Confirm whether an unref is needed here.
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  pa_stream_get_sample_spec()");
            return -1;
        }

        // Target latency in bytes for the configured minimum milliseconds.
        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        uint32_t latency = bytesPerSec *
                           WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS /
                           WEBRTC_PA_MSECS_PER_SEC;

        // Set the play buffer attributes
        _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
        _playBufferAttr.tlength = latency; // target fill level of play buffer
        // minimum free num bytes before server request more data
        _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
        // prebuffer tlength before starting playout
        _playBufferAttr.prebuf = _playBufferAttr.tlength -
                                 _playBufferAttr.minreq;

        _configuredLatencyPlay = latency;
    }

    // 10 ms of audio: samples per 10 ms * 2 bytes per sample * num channels.
    _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
    _playbackBufferUnused = _playbackBufferSize;
    _playBuffer = new int8_t[_playbackBufferSize];

    // Enable underflow callback
    LATE(pa_stream_set_underflow_callback)(_playStream,
                                           PaStreamUnderflowCallback, this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_playStream,
                                       PaStreamStateCallback, this);

    // Mark playout side as initialized
    _playIsInitialized = true;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    return 0;
}
1225 
// Creates and configures the PulseAudio capture stream (not yet connected;
// connection happens when recording starts). Returns 0 on success or if
// already initialized, -1 on failure. Requires that an input device has been
// selected and that recording is not running.
int32_t AudioDeviceLinuxPulse::InitRecording()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    // Cannot (re)initialize while recording is active.
    if (_recording)
    {
        return -1;
    }

    // SetRecordingDevice() must have been called first.
    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }

    // Idempotent: a second call is a no-op.
    if (_recIsInitialized)
    {
        return 0;
    }

    // Initialize the microphone (devices might have been added or removed)
    // Failure here is only a warning; stream creation may still succeed.
    if (InitMicrophone() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "  InitMicrophone() failed");
    }

    // Set the rec sample specification: 16-bit little-endian PCM at the
    // server's native rate with _recChannels channels.
    pa_sample_spec recSampleSpec;
    recSampleSpec.channels = _recChannels;
    recSampleSpec.format = PA_SAMPLE_S16LE;
    recSampleSpec.rate = sample_rate_hz_;

    // Create a new rec stream
    _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec,
                                     NULL);
    if (!_recStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to create rec stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the recStream to the mixer
    _mixerManager.SetRecStream(_recStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
    }

    if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
            | PA_STREAM_INTERPOLATE_TIMING);

        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag
        // doesn't exist in Ubuntu 8.04 and many people still use that,
        //  so we have to check the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_recStream);
        if (!spec)
        {
            // NOTE(review): _recStream is left allocated on this error path;
            // confirm whether an unref is needed here.
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  pa_stream_get_sample_spec(rec)");
            return -1;
        }

        // Target capture latency in bytes.
        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        uint32_t latency = bytesPerSec
            * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        // Set the rec buffer attributes
        // Note: fragsize specifies a maximum transfer size, not a minimum, so
        // it is not possible to force a high latency setting, only a low one.
        _recBufferAttr.fragsize = latency; // size of fragment
        _recBufferAttr.maxlength = latency + bytesPerSec
            * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        _configuredLatencyRec = latency;
    }

    // 10 ms of audio: samples per 10 ms * 2 bytes per sample * num channels.
    _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
    _recordBufferUsed = 0;
    _recBuffer = new int8_t[_recordBufferSize];

    // Enable overflow callback
    LATE(pa_stream_set_overflow_callback)(_recStream,
                                          PaStreamOverflowCallback,
                                          this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_recStream,
                                       PaStreamStateCallback,
                                       this);

    // Mark recording side as initialized
    _recIsInitialized = true;

    return 0;
}
1337 
StartRecording()1338 int32_t AudioDeviceLinuxPulse::StartRecording()
1339 {
1340     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1341     if (!_recIsInitialized)
1342     {
1343         return -1;
1344     }
1345 
1346     if (_recording)
1347     {
1348         return 0;
1349     }
1350 
1351     // Set state to ensure that the recording starts from the audio thread.
1352     _startRec = true;
1353 
1354     // The audio thread will signal when recording has started.
1355     _timeEventRec.Set();
1356     if (kEventTimeout == _recStartEvent.Wait(10000))
1357     {
1358         {
1359             CriticalSectionScoped lock(&_critSect);
1360             _startRec = false;
1361         }
1362         StopRecording();
1363         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1364                      "  failed to activate recording");
1365         return -1;
1366     }
1367 
1368     {
1369         CriticalSectionScoped lock(&_critSect);
1370         if (_recording)
1371         {
1372             // The recording state is set by the audio thread after recording
1373             // has started.
1374         } else
1375         {
1376             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1377                          "  failed to activate recording");
1378             return -1;
1379         }
1380     }
1381 
1382     return 0;
1383 }
1384 
// Stops recording and tears down the capture stream: disables callbacks,
// disconnects the stream if connected, releases it, and frees the record
// buffer. Returns 0 on success (or if never initialized), -1 on error.
int32_t AudioDeviceLinuxPulse::StopRecording()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    CriticalSectionScoped lock(&_critSect);

    // Nothing to do if recording was never initialized.
    if (!_recIsInitialized)
    {
        return 0;
    }

    // Initialized but no stream: inconsistent state.
    if (_recStream == NULL)
    {
        return -1;
    }

    // Clear state flags first so the audio thread stops using the stream.
    _recIsInitialized = false;
    _recording = false;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stopping recording");

    // Stop Recording
    PaLock();

    DisableReadCallback();
    LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_recStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to disconnect rec stream, err=%d\n",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  disconnected recording");
    }

    // Drop our reference; the stream object is destroyed.
    LATE(pa_stream_unref)(_recStream);
    _recStream = NULL;

    PaUnLock();

    // Provide the recStream to the mixer (now NULL, i.e. detach it)
    _mixerManager.SetRecStream(_recStream);

    if (_recBuffer)
    {
        delete [] _recBuffer;
        _recBuffer = NULL;
    }

    return 0;
}
1447 
RecordingIsInitialized() const1448 bool AudioDeviceLinuxPulse::RecordingIsInitialized() const
1449 {
1450     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1451     return (_recIsInitialized);
1452 }
1453 
Recording() const1454 bool AudioDeviceLinuxPulse::Recording() const
1455 {
1456     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1457     return (_recording);
1458 }
1459 
PlayoutIsInitialized() const1460 bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const
1461 {
1462     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1463     return (_playIsInitialized);
1464 }
1465 
// Requests the audio thread to start playout and waits (up to 10 s) for it
// to confirm. Returns 0 on success, -1 if playout was never initialized or
// the audio thread failed to start playing.
int32_t AudioDeviceLinuxPulse::StartPlayout()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    if (!_playIsInitialized)
    {
        return -1;
    }

    if (_playing)
    {
        return 0;
    }

    // Set state to ensure that playout starts from the audio thread.
    {
        CriticalSectionScoped lock(&_critSect);
        _startPlay = true;
    }

    // Both |_startPlay| and |_playing| needs protction since they are also
    // accessed on the playout thread.

    // The audio thread will signal when playout has started.
    _timeEventPlay.Set();
    if (kEventTimeout == _playStartEvent.Wait(10000))
    {
        // Timed out: roll back the start request and tear down.
        {
            CriticalSectionScoped lock(&_critSect);
            _startPlay = false;
        }
        StopPlayout();
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to activate playout");
        return -1;
    }

    {
        CriticalSectionScoped lock(&_critSect);
        if (_playing)
        {
            // The playing state is set by the audio thread after playout
            // has started.
        } else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to activate playing");
            return -1;
        }
    }

    return 0;
}
1519 
// Stops playout and tears down the playback stream: disables callbacks,
// disconnects the stream if connected, releases it, and frees the play
// buffer. Returns 0 on success (or if never initialized), -1 on error.
int32_t AudioDeviceLinuxPulse::StopPlayout()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    CriticalSectionScoped lock(&_critSect);

    // Nothing to do if playout was never initialized.
    if (!_playIsInitialized)
    {
        return 0;
    }

    // Initialized but no stream: inconsistent state.
    if (_playStream == NULL)
    {
        return -1;
    }

    // Clear state flags first so the audio thread stops using the stream.
    _playIsInitialized = false;
    _playing = false;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stopping playback");

    // Stop Playout
    PaLock();

    DisableWriteCallback();
    LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_playStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to disconnect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  disconnected playback");
    }

    // Drop our reference; the stream object is destroyed.
    LATE(pa_stream_unref)(_playStream);
    _playStream = NULL;

    PaUnLock();

    // Provide the playStream to the mixer (now NULL, i.e. detach it)
    _mixerManager.SetPlayStream(_playStream);

    if (_playBuffer)
    {
        delete [] _playBuffer;
        _playBuffer = NULL;
    }

    return 0;
}
1584 
PlayoutDelay(uint16_t & delayMS) const1585 int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const
1586 {
1587     CriticalSectionScoped lock(&_critSect);
1588     delayMS = (uint16_t) _sndCardPlayDelay;
1589     return 0;
1590 }
1591 
RecordingDelay(uint16_t & delayMS) const1592 int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const
1593 {
1594     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1595     delayMS = (uint16_t) _sndCardRecDelay;
1596     return 0;
1597 }
1598 
Playing() const1599 bool AudioDeviceLinuxPulse::Playing() const
1600 {
1601     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1602     return (_playing);
1603 }
1604 
SetPlayoutBuffer(const AudioDeviceModule::BufferType type,uint16_t sizeMS)1605 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
1606     const AudioDeviceModule::BufferType type,
1607     uint16_t sizeMS)
1608 {
1609     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1610     if (type != AudioDeviceModule::kFixedBufferSize)
1611     {
1612         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1613                      " Adaptive buffer size not supported on this platform");
1614         return -1;
1615     }
1616 
1617     _playBufType = type;
1618     _playBufDelayFixed = sizeMS;
1619 
1620     return 0;
1621 }
1622 
PlayoutBuffer(AudioDeviceModule::BufferType & type,uint16_t & sizeMS) const1623 int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
1624     AudioDeviceModule::BufferType& type,
1625     uint16_t& sizeMS) const
1626 {
1627     RTC_DCHECK(thread_checker_.CalledOnValidThread());
1628     type = _playBufType;
1629     sizeMS = _playBufDelayFixed;
1630 
1631     return 0;
1632 }
1633 
CPULoad(uint16_t &) const1634 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const
1635 {
1636 
1637     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1638                  "  API call not supported on this platform");
1639     return -1;
1640 }
1641 
PlayoutWarning() const1642 bool AudioDeviceLinuxPulse::PlayoutWarning() const
1643 {
1644   CriticalSectionScoped lock(&_critSect);
1645   return (_playWarning > 0);
1646 }
1647 
PlayoutError() const1648 bool AudioDeviceLinuxPulse::PlayoutError() const
1649 {
1650   CriticalSectionScoped lock(&_critSect);
1651   return (_playError > 0);
1652 }
1653 
RecordingWarning() const1654 bool AudioDeviceLinuxPulse::RecordingWarning() const
1655 {
1656   CriticalSectionScoped lock(&_critSect);
1657   return (_recWarning > 0);
1658 }
1659 
RecordingError() const1660 bool AudioDeviceLinuxPulse::RecordingError() const
1661 {
1662   CriticalSectionScoped lock(&_critSect);
1663   return (_recError > 0);
1664 }
1665 
ClearPlayoutWarning()1666 void AudioDeviceLinuxPulse::ClearPlayoutWarning()
1667 {
1668   CriticalSectionScoped lock(&_critSect);
1669   _playWarning = 0;
1670 }
1671 
ClearPlayoutError()1672 void AudioDeviceLinuxPulse::ClearPlayoutError()
1673 {
1674   CriticalSectionScoped lock(&_critSect);
1675   _playError = 0;
1676 }
1677 
ClearRecordingWarning()1678 void AudioDeviceLinuxPulse::ClearRecordingWarning()
1679 {
1680   CriticalSectionScoped lock(&_critSect);
1681   _recWarning = 0;
1682 }
1683 
ClearRecordingError()1684 void AudioDeviceLinuxPulse::ClearRecordingError()
1685 {
1686   CriticalSectionScoped lock(&_critSect);
1687   _recError = 0;
1688 }
1689 
1690 // ============================================================================
1691 //                                 Private Methods
1692 // ============================================================================
1693 
PaContextStateCallback(pa_context * c,void * pThis)1694 void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis)
1695 {
1696     static_cast<AudioDeviceLinuxPulse*> (pThis)->
1697         PaContextStateCallbackHandler(c);
1698 }
1699 
1700 // ----------------------------------------------------------------------------
1701 //  PaSinkInfoCallback
1702 // ----------------------------------------------------------------------------
1703 
PaSinkInfoCallback(pa_context *,const pa_sink_info * i,int eol,void * pThis)1704 void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
1705                                                const pa_sink_info *i, int eol,
1706                                                void *pThis)
1707 {
1708     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler(
1709         i, eol);
1710 }
1711 
PaSourceInfoCallback(pa_context *,const pa_source_info * i,int eol,void * pThis)1712 void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
1713                                                  const pa_source_info *i,
1714                                                  int eol, void *pThis)
1715 {
1716     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler(
1717         i, eol);
1718 }
1719 
PaServerInfoCallback(pa_context *,const pa_server_info * i,void * pThis)1720 void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/,
1721                                                  const pa_server_info *i,
1722                                                  void *pThis)
1723 {
1724     static_cast<AudioDeviceLinuxPulse*> (pThis)->
1725         PaServerInfoCallbackHandler(i);
1726 }
1727 
PaStreamStateCallback(pa_stream * p,void * pThis)1728 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis)
1729 {
1730     static_cast<AudioDeviceLinuxPulse*> (pThis)->
1731         PaStreamStateCallbackHandler(p);
1732 }
1733 
// Handles PulseAudio context state changes. On terminal states (READY,
// FAILED, TERMINATED) it sets |_paStateChanged| and signals the threaded
// mainloop so any thread blocked in pa_threaded_mainloop_wait() wakes up.
void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  context state cb");

    pa_context_state_t state = LATE(pa_context_get_state)(c);
    switch (state)
    {
        case PA_CONTEXT_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  unconnected");
            break;
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            // Transient states; nothing to signal yet.
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  no state");
            break;
        case PA_CONTEXT_FAILED:
        case PA_CONTEXT_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  failed");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
        case PA_CONTEXT_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  ready");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
    }
}
1767 
// Invoked once per sink during sink enumeration, and a final time with
// |eol| set. Counts sinks in |_numPlayDevices| and, when the count matches
// the requested |_deviceIndex|, records the PA index and copies the sink
// name/description into the caller-provided buffers (if any).
void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info *i,
                                                      int eol)
{
    if (eol)
    {
        // Signal that we are done
        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
        return;
    }

    if (_numPlayDevices == _deviceIndex)
    {
        // Convert the device index to the one of the sink
        _paDeviceIndex = i->index;

        if (_playDeviceName)
        {
            // Copy the sink name (strncpy may not terminate, so terminate
            // explicitly).
            strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
            _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
        }
        if (_playDisplayDeviceName)
        {
            // Copy the sink display name
            strncpy(_playDisplayDeviceName, i->description,
                    kAdmMaxDeviceNameSize);
            _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
        }
    }

    _numPlayDevices++;
}
1800 
// Invoked once per source during source enumeration, and a final time with
// |eol| set. Skips monitor sources (sink loopbacks), counts real capture
// devices in |_numRecDevices|, and when the count matches |_deviceIndex|
// records the PA index and copies the source name/description into the
// caller-provided buffers (if any).
void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(
    const pa_source_info *i,
    int eol)
{
    if (eol)
    {
        // Signal that we are done
        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
        return;
    }

    // We don't want to list output devices
     if (i->monitor_of_sink == PA_INVALID_INDEX)
    {
        if (_numRecDevices == _deviceIndex)
        {
            // Convert the device index to the one of the source
            _paDeviceIndex = i->index;

            if (_recDeviceName)
            {
                // copy the source name (strncpy may not terminate, so
                // terminate explicitly)
                strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
                _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
            }
            if (_recDisplayDeviceName)
            {
                // Copy the source display name
                strncpy(_recDisplayDeviceName, i->description,
                        kAdmMaxDeviceNameSize);
                _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
            }
        }

        _numRecDevices++;
    }
}
1838 
// Handles the server-info reply: captures the server's native sample rate
// and version string, copies the default source/sink names into whichever
// buffers callers have set up, then signals the mainloop waiter.
void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(
    const pa_server_info *i)
{
    // Use PA native sampling rate
    sample_rate_hz_ = i->sample_spec.rate;

    // Copy the PA server version (31 chars + terminator fit _paServerVersion)
    strncpy(_paServerVersion, i->server_version, 31);
    _paServerVersion[31] = '\0';

    if (_recDisplayDeviceName)
    {
        // Copy the default source name
        strncpy(_recDisplayDeviceName, i->default_source_name,
                kAdmMaxDeviceNameSize);
        _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    if (_playDisplayDeviceName)
    {
        // Copy the default sink name
        strncpy(_playDisplayDeviceName, i->default_sink_name,
                kAdmMaxDeviceNameSize);
        _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    // Wake the thread blocked in WaitForOperationCompletion().
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
1867 
// Handles stream state changes: logs the new state and unconditionally
// signals the threaded mainloop so waiters can re-check the stream state.
void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  stream state cb");

    pa_stream_state_t state = LATE(pa_stream_get_state)(p);
    switch (state)
    {
        case PA_STREAM_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  unconnected");
            break;
        case PA_STREAM_CREATING:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  creating");
            break;
        case PA_STREAM_FAILED:
        case PA_STREAM_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  failed");
            break;
        case PA_STREAM_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  ready");
            break;
    }

    // Always wake waiters; they inspect the state themselves.
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
1897 
CheckPulseAudioVersion()1898 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion()
1899 {
1900     PaLock();
1901 
1902     pa_operation* paOperation = NULL;
1903 
1904     // get the server info and update deviceName
1905     paOperation = LATE(pa_context_get_server_info)(_paContext,
1906                                                    PaServerInfoCallback,
1907                                                    this);
1908 
1909     WaitForOperationCompletion(paOperation);
1910 
1911     PaUnLock();
1912 
1913     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
1914                  "  checking PulseAudio version: %s", _paServerVersion);
1915 
1916     return 0;
1917 }
1918 
InitSamplingFrequency()1919 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency()
1920 {
1921     PaLock();
1922 
1923     pa_operation* paOperation = NULL;
1924 
1925     // Get the server info and update sample_rate_hz_
1926     paOperation = LATE(pa_context_get_server_info)(_paContext,
1927                                                    PaServerInfoCallback,
1928                                                    this);
1929 
1930     WaitForOperationCompletion(paOperation);
1931 
1932     PaUnLock();
1933 
1934     return 0;
1935 }
1936 
GetDefaultDeviceInfo(bool recDevice,char * name,uint16_t & index)1937 int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
1938                                                     char* name,
1939                                                     uint16_t& index)
1940 {
1941     char tmpName[kAdmMaxDeviceNameSize] = {0};
1942     // subtract length of "default: "
1943     uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
1944     char* pName = NULL;
1945 
1946     if (name)
1947     {
1948         // Add "default: "
1949         strcpy(name, "default: ");
1950         pName = &name[9];
1951     }
1952 
1953     // Tell the callback that we want
1954     // the name for this device
1955     if (recDevice)
1956     {
1957         _recDisplayDeviceName = tmpName;
1958     } else
1959     {
1960         _playDisplayDeviceName = tmpName;
1961     }
1962 
1963     // Set members
1964     _paDeviceIndex = -1;
1965     _deviceIndex = 0;
1966     _numPlayDevices = 0;
1967     _numRecDevices = 0;
1968 
1969     PaLock();
1970 
1971     pa_operation* paOperation = NULL;
1972 
1973     // Get the server info and update deviceName
1974     paOperation = LATE(pa_context_get_server_info)(_paContext,
1975                                                    PaServerInfoCallback,
1976                                                    this);
1977 
1978     WaitForOperationCompletion(paOperation);
1979 
1980     // Get the device index
1981     if (recDevice)
1982     {
1983         paOperation
1984             = LATE(pa_context_get_source_info_by_name)(_paContext,
1985                                                        (char *) tmpName,
1986                                                        PaSourceInfoCallback,
1987                                                        this);
1988     } else
1989     {
1990         paOperation
1991             = LATE(pa_context_get_sink_info_by_name)(_paContext,
1992                                                      (char *) tmpName,
1993                                                      PaSinkInfoCallback,
1994                                                      this);
1995     }
1996 
1997     WaitForOperationCompletion(paOperation);
1998 
1999     PaUnLock();
2000 
2001     // Set the index
2002     index = _paDeviceIndex;
2003 
2004     if (name)
2005     {
2006         // Copy to name string
2007         strncpy(pName, tmpName, nameLen);
2008     }
2009 
2010     // Clear members
2011     _playDisplayDeviceName = NULL;
2012     _recDisplayDeviceName = NULL;
2013     _paDeviceIndex = -1;
2014     _deviceIndex = -1;
2015     _numPlayDevices = 0;
2016     _numRecDevices = 0;
2017 
2018     return 0;
2019 }
2020 
// Loads libpulse via the late-binding symbol table, creates and starts the
// threaded mainloop, creates a context and connects it to the default
// server, then verifies the server version and initial sample rate.
// Returns 0 on success, -1 on any failure (TerminatePulseAudio() is
// expected to clean up partially-initialized state).
int32_t AudioDeviceLinuxPulse::InitPulseAudio()
{
    int retVal = 0;

    // Load libpulse
    if (!PaSymbolTable.Load())
    {
        // Most likely the Pulse library and sound server are not installed on
        // this system
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to load symbol table");
        return -1;
    }

    // Create a mainloop API and connection to the default server
    // the mainloop is the internal asynchronous API event loop
    if (_paMainloop) {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  PA mainloop has already existed");
        return -1;
    }
    _paMainloop = LATE(pa_threaded_mainloop_new)();
    if (!_paMainloop)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  could not create mainloop");
        return -1;
    }

    // Start the threaded main loop
    retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to start main loop, error=%d", retVal);
        return -1;
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 "  mainloop running!");

    // All further mainloop/context calls must be made with the mainloop
    // lock held.
    PaLock();

    _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
    if (!_paMainloopApi)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  could not create mainloop API");
        PaUnLock();
        return -1;
    }

    // Create a new PulseAudio context
    if (_paContext){
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  PA context has already existed");
        PaUnLock();
        return -1;
    }
    _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");

    if (!_paContext)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  could not create context");
        PaUnLock();
        return -1;
    }

    // Set state callback function
    LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback,
                                        this);

    // Connect the context to a server (default). NOAUTOSPAWN: do not start
    // a PulseAudio daemon if none is running.
    _paStateChanged = false;
    retVal = LATE(pa_context_connect)(_paContext,
                                      NULL,
                                      PA_CONTEXT_NOAUTOSPAWN,
                                      NULL);

    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to connect context, error=%d", retVal);
        PaUnLock();
        return -1;
    }

    // Wait for state change. The state callback signals the mainloop and
    // sets _paStateChanged when a terminal/ready state is reached.
    while (!_paStateChanged)
    {
        LATE(pa_threaded_mainloop_wait)(_paMainloop);
    }

    // Now check to see what final state we reached.
    pa_context_state_t state = LATE(pa_context_get_state)(_paContext);

    if (state != PA_CONTEXT_READY)
    {
        if (state == PA_CONTEXT_FAILED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to connect to PulseAudio sound server");
        } else if (state == PA_CONTEXT_TERMINATED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  PulseAudio connection terminated early");
        } else
        {
            // Shouldn't happen, because we only signal on one of those three
            // states
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  unknown problem connecting to PulseAudio");
        }
        PaUnLock();
        return -1;
    }

    PaUnLock();

    // Give the objects to the mixer manager
    _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);

    // Check the version
    if (CheckPulseAudioVersion() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  PulseAudio version %s not supported",
                     _paServerVersion);
        return -1;
    }

    // Initialize sampling frequency
    if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to initialize sampling frequency,"
                     " set to %d Hz",
                     sample_rate_hz_);
        return -1;
    }

    return 0;
}
2165 
TerminatePulseAudio()2166 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio()
2167 {
2168     // Do nothing if the instance doesn't exist
2169     // likely PaSymbolTable.Load() fails
2170     if (!_paMainloop) {
2171         return 0;
2172     }
2173 
2174     PaLock();
2175 
2176     // Disconnect the context
2177     if (_paContext)
2178     {
2179         LATE(pa_context_disconnect)(_paContext);
2180     }
2181 
2182     // Unreference the context
2183     if (_paContext)
2184     {
2185         LATE(pa_context_unref)(_paContext);
2186     }
2187 
2188     PaUnLock();
2189     _paContext = NULL;
2190 
2191     // Stop the threaded main loop
2192     if (_paMainloop)
2193     {
2194         LATE(pa_threaded_mainloop_stop)(_paMainloop);
2195     }
2196 
2197     // Free the mainloop
2198     if (_paMainloop)
2199     {
2200         LATE(pa_threaded_mainloop_free)(_paMainloop);
2201     }
2202 
2203     _paMainloop = NULL;
2204 
2205     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2206                  "  PulseAudio terminated");
2207 
2208     return 0;
2209 }
2210 
// Acquires the PulseAudio threaded-mainloop lock. Must be paired with
// PaUnLock(); required around all pa_context/pa_stream calls.
void AudioDeviceLinuxPulse::PaLock()
{
    LATE(pa_threaded_mainloop_lock)(_paMainloop);
}
2215 
// Releases the PulseAudio threaded-mainloop lock taken by PaLock().
void AudioDeviceLinuxPulse::PaUnLock()
{
    LATE(pa_threaded_mainloop_unlock)(_paMainloop);
}
2220 
// Blocks on the mainloop until |paOperation| leaves the RUNNING state,
// then drops our reference to it. Must be called with the mainloop lock
// held (pa_threaded_mainloop_wait releases it while waiting). A NULL
// operation is traced and ignored.
void AudioDeviceLinuxPulse::WaitForOperationCompletion(
    pa_operation* paOperation) const
{
    if (!paOperation)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "paOperation NULL in WaitForOperationCompletion");
        return;
    }

    while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING)
    {
        LATE(pa_threaded_mainloop_wait)(_paMainloop);
    }

    LATE(pa_operation_unref)(paOperation);
}
2238 
2239 // ============================================================================
2240 //                                  Thread Methods
2241 // ============================================================================
2242 
EnableWriteCallback()2243 void AudioDeviceLinuxPulse::EnableWriteCallback()
2244 {
2245     if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY)
2246     {
2247         // May already have available space. Must check.
2248         _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
2249         if (_tempBufferSpace > 0)
2250         {
2251             // Yup, there is already space available, so if we register a
2252             // write callback then it will not receive any event. So dispatch
2253             // one ourself instead.
2254             _timeEventPlay.Set();
2255             return;
2256         }
2257     }
2258 
2259     LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback,
2260                                        this);
2261 }
2262 
// Unregisters the PA write callback on the playout stream (used while the
// worker thread is asynchronously filling the buffer).
void AudioDeviceLinuxPulse::DisableWriteCallback()
{
    LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
}
2267 
// Static trampoline for the PA write callback: forwards the available
// buffer space to the instance handler via the |pThis| user pointer.
// Runs on the PulseAudio mainloop thread.
void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/,
                                                  size_t buffer_space,
                                                  void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler(
        buffer_space);
}
2275 
// Records how much the playout stream can accept and wakes the play
// worker thread. Runs on the PulseAudio mainloop thread.
void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace)
{
    _tempBufferSpace = bufferSpace;

    // Since we write the data asynchronously on a different thread, we have
    // to temporarily disable the write callback or else Pulse will call it
    // continuously until we write the data. We re-enable it below.
    DisableWriteCallback();
    _timeEventPlay.Set();
}
2286 
// Static trampoline for the PA underflow callback: dispatches to the
// instance handler via the |pThis| user pointer.
void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/,
                                                      void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->
        PaStreamUnderflowCallbackHandler();
}
2293 
// Handles a playout underflow: if we originally configured a latency
// target, grows it by WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS worth of
// bytes and pushes the new buffer attributes to the stream. Runs on the
// PulseAudio mainloop thread.
void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "  Playout underflow");

    if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // We didn't configure a pa_buffer_attr before, so switching to
        // one now would be questionable.
        return;
    }

    // Otherwise reconfigure the stream with a higher target latency.

    const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream);
    if (!spec)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  pa_stream_get_sample_spec()");
        return;
    }

    // New latency = old latency + increment (milliseconds converted to
    // bytes using the stream's byte rate).
    size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
    uint32_t newLatency = _configuredLatencyPlay + bytesPerSec *
                          WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS /
                          WEBRTC_PA_MSECS_PER_SEC;

    // Set the play buffer attributes
    _playBufferAttr.maxlength = newLatency;
    _playBufferAttr.tlength = newLatency;
    _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
    _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;

    pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream,
                                                       &_playBufferAttr, NULL,
                                                       NULL);
    if (!op)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  pa_stream_set_buffer_attr()");
        return;
    }

    // Don't need to wait for this to complete.
    LATE(pa_operation_unref)(op);

    // Save the new latency in case we underflow again.
    _configuredLatencyPlay = newLatency;
}
2343 
// Registers the PA read callback on the recording stream so the capture
// path is notified when new data is available.
void AudioDeviceLinuxPulse::EnableReadCallback()
{
    LATE(pa_stream_set_read_callback)(_recStream,
                                      &PaStreamReadCallback,
                                      this);
}
2350 
// Unregisters the PA read callback on the recording stream (used while
// the worker thread is asynchronously consuming the peeked data).
void AudioDeviceLinuxPulse::DisableReadCallback()
{
    LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
}
2355 
// Static trampoline for the PA read callback: dispatches to the instance
// handler via the |pThis| user pointer. The stream and size arguments are
// unused (the handler re-peeks the stream itself).
void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/,
                                                 size_t /*unused2*/,
                                                 void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->
        PaStreamReadCallbackHandler();
}
2363 
// Peeks the captured fragment (pointer + size stored in _tempSampleData /
// _tempSampleDataSize for the rec worker thread) and wakes that thread.
// Runs on the PulseAudio mainloop thread.
void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler()
{
    // We get the data pointer and size now in order to save one Lock/Unlock
    // in the worker thread.
    if (LATE(pa_stream_peek)(_recStream,
                             &_tempSampleData,
                             &_tempSampleDataSize) != 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  Can't read data!");
        return;
    }

    // Since we consume the data asynchronously on a different thread, we have
    // to temporarily disable the read callback or else Pulse will call it
    // continuously until we consume the data. We re-enable it below.
    DisableReadCallback();
    _timeEventRec.Set();
}
2383 
// Static trampoline for the PA overflow callback: dispatches to the
// instance handler via the |pThis| user pointer.
void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/,
                                                     void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->
        PaStreamOverflowCallbackHandler();
}
2390 
// Logs a recording overflow. No recovery is attempted; PulseAudio drops
// the overflowed data.
void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "  Recording overflow");
}
2396 
LatencyUsecs(pa_stream * stream)2397 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream)
2398 {
2399     if (!WEBRTC_PA_REPORT_LATENCY)
2400     {
2401         return 0;
2402     }
2403 
2404     if (!stream)
2405     {
2406         return 0;
2407     }
2408 
2409     pa_usec_t latency;
2410     int negative;
2411     if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0)
2412     {
2413         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2414                      "  Can't query latency");
2415         // We'd rather continue playout/capture with an incorrect delay than
2416         // stop it altogether, so return a valid value.
2417         return 0;
2418     }
2419 
2420     if (negative)
2421     {
2422         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2423                      "  warning: pa_stream_get_latency reported negative "
2424                      "delay");
2425 
2426         // The delay can be negative for monitoring streams if the captured
2427         // samples haven't been played yet. In such a case, "latency"
2428         // contains the magnitude, so we must negate it to get the real value.
2429         int32_t tmpLatency = (int32_t) -latency;
2430         if (tmpLatency < 0)
2431         {
2432             // Make sure that we don't use a negative delay.
2433             tmpLatency = 0;
2434         }
2435 
2436         return tmpLatency;
2437     } else
2438     {
2439         return (int32_t) latency;
2440     }
2441 }
2442 
// Consumes |bufferSize| bytes of captured audio from |bufferData|:
// first tops up any partially-filled 10 ms buffer, then hands complete
// 10 ms chunks to ProcessRecordedData(), and finally stashes leftovers
// in _recBuffer for the next call. Also updates the capture (and, when a
// play stream exists, playout) delay estimates. Returns 0 normally, -1
// when recording stopped mid-delivery. Caller must hold _critSect.
int32_t AudioDeviceLinuxPulse::ReadRecordedData(
    const void* bufferData,
    size_t bufferSize)  EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    size_t size = bufferSize;
    // 10 ms worth of samples: buffer bytes / (2 bytes per sample * channels).
    uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);

    // Account for the peeked data and the used data: stream latency (ms)
    // plus 10 ms per buffered 10 ms chunk not yet delivered.
    uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream)
        / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize));

    _sndCardRecDelay = recDelay;

    if (_playStream)
    {
        // Get the playout delay.
        _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000);
    }

    if (_recordBufferUsed > 0)
    {
        // Have to copy to the buffer until it is full.
        size_t copy = _recordBufferSize - _recordBufferUsed;
        if (size < copy)
        {
            copy = size;
        }

        memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
        _recordBufferUsed += copy;
        bufferData = static_cast<const char *> (bufferData) + copy;
        size -= copy;

        if (_recordBufferUsed != _recordBufferSize)
        {
            // Not enough data yet to pass to VoE.
            return 0;
        }

        // Provide data to VoiceEngine.
        if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1)
        {
            // We have stopped recording.
            return -1;
        }

        _recordBufferUsed = 0;
    }

    // Now process full 10ms sample sets directly from the input.
    while (size >= _recordBufferSize)
    {
        // Provide data to VoiceEngine.
        if (ProcessRecordedData(
            static_cast<int8_t *> (const_cast<void *> (bufferData)),
            numRecSamples, recDelay) == -1)
        {
            // We have stopped recording.
            return -1;
        }

        bufferData = static_cast<const char *> (bufferData) +
                     _recordBufferSize;
        size -= _recordBufferSize;

        // We have consumed 10ms of data.
        recDelay -= 10;
    }

    // Now save any leftovers for later.
    if (size > 0)
    {
        memcpy(_recBuffer, bufferData, size);
        _recordBufferUsed = size;
    }

    return 0;
}
2521 
// Delivers one 10 ms chunk of recorded samples to VoiceEngine through
// _ptrAudioBuffer, handling AGC mic-level read-back/adjustment and the
// legacy +10 ms AEC delay offset. Drops _critSect around the delivery
// callback (UnLock/Lock), so returns -1 if recording was stopped while
// unlocked; 0 otherwise. Caller must hold _critSect.
int32_t AudioDeviceLinuxPulse::ProcessRecordedData(
    int8_t *bufferData,
    uint32_t bufferSizeInSamples,
    uint32_t recDelay) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    uint32_t currentMicLevel(0);
    uint32_t newMicLevel(0);

    _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);

    if (AGC())
    {
        // Store current mic level in the audio buffer if AGC is enabled
        if (MicrophoneVolume(currentMicLevel) == 0)
        {
            // This call does not affect the actual microphone volume
            _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
        }
    }

    const uint32_t clockDrift(0);
    // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
    // near-end signals at the AEC for PulseAudio. I think the system delay is
    // being correctly calculated here, but for legacy reasons we add +10 ms
    // to the value in the AEC. The real fix will be part of a larger
    // investigation into managing system delay in the AEC.
    if (recDelay > 10)
        recDelay -= 10;
    else
        recDelay = 0;
    _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift);
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());
    // Deliver recorded samples at specified sample rate,
    // mic level etc. to the observer using callback.
    // Must release the lock: the delivery callback may block or call back
    // into this object.
    UnLock();
    _ptrAudioBuffer->DeliverRecordedData();
    Lock();

    // We have been unlocked - check the flag again.
    if (!_recording)
    {
        return -1;
    }

    if (AGC())
    {
        newMicLevel = _ptrAudioBuffer->NewMicLevel();
        if (newMicLevel != 0)
        {
            // The VQE will only deliver non-zero microphone levels when a
            // change is needed.
            // Set this new mic level (received from the observer as return
            // value in the callback).
            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                         "  AGC change of volume: old=%u => new=%u",
                         currentMicLevel, newMicLevel);
            if (SetMicrophoneVolume(newMicLevel) == -1)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id,
                             "  the required modification of the microphone "
                             "volume failed");
            }
        }
    }

    return 0;
}
2590 
PlayThreadFunc(void * pThis)2591 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis)
2592 {
2593     return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess());
2594 }
2595 
RecThreadFunc(void * pThis)2596 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis)
2597 {
2598     return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess());
2599 }
2600 
// One iteration of the playout worker thread. Waits (up to 1 s) for the
// play event; on wake-up either performs the one-time stream start
// (_startPlay: resolve device name, connect the playout stream, wait for
// PA_STREAM_READY, signal _playStartEvent) or, while playing, writes
// buffered/fresh PCM into the stream as far as _tempBufferSpace allows
// and re-arms the write callback. Always returns true so the thread
// keeps running.
bool AudioDeviceLinuxPulse::PlayThreadProcess()
{
    switch (_timeEventPlay.Wait(1000))
    {
        case kEventSignaled:
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    CriticalSectionScoped lock(&_critSect);

    if (_startPlay)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startPlay true, performing initial actions");

        _startPlay = false;
        _playDeviceName = NULL;

        // Set if not default device
        if (_outputDeviceIndex > 0)
        {
            // Get the playout device name
            _playDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _outputDeviceIndex;
            PlayoutDevices();
        }

        // Start muted only supported on 0.9.11 and up
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            // Get the currently saved speaker mute status
            // and set the initial mute status accordingly
            bool enabled(false);
            _mixerManager.SpeakerMute(enabled);
            if (enabled)
            {
                _playStreamFlags |= PA_STREAM_START_MUTED;
            }
        }

        // Get the currently saved speaker volume
        uint32_t volume = 0;
        if (update_speaker_volume_at_startup_)
          _mixerManager.SpeakerVolume(volume);

        PaLock();

        // NULL gives PA the choice of startup volume.
        pa_cvolume* ptr_cvolume = NULL;
        if (update_speaker_volume_at_startup_) {
          pa_cvolume cVolumes;
          ptr_cvolume = &cVolumes;

          // Set the same volume for all channels
          const pa_sample_spec *spec =
              LATE(pa_stream_get_sample_spec)(_playStream);
          LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
          update_speaker_volume_at_startup_ = false;
        }

        // Connect the stream to a sink
        if (LATE(pa_stream_connect_playback)(
            _playStream,
            _playDeviceName,
            &_playBufferAttr,
            (pa_stream_flags_t) _playStreamFlags,
            ptr_cvolume, NULL) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to connect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  play stream connected");

        // Wait for state change. The stream state callback signals the
        // mainloop when the stream becomes ready.
        while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  play stream ready");

        // We can now handle write callbacks
        EnableWriteCallback();

        PaUnLock();

        // Clear device name
        if (_playDeviceName)
        {
            delete [] _playDeviceName;
            _playDeviceName = NULL;
        }

        _playing = true;
        // Unblock StartPlayout(), which waits on this event.
        _playStartEvent.Set();

        return true;
    }

    if (_playing)
    {
        if (!_recording)
        {
            // Update the playout delay
            _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream)
                / 1000);
        }

        // First drain any leftover from the previous 10 ms buffer.
        if (_playbackBufferUnused < _playbackBufferSize)
        {

            size_t write = _playbackBufferSize - _playbackBufferUnused;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            PaLock();
            if (LATE(pa_stream_write)(
                _playStream,
                (void *) &_playBuffer[_playbackBufferUnused],
                write, NULL, (int64_t) 0,
                PA_SEEK_RELATIVE) != PA_OK)
            {
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     "  pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(
                                 kTraceError,
                                 kTraceUtility,
                                 _id,
                                 "  kPlayoutError message posted: "
                                 "_writeErrors=%u, error=%d",
                                 _writeErrors,
                                 LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            _playbackBufferUnused += write;
            _tempBufferSpace -= write;
        }

        uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
        // Might have been reduced to zero by the above.
        if (_tempBufferSpace > 0)
        {
            // Ask for new PCM data to be played out using the
            // AudioDeviceBuffer ensure that this callback is executed
            // without taking the audio-thread lock.
            UnLock();
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  requesting data");
            uint32_t nSamples =
                _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
            Lock();

            // We have been unlocked - check the flag again.
            if (!_playing)
            {
                return true;
            }

            nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
            if (nSamples != numPlaySamples)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
                             _id, "  invalid number of output samples(%d)",
                             nSamples);
            }

            size_t write = _playbackBufferSize;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  will write");
            PaLock();
            if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0],
                                      write, NULL, (int64_t) 0,
                                      PA_SEEK_RELATIVE) != PA_OK)
            {
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     "  pending playout error exists");
                    }
                     // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(
                                 kTraceError,
                                 kTraceUtility,
                                 _id,
                                 "  kPlayoutError message posted: "
                                 "_writeErrors=%u, error=%d",
                                 _writeErrors,
                                 LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            // Remember how much of the fresh buffer was written; the
            // remainder is flushed on the next iteration.
            _playbackBufferUnused = write;
        }

        _tempBufferSpace = 0;
        PaLock();
        EnableWriteCallback();
        PaUnLock();

    }  // _playing

    return true;
}
2841 
RecThreadProcess()2842 bool AudioDeviceLinuxPulse::RecThreadProcess()
2843 {
2844     switch (_timeEventRec.Wait(1000))
2845     {
2846         case kEventSignaled:
2847             break;
2848         case kEventError:
2849             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2850                          "EventWrapper::Wait() failed");
2851             return true;
2852         case kEventTimeout:
2853             return true;
2854     }
2855 
2856     CriticalSectionScoped lock(&_critSect);
2857 
2858     if (_startRec)
2859     {
2860         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2861                      "_startRec true, performing initial actions");
2862 
2863         _recDeviceName = NULL;
2864 
2865         // Set if not default device
2866         if (_inputDeviceIndex > 0)
2867         {
2868             // Get the recording device name
2869             _recDeviceName = new char[kAdmMaxDeviceNameSize];
2870             _deviceIndex = _inputDeviceIndex;
2871             RecordingDevices();
2872         }
2873 
2874         PaLock();
2875 
2876         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2877                      "  connecting stream");
2878 
2879         // Connect the stream to a source
2880         if (LATE(pa_stream_connect_record)(_recStream,
2881             _recDeviceName,
2882             &_recBufferAttr,
2883             (pa_stream_flags_t) _recStreamFlags) != PA_OK)
2884         {
2885             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2886                          "  failed to connect rec stream, err=%d",
2887                          LATE(pa_context_errno)(_paContext));
2888         }
2889 
2890         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2891                      "  connected");
2892 
2893         // Wait for state change
2894         while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY)
2895         {
2896             LATE(pa_threaded_mainloop_wait)(_paMainloop);
2897         }
2898 
2899         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2900                      "  done");
2901 
2902         // We can now handle read callbacks
2903         EnableReadCallback();
2904 
2905         PaUnLock();
2906 
2907         // Clear device name
2908         if (_recDeviceName)
2909         {
2910             delete [] _recDeviceName;
2911             _recDeviceName = NULL;
2912         }
2913 
2914         _startRec = false;
2915         _recording = true;
2916         _recStartEvent.Set();
2917 
2918         return true;
2919     }
2920 
2921     if (_recording)
2922     {
2923         // Read data and provide it to VoiceEngine
2924         if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1)
2925         {
2926             return true;
2927         }
2928 
2929         _tempSampleData = NULL;
2930         _tempSampleDataSize = 0;
2931 
2932         PaLock();
2933         while (true)
2934         {
2935             // Ack the last thing we read
2936             if (LATE(pa_stream_drop)(_recStream) != 0)
2937             {
2938                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
2939                              _id, "  failed to drop, err=%d\n",
2940                              LATE(pa_context_errno)(_paContext));
2941             }
2942 
2943             if (LATE(pa_stream_readable_size)(_recStream) <= 0)
2944             {
2945                 // Then that was all the data
2946                 break;
2947             }
2948 
2949             // Else more data.
2950             const void *sampleData;
2951             size_t sampleDataSize;
2952 
2953             if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize)
2954                 != 0)
2955             {
2956                 _recError = 1; // triggers callback from module process thread
2957                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
2958                              _id, "  RECORD_ERROR message posted, error = %d",
2959                              LATE(pa_context_errno)(_paContext));
2960                 break;
2961             }
2962 
2963             _sndCardRecDelay = (uint32_t) (LatencyUsecs(_recStream)
2964                 / 1000);
2965 
2966             // Drop lock for sigslot dispatch, which could take a while.
2967             PaUnLock();
2968             // Read data and provide it to VoiceEngine
2969             if (ReadRecordedData(sampleData, sampleDataSize) == -1)
2970             {
2971                 return true;
2972             }
2973             PaLock();
2974 
2975             // Return to top of loop for the ack and the check for more data.
2976         }
2977 
2978         EnableReadCallback();
2979         PaUnLock();
2980 
2981     }  // _recording
2982 
2983     return true;
2984 }
2985 
KeyPressed() const2986 bool AudioDeviceLinuxPulse::KeyPressed() const{
2987 
2988   char szKey[32];
2989   unsigned int i = 0;
2990   char state = 0;
2991 
2992   if (!_XDisplay)
2993     return false;
2994 
2995   // Check key map status
2996   XQueryKeymap(_XDisplay, szKey);
2997 
2998   // A bit change in keymap means a key is pressed
2999   for (i = 0; i < sizeof(szKey); i++)
3000     state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
3001 
3002   // Save old state
3003   memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
3004   return (state != 0);
3005 }
3006 }
3007