/external/ltp/testcases/kernel/device-drivers/v4l/user_space/

  test_VIDIOC_ENUMAUDIO.c
    in test_VIDIOC_ENUMAUDIO():
      41  struct v4l2_audio audio;    [local]
      47  memset(&audio, 0xff, sizeof(audio));
      48  audio.index = i;
      49  ret_enum = ioctl(get_video_fd(), VIDIOC_ENUMAUDIO, &audio);
      58  CU_ASSERT_EQUAL(audio.index, i);
      60  CU_ASSERT(0 < strlen((char *)audio.name));
      62  ((char *)audio.name, sizeof(audio.name)));
      66  CU_ASSERT_EQUAL(audio.reserved[0], 0);
      67  CU_ASSERT_EQUAL(audio.reserved[1], 0);
      75  audio2.index = audio.index;
    [all …]

  test_VIDIOC_AUDIO.c
    in test_VIDIOC_G_AUDIO():
      67  struct v4l2_audio audio;    [local]
      70  memset(&audio, 0xff, sizeof(audio));
      71  ret_get = ioctl(get_video_fd(), VIDIOC_G_AUDIO, &audio);
      82  CU_ASSERT(0 < strlen((char *)audio.name));
      83  CU_ASSERT(valid_string((char *)audio.name, sizeof(audio.name)));
      85  CU_ASSERT(valid_audio_capability(audio.capability));
      86  CU_ASSERT(valid_audio_mode(audio.mode));
      88  CU_ASSERT_EQUAL(audio.reserved[0], 0);
      89  CU_ASSERT_EQUAL(audio.reserved[1], 0);
      97  audio2.index = audio.index;
    [all …]
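The two LTP cases above drive the V4L2 audio-input ioctls through a CUnit harness. As a minimal sketch of the same enumeration and query pattern outside that harness (the /dev/video0 path and the reduced error handling are illustrative assumptions, not part of the tests):

    #include <cstdio>
    #include <cstring>
    #include <cerrno>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main() {
      // Hypothetical device node; the LTP tests obtain theirs via get_video_fd().
      int fd = open("/dev/video0", O_RDWR);
      if (fd < 0) { perror("open"); return 1; }

      // Enumerate audio inputs: raise .index until the driver returns EINVAL.
      for (__u32 i = 0; ; ++i) {
        struct v4l2_audio audio;
        memset(&audio, 0, sizeof(audio));
        audio.index = i;
        if (ioctl(fd, VIDIOC_ENUMAUDIO, &audio) < 0) {
          if (errno != EINVAL) perror("VIDIOC_ENUMAUDIO");
          break;  // no more audio inputs
        }
        printf("audio input %u: %s (capability 0x%x, mode 0x%x)\n",
               audio.index, (const char *)audio.name, audio.capability, audio.mode);
      }

      // Query the currently selected audio input, as test_VIDIOC_G_AUDIO does.
      struct v4l2_audio current;
      memset(&current, 0, sizeof(current));
      if (ioctl(fd, VIDIOC_G_AUDIO, &current) == 0)
        printf("current audio input: %u (%s)\n", current.index, (const char *)current.name);

      close(fd);
      return 0;
    }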
/external/vboot_reference/firmware/lib/

  vboot_audio.c
    in VbGetDevMusicNotes():
      62  static void VbGetDevMusicNotes(VbAudioContext *audio, int use_short)    [argument]
      85  if (!audio->background_beep)
     192  audio->music_notes = notebuf;
     193  audio->note_count = count;
     194  audio->free_notes_when_done = 1;
     200  audio->music_notes = builtin;
     201  audio->note_count = count;
     202  audio->free_notes_when_done = 0;
    in VbAudioOpen():
     212  VbAudioContext *audio = &au;    [local]
     227  Memset(audio, 0, sizeof(*audio));
    [all …]
/external/webrtc/webrtc/modules/audio_processing/

  noise_suppression_impl.cc
    in AnalyzeCaptureAudio():
      70  void NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {    [argument]
      71  RTC_DCHECK(audio);
      78  RTC_DCHECK_GE(160u, audio->num_frames_per_band());
      79  RTC_DCHECK_EQ(suppressors_.size(), audio->num_channels());
      82  audio->split_bands_const_f(i)[kBand0To8kHz]);
    in ProcessCaptureAudio():
      87  void NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {    [argument]
      88  RTC_DCHECK(audio);
      94  RTC_DCHECK_GE(160u, audio->num_frames_per_band());
      95  RTC_DCHECK_EQ(suppressors_.size(), audio->num_channels());
      99  audio->split_bands_const_f(i),
    [all …]

  gain_control_impl.cc
    in ProcessRenderAudio():
      69  int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {    [argument]
      75  assert(audio->num_frames_per_band() <= 160);
      81  WebRtcAgc_GetAddFarendError(my_handle, audio->num_frames_per_band());
      88  render_queue_buffer_.end(), audio->mixed_low_pass_data(),
      89  (audio->mixed_low_pass_data() + audio->num_frames_per_band()));
    in AnalyzeCaptureAudio():
     127  int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {    [argument]
     134  assert(audio->num_frames_per_band() <= 160);
     135  assert(audio->num_channels() == num_handles());
     145  audio->split_bands(i),
     146  audio->num_bands(),
    [all …]

  echo_control_mobile_impl.cc
    in ProcessRenderAudio():
      93  int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {    [argument]
     100  assert(audio->num_frames_per_band() <= 160);
     101  assert(audio->num_channels() == apm_->num_reverse_channels());
     108  for (size_t j = 0; j < audio->num_channels(); j++) {
     111  my_handle, audio->split_bands_const(j)[kBand0To8kHz],
     112  audio->num_frames_per_band());
     119  audio->split_bands_const(j)[kBand0To8kHz],
     120  (audio->split_bands_const(j)[kBand0To8kHz] +
     121  audio->num_frames_per_band());
    in ProcessCaptureAudio():
     167  int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {    [argument]
    [all …]

  echo_cancellation_impl.cc
    in ProcessRenderAudio():
      88  int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {    [argument]
      94  assert(audio->num_frames_per_band() <= 160);
      95  assert(audio->num_channels() == apm_->num_reverse_channels());
     103  for (size_t j = 0; j < audio->num_channels(); j++) {
     108  my_handle, audio->split_bands_const_f(j)[kBand0To8kHz],
     109  audio->num_frames_per_band());
     117  audio->split_bands_const_f(j)[kBand0To8kHz],
     118  (audio->split_bands_const_f(j)[kBand0To8kHz] +
     119  audio->num_frames_per_band());
    in ProcessCaptureAudio():
     162  int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {    [argument]
    [all …]

  level_estimator_impl.cc
    in ProcessStream():
      31  void LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {    [argument]
      32  RTC_DCHECK(audio);
      38  for (size_t i = 0; i < audio->num_channels(); i++) {
      39  rms_->Process(audio->channels_const()[i], audio->num_frames());
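The five files above are the per-module capture and render paths (noise suppression, gain control, mobile and full echo cancellation, level estimation) that sit behind WebRTC's AudioProcessing front end. As a rough sketch of how an application of this era enables them through the public pre-Config setter API; stream parameters are illustrative and header paths and exact signatures vary between WebRTC revisions:

    #include "webrtc/modules/audio_processing/include/audio_processing.h"
    #include "webrtc/modules/interface/module_common_types.h"  // AudioFrame; path varies by revision

    void ConfigureAndProcess(webrtc::AudioFrame* capture_frame) {
      webrtc::AudioProcessing* apm = webrtc::AudioProcessing::Create();

      // Capture-side modules listed above.
      apm->noise_suppression()->Enable(true);
      apm->noise_suppression()->set_level(webrtc::NoiseSuppression::kHigh);
      apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveDigital);
      apm->gain_control()->Enable(true);
      apm->echo_cancellation()->Enable(true);  // echo_control_mobile() is the low-complexity alternative
      apm->level_estimator()->Enable(true);

      // Render audio must also be fed to the APM (AnalyzeReverseStream/ProcessReverseStream)
      // for the echo canceller to converge; that half is omitted here.
      apm->set_stream_delay_ms(50);        // estimated render-to-capture delay
      apm->ProcessStream(capture_frame);   // one 10 ms frame of capture audio

      int rms = apm->level_estimator()->RMS();  // 0..127, dB below full scale since the last call
      (void)rms;
      delete apm;
    }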
/external/webrtc/webrtc/audio/

  webrtc_audio.gypi
      17  'audio/audio_receive_stream.cc',
      18  'audio/audio_receive_stream.h',
      19  'audio/audio_send_stream.cc',
      20  'audio/audio_send_stream.h',
      21  'audio/audio_sink.h',
      22  'audio/audio_state.cc',
      23  'audio/audio_state.h',
      24  'audio/conversion.h',
      25  'audio/scoped_voe_interface.h',
/external/webrtc/webrtc/modules/audio_device/ios/

  audio_device_ios.mm
      34  // audio session. This variable is used to ensure that we only activate an audio
      60  // will be set to this value as well to avoid resampling the the audio unit's
      67  // ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
      74  // in the I/O audio unit. Initial tests have shown that it is possible to use
      78  // audio unit. Hence, we will not hit a RTC_CHECK in
      82  // Number of bytes per audio sample for 16-bit signed integer representation.
      98  // Verifies that the current audio session supports input audio and that the
     102  // Ensure that the device currently supports audio input.
     104  LOG(LS_ERROR) << "No audio input path is available!";
     121  // Activates an audio session suitable for full duplex VoIP sessions when
    [all …]
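For reference, the numbers quoted in the comment at line 67 are self-consistent: a 512-frame hardware I/O buffer at the 48 kHz rate implied by that figure lasts 512 / 48000 ≈ 10.67 ms, and the FineAudioBuffer mentioned there exists to repackage such buffers into the fixed 10 ms chunks the rest of the WebRTC audio pipeline consumes.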
/external/autotest/client/site_tests/audio_AudioCorruption/

  control
       7  PURPOSE = "Verify that Chrome can handle corrupted mp3 audio"
       9  This test will fail if Chrome can't catch error for playing corrupted mp3 audio.
      14  TEST_CLASS = "audio"
      18  This test verifies Chrome can catch error for playing corrupted mp3 audio.
      21  audio = 'http://commondatastorage.googleapis.com/chromiumos-test-assets-public/audio_AudioCorruptio…
      22  job.run_test('audio_AudioCorruption', audio=audio)
/external/libvorbis/doc/

  a1-encapsulation-ogg.tex
       9  streams to encapsulate Vorbis compressed audio packet data into file
      13  of Vorbis audio packets.
      36  The Ogg stream must be unmultiplexed (only one stream, a Vorbis audio stream, per link)
      44  for low-bitrate movies consisting of DivX video and Vorbis audio.
      45  However, a 'Vorbis I audio file' is taken to imply Vorbis audio
      47  audio player' is not required to implement Ogg support beyond the
      59  while visual media should use \literal{video/ogg}, and audio
      60  \literal{audio/ogg}. Vorbis data encapsulated in Ogg may appear
      62  \literal{audio/vorbis} + \literal{audio/vorbis-config}.
      73  uniquely identifies a stream as Vorbis audio, is placed alone in the
    [all …]
/external/webrtc/webrtc/modules/audio_coding/codecs/g711/

  audio_encoder_pcm.cc
    in EncodeInternal():
      82  rtc::ArrayView<const int16_t> audio,    [argument]
      88  speech_buffer_.insert(speech_buffer_.end(), audio.begin(), audio.end());
    in EncodeCall():
     110  size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,    [argument]
     113  return WebRtcG711_EncodeA(audio, input_len, encoded);
     123  size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,    [argument]
     126  return WebRtcG711_EncodeU(audio, input_len, encoded);
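AudioEncoderPcmA and AudioEncoderPcmU buffer 16-bit samples in EncodeInternal and hand them to the G.711 C entry points shown above. A minimal direct use of the A-law call is sketched below, assuming the size_t/uint8_t* signature this tree passes through EncodeCall; the include path is taken from this source layout and may differ in other revisions.

    #include <cstdint>
    #include <vector>

    // Include path per this tree's layout (modules/audio_coding/codecs/g711); may differ elsewhere.
    #include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"

    std::vector<uint8_t> EncodeALaw(const std::vector<int16_t>& pcm) {
      // G.711 is sample-for-sample: one encoded byte per 16-bit input sample.
      std::vector<uint8_t> encoded(pcm.size());
      size_t written = WebRtcG711_EncodeA(pcm.data(), pcm.size(), encoded.data());
      encoded.resize(written);
      return encoded;
    }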
/external/autotest/client/site_tests/audio_AlsaLoopback/

  control
       5  AUTHOR = 'The Chromium OS Audiovideo Team, chromeos-audio@google.com'
       7  PURPOSE = 'Test that audio played to line out can be heard at mic in.'
       9  Check if the audio played to line out is heard by arecord at mic in.
      11  ATTRIBUTES = "suite:audio"
      14  TEST_CLASS = "audio"
      19  Test that audio playback and capture are working.
/external/autotest/client/site_tests/audio_CrasLoopback/

  control
       5  AUTHOR = 'The Chromium OS Audiovideo Team, chromeos-audio@google.com'
       7  PURPOSE = 'Test that audio played to line out can be heard at mic in.'
       9  Check if the audio played to line out is heard by cras_test_client at mic in.
      11  ATTRIBUTES = "suite:audio, suite:partners"
      14  TEST_CLASS = "audio"
      19  Test that audio playback and capture are working.
/external/autotest/test_suites/

  control.chameleon_audio_nightly
       7  PURPOSE = "A Chameleon audio test suite."
      15  Audio tests which require chameleon and audio boards connected.
      16  The Audio and Chameleon boards can emulate audio jack audio activity
      17  in order to test the Chrome OS audio stack.
      20  audio-box environment for end-to-end testing. Details on go/audioboard
      21  go/audiobox, go/ab-care-and-feed, and go/chameleon-audio-conf.
      47  'chromeos-audio-bugs@google.com']

  control.chameleon_audio_perbuild
       7  PURPOSE = "A Chameleon audio test suite."
      15  Audio tests which require chameleon and audio boards connected.
      16  The Audio and Chameleon boards can emulate audio jack audio activity
      17  in order to test the Chrome OS audio stack.
      20  audio-box environment for end-to-end testing. Details on go/audioboard
      21  go/audiobox, go/ab-care-and-feed, and go/chameleon-audio-conf.
      47  'chromeos-audio-bugs@google.com']

  control.chameleon_audio
       7  PURPOSE = "A Chameleon audio test suite."
      15  Audio tests which require chameleon and audio boards connected.
      16  The Audio and Chameleon boards can emulate audio jack audio activity
      17  in order to test the Chrome OS audio stack.
      20  audio-box environment for end-to-end testing. Details on go/audioboard
      21  go/audiobox, go/ab-care-and-feed, and go/chameleon-audio-conf.
      47  'chromeos-audio-bugs@google.com']
/external/webrtc/webrtc/tools/e2e_quality/audio/

  audio_e2e_harness.cc
    in RunHarness():
      36  VoEAudioProcessing* audio = VoEAudioProcessing::GetInterface(voe);    [local]
      37  ASSERT_TRUE(audio != NULL);
      88  ASSERT_EQ(0, audio->SetAgcStatus(false));
      89  ASSERT_EQ(0, audio->SetEcStatus(false));
      90  ASSERT_EQ(0, audio->EnableHighPassFilter(false));
      91  ASSERT_EQ(0, audio->SetNsStatus(false));
/external/webrtc/talk/app/webrtc/

  remoteaudiosource.cc
    in OnData():
      63  void OnData(const AudioSinkInterface::Data& audio) override {    [argument]
      65  source_->OnData(audio);
     154  void RemoteAudioSource::OnData(const AudioSinkInterface::Data& audio) {    [argument]
     158  sink->OnData(audio.data, 16, audio.sample_rate, audio.channels,
     159  audio.samples_per_channel);
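RemoteAudioSource::OnData fans the decoded 16-bit audio out to registered track sinks with the (data, 16, sample_rate, channels, samples_per_channel) call visible at lines 158-159. An application taps that path by adding its own webrtc::AudioTrackSinkInterface to the remote audio track; a hedged sketch of such a sink follows, where the header name assumes this tree's talk/app/webrtc layout and the track wiring (remote_track->AddSink(&sink)) is done elsewhere.

    #include <cstddef>
    #include <cstdint>
    // In this tree the interface lives under talk/app/webrtc/; newer trees use api/.
    #include "talk/app/webrtc/mediastreaminterface.h"

    // Receives the same arguments that RemoteAudioSource::OnData passes to each sink.
    class LevelMeterSink : public webrtc::AudioTrackSinkInterface {
     public:
      void OnData(const void* audio_data, int bits_per_sample, int sample_rate,
                  size_t number_of_channels, size_t number_of_frames) override {
        if (bits_per_sample != 16) return;  // RemoteAudioSource always passes 16
        const int16_t* samples = static_cast<const int16_t*>(audio_data);
        int peak = 0;
        for (size_t i = 0; i < number_of_channels * number_of_frames; ++i) {
          int v = samples[i];
          if (v < 0) v = -v;
          if (v > peak) peak = v;
        }
        last_peak_ = peak;  // most recent peak absolute sample value
      }

     private:
      int last_peak_ = 0;
    };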
/external/webrtc/webrtc/modules/audio_processing/agc/

  agc.cc
    in AnalyzePreproc():
      42  float Agc::AnalyzePreproc(const int16_t* audio, size_t length) {    [argument]
      46  if (audio[i] == 32767 || audio[i] == -32768)
    in Process():
      52  int Agc::Process(const int16_t* audio, size_t length, int sample_rate_hz) {    [argument]
      53  vad_.ProcessChunk(audio, length, sample_rate_hz);
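Agc::AnalyzePreproc above counts capture samples stuck at the int16 full-scale values, and Agc::Process feeds each chunk to the voice-activity detector. A standalone version of that saturation check, independent of the Agc class, might look like this:

    #include <cstddef>
    #include <cstdint>

    // Fraction of samples sitting at the int16 rails, in the spirit of Agc::AnalyzePreproc:
    // a value near 1.0 means the capture signal is heavily clipped.
    float ClippedSampleRatio(const int16_t* audio, size_t length) {
      if (length == 0) return 0.0f;
      size_t clipped = 0;
      for (size_t i = 0; i < length; ++i) {
        if (audio[i] == 32767 || audio[i] == -32768)
          ++clipped;
      }
      return static_cast<float>(clipped) / static_cast<float>(length);
    }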
/external/autotest/server/site_tests/audio_AudioNodeSwitch/

  control.USB
       5  from autotest_lib.client.cros.audio import audio_test_data
      11  PURPOSE = "Check if correct audio channel selected."
      12  CRITERIA = "This test will fail if expected audio channel is not selected."
      15  TEST_CLASS = "audio"
      20  This test remotely tests audio nodes selection.

  control.HDMI
       5  from autotest_lib.client.cros.audio import audio_test_data
      11  PURPOSE = "Check if correct audio channel selected."
      12  CRITERIA = "This test will fail if expected audio channel is not selected."
      15  TEST_CLASS = "audio"
      20  This test remotely tests audio nodes selection.

  control.JACK
       5  from autotest_lib.client.cros.audio import audio_test_data
      11  PURPOSE = "Check if correct audio channel selected."
      12  CRITERIA = "This test will fail if expected audio channel is not selected."
      15  TEST_CLASS = "audio"
      20  This test remotely tests audio nodes selection.
/external/autotest/client/site_tests/audio_LoopbackLatency/

  control
       7  PURPOSE = 'Test that audio loopback latency'
       9  Check if the audio played to line out can be heard mic in, and assert
      12  ATTRIBUTES = "suite:audio"
      15  TEST_CLASS = "audio"
      20  Test that audio loopback latency is within certain limit.