Searched refs:audio_processor (Results 1 – 7 of 7) sorted by relevance

/external/tensorflow/tensorflow/examples/speech_commands/
input_data_test.py
80 audio_processor = input_data.AudioProcessor(
82 result_data, result_labels = audio_processor.get_data(
104 audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
107 self.assertLess(0, audio_processor.set_size("training"))
108 self.assertIn("training", audio_processor.data_index)
109 self.assertIn("validation", audio_processor.data_index)
110 self.assertIn("testing", audio_processor.data_index)
112 audio_processor.word_to_index["c"])
140 audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
143 self.assertEqual(10, len(audio_processor.background_data))
[all …]
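
These test matches exercise AudioProcessor's dataset-partitioning API. A minimal sketch of that pattern, assuming the TF 1.x speech_commands signatures (argument order may differ in other revisions, and the data directory must already contain word/clip.wav files):

    from tensorflow.examples.speech_commands import input_data, models

    # Assumed TF 1.x signature:
    # prepare_model_settings(label_count, sample_rate, clip_duration_ms,
    #                        window_size_ms, window_stride_ms, dct_coefficient_count)
    model_settings = models.prepare_model_settings(4, 16000, 1000, 30, 10, 40)

    # "" for data_url skips downloading; the first pair of 10s are the
    # silence%/unknown% shares, the second pair the validation%/testing%
    # splits, mirroring the values in the matches above.
    audio_processor = input_data.AudioProcessor(
        "", "/tmp/speech_dataset", 10, 10, ["a", "b", "c"], 10, 10,
        model_settings)

    assert audio_processor.set_size("training") > 0
    for partition in ("training", "validation", "testing"):
        assert partition in audio_processor.data_index
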
train.py
102 audio_processor = input_data.AudioProcessor(
222 f.write('\n'.join(audio_processor.words_list))
235 train_fingerprints, train_ground_truth = audio_processor.get_data(
264 set_size = audio_processor.set_size('validation')
269 audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,
300 set_size = audio_processor.set_size('testing')
305 test_fingerprints, test_ground_truth = audio_processor.get_data(
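
train.py walks each partition in fixed-size batches via set_size and get_data; a condensed sketch of the validation loop shown in the matches (TF 1.x form, where get_data takes a session; batch_size and sess come from the surrounding script):

    # Assumed TF 1.x order:
    # get_data(how_many, offset, model_settings, background_frequency,
    #          background_volume_range, time_shift, mode, sess)
    set_size = audio_processor.set_size('validation')
    for i in range(0, set_size, batch_size):
        validation_fingerprints, validation_ground_truth = audio_processor.get_data(
            batch_size, i, model_settings, 0.0, 0.0, 0, 'validation', sess)
        # ...feed the batch to the evaluation step here...
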
generate_streaming_test_wav.py
92 audio_processor = input_data.AudioProcessor(
116 background_index = np.random.randint(len(audio_processor.background_data))
117 background_samples = audio_processor.background_data[background_index]
135 all_test_data, all_test_labels = audio_processor.get_unprocessed_data(
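
Here background_data is a plain list of loaded noise clips, sampled directly; a sketch of the clip-and-window selection around these matches (the offset step is paraphrased from the script, not shown in the results):

    import numpy as np

    # Random clip, then a random window of desired_samples inside it.
    background_index = np.random.randint(len(audio_processor.background_data))
    background_samples = audio_processor.background_data[background_index]
    desired = model_settings['desired_samples']
    background_offset = np.random.randint(0, len(background_samples) - desired)
    background_clip = background_samples[background_offset:
                                         background_offset + desired]
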
wav_to_features.py
70 audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,
73 results = audio_processor.get_features_for_wav(input_wav, model_settings,
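
wav_to_features.py shows that an AudioProcessor built with no dataset at all (None/0 arguments) still works for one-off feature extraction; the call pattern from the matches, completed (input_wav is a path to a WAV file, sess a tf.Session in this TF 1.x form, and the results[0] layout is an assumption):

    # No dataset: data_url/data_dir are None and every split percentage is 0.
    audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,
                                                model_settings)
    results = audio_processor.get_features_for_wav(input_wav, model_settings,
                                                   sess)
    features = results[0]  # feature array for this single clip
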
/external/webrtc/sdk/android/src/jni/pc/
peer_connection_factory.cc
260 rtc::scoped_refptr<AudioProcessing> audio_processor, in CreatePeerConnectionFactoryForJava() argument
319 media_dependencies.audio_processing = std::move(audio_processor); in CreatePeerConnectionFactoryForJava()
357 rtc::scoped_refptr<AudioProcessing> audio_processor = in JNI_PeerConnectionFactory_CreatePeerConnectionFactory() local
365 audio_processor ? audio_processor : CreateAudioProcessing(), in JNI_PeerConnectionFactory_CreatePeerConnectionFactory()
/external/tensorflow/tensorflow/lite/micro/examples/micro_speech/train/
train_micro_speech_model.ipynb
419 "audio_processor = input_data.AudioProcessor(\n",
448 " data, _ = audio_processor.get_data(1, i*1, model_settings,\n",
489 " test_data, test_labels = audio_processor.get_data(\n",
/external/webrtc/pc/
stats_collector.cc
1215 auto audio_processor(track->GetAudioProcessor()); in UpdateReportFromAudioTrack() local
1217 if (audio_processor.get()) { in UpdateReportFromAudioTrack()
1219 audio_processor->GetStats(has_remote_tracks); in UpdateReportFromAudioTrack()