
Searched refs:num_frames (Results 1 – 25 of 103) sorted by relevance


/external/honggfuzz/linux/
unwind.c 124 size_t num_frames = 0, mapsCnt = 0; in arch_unwindStack() local
133 return num_frames; in arch_unwindStack()
142 return num_frames; in arch_unwindStack()
152 return num_frames; in arch_unwindStack()
155 for (num_frames = 0; unw_step(&c) > 0 && num_frames < _HF_MAX_FUNCS; num_frames++) { in arch_unwindStack()
160 LOG_E("[pid='%d'] [%zd] failed to read IP (%s)", pid, num_frames, UNW_ER[-ret]); in arch_unwindStack()
161 funcs[num_frames].pc = 0; in arch_unwindStack()
163 funcs[num_frames].pc = (void*)(uintptr_t)ip; in arch_unwindStack()
166 memcpy(funcs[num_frames].mapName, mapName, sizeof(funcs[num_frames].mapName)); in arch_unwindStack()
168 strncpy(funcs[num_frames].mapName, "UNKNOWN", sizeof(funcs[num_frames].mapName)); in arch_unwindStack()
[all …]
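
The honggfuzz hits above count stack frames by stepping a libunwind cursor until unw_step() stops or a frame cap is reached, reading the IP register for each frame. A minimal local-unwind sketch of the same loop (honggfuzz itself unwinds a remote pid; kMaxFrames below is a hypothetical stand-in for _HF_MAX_FUNCS):

#define UNW_LOCAL_ONLY
#include <libunwind.h>
#include <cstdio>

// Walk the current call stack and return the number of frames seen,
// mirroring the unw_step()/unw_get_reg() loop in unwind.c above.
static size_t CountFrames() {
  constexpr size_t kMaxFrames = 200;  // hypothetical cap, stands in for _HF_MAX_FUNCS
  unw_context_t context;
  unw_cursor_t cursor;
  if (unw_getcontext(&context) != 0) return 0;
  if (unw_init_local(&cursor, &context) != 0) return 0;

  size_t num_frames = 0;
  for (; unw_step(&cursor) > 0 && num_frames < kMaxFrames; ++num_frames) {
    unw_word_t ip = 0;
    if (unw_get_reg(&cursor, UNW_REG_IP, &ip) < 0) ip = 0;  // keep counting even if IP is unreadable
    std::printf("frame %zu: pc=%#lx\n", num_frames, static_cast<unsigned long>(ip));
  }
  return num_frames;
}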
/external/compiler-rt/test/asan/TestCases/
debug_stacks.cc 29 size_t num_frames = 100; in main() local
31 num_frames = __asan_get_alloc_stack(mem, trace, num_frames, &thread_id); in main()
33 fprintf(stderr, "alloc stack retval %s\n", (num_frames > 0 && num_frames < 10) in main()
43 num_frames = 100; in main()
44 num_frames = __asan_get_free_stack(mem, trace, num_frames, &thread_id); in main()
46 fprintf(stderr, "free stack retval %s\n", (num_frames > 0 && num_frames < 10) in main()
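
debug_stacks.cc exercises the public ASan interface that reports how many frames were recorded for a heap block's allocation and free sites. A small sketch of the allocation-stack query, assuming the binary is built with -fsanitize=address (names and sizes are illustrative):

#include <sanitizer/asan_interface.h>
#include <cstdio>
#include <cstdlib>

// Ask ASan for the allocation stack it recorded for a heap pointer.
// Only meaningful when compiled and linked with -fsanitize=address.
int main() {
  char* mem = static_cast<char*>(malloc(16));
  void* trace[100];
  int thread_id = 0;
  size_t num_frames = __asan_get_alloc_stack(mem, trace, 100, &thread_id);
  std::fprintf(stderr, "alloc stack has %zu frame(s), thread %d\n", num_frames, thread_id);
  free(mem);
  return 0;
}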
/external/tensorflow/tensorflow/lite/experimental/microfrontend/ops/
audio_microfrontend_op.cc 87 DimensionHandle num_frames = ctx->Dim(input, 0); in __anon888eebf60102() local
88 if (ctx->Value(num_frames) < window_size) { in __anon888eebf60102()
89 num_frames = ctx->MakeDim(0); in __anon888eebf60102()
91 TF_RETURN_IF_ERROR(ctx->Subtract(num_frames, window_size, &num_frames)); in __anon888eebf60102()
93 ctx->Divide(num_frames, window_step, false, &num_frames)); in __anon888eebf60102()
95 ctx->Divide(num_frames, frame_stride, false, &num_frames)); in __anon888eebf60102()
96 TF_RETURN_IF_ERROR(ctx->Add(num_frames, 1, &num_frames)); in __anon888eebf60102()
104 ShapeHandle output = ctx->MakeShape({num_frames, num_features}); in __anon888eebf60102()
220 int num_frames = 0; in Compute() local
223 num_frames = (audio_size - window_size) / window_step + 1; in Compute()
[all …]
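
Both the shape function and Compute() above derive the frame count from the same relation: num_frames = (audio_size - window_size) / window_step + 1 when the input holds at least one full window, otherwise 0. A stand-alone sketch of that arithmetic (the sample values are illustrative):

#include <cstdio>

// Number of complete analysis windows that fit in audio_size samples.
int NumFrames(int audio_size, int window_size, int window_step) {
  if (audio_size < window_size) return 0;
  return (audio_size - window_size) / window_step + 1;
}

int main() {
  // e.g. 1 s of 16 kHz audio, 25 ms windows (400 samples), 10 ms step (160 samples)
  std::printf("%d\n", NumFrames(16000, 400, 160));  // prints 98
  return 0;
}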
/external/webrtc/webrtc/modules/audio_processing/vad/
vad_audio_proc_unittest.cc 49 if (features.num_frames > 0) { in TEST()
50 ASSERT_LT(features.num_frames, kMaxNumFrames); in TEST()
52 const size_t num_frames = features.num_frames; in TEST() local
53 ASSERT_EQ(num_frames, fread(sp, sizeof(sp[0]), num_frames, peak_file)); in TEST()
54 for (size_t n = 0; n < features.num_frames; n++) in TEST()
standalone_vad_unittest.cc 81 int num_frames = 0; in TEST() local
84 num_frames++; in TEST()
85 if (num_frames == kNumVadFramesToProcess) { in TEST()
86 num_frames = 0; in TEST()
/external/libvpx/libvpx/test/
vpxdec.sh 101 local num_frames=$(${VPX_TEST_PREFIX} "${decoder}" \
104 if [ "$num_frames" -ne "$expected" ]; then
105 elog "Output frames ($num_frames) != expected ($expected)"
118 local num_frames=$(${TIMEOUT} ${VPX_TEST_PREFIX} "${decoder}" \
121 if [ -z "$num_frames" ] || [ "$num_frames" -ne "$expected" ]; then
122 elog "Output frames ($num_frames) != expected ($expected)"
/external/webrtc/webrtc/common_audio/include/
audio_util.h 73 int num_frames, in CopyAudioIfNeeded() argument
78 std::copy(src[i], src[i] + num_frames, dest[i]); in CopyAudioIfNeeded()
125 int num_frames, in UpmixMonoToInterleaved() argument
129 for (int i = 0; i < num_frames; ++i) { in UpmixMonoToInterleaved()
138 size_t num_frames, in DownmixToMono() argument
141 for (size_t i = 0; i < num_frames; ++i) { in DownmixToMono()
154 size_t num_frames, in DownmixInterleavedToMonoImpl() argument
158 RTC_DCHECK_GT(num_frames, 0u); in DownmixInterleavedToMonoImpl()
160 const T* const end = interleaved + num_frames * num_channels; in DownmixInterleavedToMonoImpl()
176 size_t num_frames,
[all …]
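
The audio_util.h hits are frame-oriented helpers; DownmixInterleavedToMonoImpl, for instance, averages num_channels interleaved samples per frame into one mono sample. A generic sketch of that pattern for int16 audio (not WebRTC's exact implementation):

#include <cstddef>
#include <cstdint>

// Average each group of num_channels interleaved int16 samples into one
// mono output sample, one frame at a time.
void DownmixInterleavedToMono(const int16_t* interleaved, size_t num_frames,
                              size_t num_channels, int16_t* mono) {
  for (size_t i = 0; i < num_frames; ++i) {
    int32_t sum = 0;
    for (size_t ch = 0; ch < num_channels; ++ch)
      sum += interleaved[i * num_channels + ch];
    mono[i] = static_cast<int16_t>(sum / static_cast<int32_t>(num_channels));
  }
}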
/external/webrtc/webrtc/common_audio/
blocker.cc 24 size_t num_frames, in AddFrames() argument
29 for (size_t j = 0; j < num_frames; ++j) { in AddFrames()
39 size_t num_frames, in CopyFrames() argument
46 num_frames * sizeof(dst[i][dst_start_index])); in CopyFrames()
53 size_t num_frames, in MoveFrames() argument
60 num_frames * sizeof(dst[i][dst_start_index])); in MoveFrames()
66 size_t num_frames, in ZeroOut() argument
70 num_frames * sizeof(buffer[i][starting_idx])); in ZeroOut()
77 size_t num_frames, in ApplyWindow() argument
81 for (size_t j = 0; j < num_frames; ++j) { in ApplyWindow()
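
blocker.cc windows each input frame and accumulates it into the output, one channel at a time. A sketch of the per-sample core that ApplyWindow() and AddFrames() perform, combined here into one hypothetical helper:

#include <cstddef>

// Multiply a frame by a window and accumulate it into the output block,
// the per-sample work done by blocker.cc's ApplyWindow()/AddFrames().
void WindowAndAdd(const float* input, const float* window,
                  size_t num_frames, float* output) {
  for (size_t j = 0; j < num_frames; ++j)
    output[j] += input[j] * window[j];
}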
channel_buffer.h 42 ChannelBuffer(size_t num_frames,
45 : data_(new T[num_frames * num_channels]()), in data_()
48 num_frames_(num_frames), in data_()
49 num_frames_per_band_(num_frames / num_bands), in data_()
116 size_t num_frames() const { return num_frames_; } in num_frames() function
145 IFChannelBuffer(size_t num_frames, size_t num_channels, size_t num_bands = 1);
152 size_t num_frames() const { return ibuf_.num_frames(); } in num_frames() function
blocker_unittest.cc 22 size_t num_frames, in ProcessBlock() argument
27 for (size_t j = 0; j < num_frames; ++j) { in ProcessBlock()
38 size_t num_frames, in ProcessBlock() argument
43 for (size_t j = 0; j < num_frames; ++j) { in ProcessBlock()
61 size_t num_frames, in RunTest() argument
70 while (end < num_frames) { in RunTest()
87 size_t num_frames) { in ValidateSignalEquality() argument
89 for (size_t j = 0; j < num_frames; ++j) { in ValidateSignalEquality()
97 size_t num_frames, in ValidateInitialDelay() argument
100 for (size_t j = 0; j < num_frames; ++j) { in ValidateInitialDelay()
[all …]
channel_buffer.cc 15 IFChannelBuffer::IFChannelBuffer(size_t num_frames, in IFChannelBuffer() argument
19 ibuf_(num_frames, num_channels, num_bands), in IFChannelBuffer()
21 fbuf_(num_frames, num_channels, num_bands) {} in IFChannelBuffer()
51 for (size_t j = 0; j < ibuf_.num_frames(); ++j) { in RefreshF()
66 ibuf_.num_frames(), in RefreshI()
lapped_transform.cc 23 size_t num_frames, in ProcessBlock() argument
29 RTC_CHECK_EQ(parent_->block_length_, num_frames); in ProcessBlock()
33 num_frames * sizeof(*input[0])); in ProcessBlock()
39 RealFourier::FftOrder(num_frames)); in ProcessBlock()
51 num_frames * sizeof(*input[0])); in ProcessBlock()
audio_converter_unittest.cc 40 EXPECT_EQ(ref.num_frames(), test.num_frames()); in VerifyParams()
55 delay <= std::min(expected_delay + 1, ref.num_frames()); in ComputeSNR()
61 for (size_t j = 0; j < ref.num_frames() - delay; ++j) { in ComputeSNR()
69 const size_t length = ref.num_channels() * (ref.num_frames() - delay); in ComputeSNR()
/external/tensorflow/tensorflow/python/ops/signal/
shape_ops.py 40 num_frames = None
47 num_frames = max(0, -(-frame_axis // frame_step))
50 num_frames = max(
52 return outer_dimensions + [num_frames, frame_length] + inner_dimensions
129 num_frames = -(-length_samples // frame_step)
134 0, frame_length + frame_step * (num_frames - 1) - length_samples)
147 num_frames = math_ops.maximum(
169 math_ops.range(num_frames) * subframes_per_hop, [num_frames, 1])
186 array_ops.concat([outer_dimensions, [num_frames, frame_length],
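
shape_ops.py computes the padded frame count with Python's -(-length_samples // frame_step), i.e. ceiling division. The same integer arithmetic written as a hypothetical C++ helper:

#include <cstdio>
#include <cstdint>

// Ceiling division for non-negative operands: ceil(length / step).
// Python's -(-length_samples // frame_step) yields the same value.
int64_t CeilDiv(int64_t length_samples, int64_t frame_step) {
  return (length_samples + frame_step - 1) / frame_step;
}

int main() {
  std::printf("%lld\n", static_cast<long long>(CeilDiv(16000, 160)));  // 100
  std::printf("%lld\n", static_cast<long long>(CeilDiv(16001, 160)));  // 101
  return 0;
}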
/external/webrtc/webrtc/modules/video_coding/codecs/test/
videoprocessor_integrationtest.cc 65 int num_frames; member
232 void ResetRateControlMetrics(int num_frames) { in ResetRateControlMetrics() argument
254 num_frames_to_hit_target_ = num_frames; in ResetRateControlMetrics()
442 int num_frames = rate_profile.num_frames; in ProcessFramesAndVerify() local
449 frame_number < num_frames) { in ProcessFramesAndVerify()
490 EXPECT_EQ(num_frames, frame_number); in ProcessFramesAndVerify()
491 EXPECT_EQ(num_frames + 1, static_cast<int>(stats_.stats_.size())); in ProcessFramesAndVerify()
590 rate_profile.num_frames = kNbrFramesShort; in TEST_F()
612 rate_profile.num_frames = kNbrFramesShort; in TEST_F()
638 rate_profile.num_frames = kNbrFramesLong; in TEST_F()
[all …]
/external/libaom/libaom/examples/
lightfield_decoder.c 255 int num_frames = 0; in main() local
257 ++num_frames; in main()
259 if (num_frames < 1) die("Input light field has no frames."); in main()
263 (unsigned char **)malloc(num_frames * sizeof(unsigned char *)); in main()
264 size_t *frame_sizes = (size_t *)malloc(num_frames * sizeof(size_t)); in main()
267 for (int f = 0; f < num_frames; ++f) { in main()
274 printf("Read %d frames.\n", num_frames); in main()
316 if (image_idx >= num_frames) { in main()
318 num_frames); in main()
351 for (int f = 0; f < num_frames; ++f) { in main()
lightfield_bitstream_parsing.c 261 int num_frames = 0; in main() local
263 ++num_frames; in main()
265 if (num_frames < 1) die("Input light field has no frames."); in main()
269 (unsigned char **)malloc(num_frames * sizeof(unsigned char *)); in main()
270 size_t *frame_sizes = (size_t *)malloc(num_frames * sizeof(size_t)); in main()
273 for (int f = 0; f < num_frames; ++f) { in main()
282 printf("Read %d frames.\n", num_frames); in main()
380 if (tiles[num_tiles].image_idx >= num_frames) { in main()
382 tiles[num_tiles].image_idx, num_frames); in main()
403 for (int f = 0; f < num_frames; ++f) { in main()
/external/webrtc/webrtc/modules/audio_processing/
splitting_filter.cc 21 size_t num_frames) in SplittingFilter() argument
28 three_band_filter_banks_.push_back(new ThreeBandFilterBank(num_frames)); in SplittingFilter()
37 RTC_DCHECK_EQ(data->num_frames(), in Analysis()
50 RTC_DCHECK_EQ(data->num_frames(), in Synthesis()
64 data->num_frames(), in TwoBandsAnalysis()
90 data->num_frames(), in ThreeBandsAnalysis()
audio_processing_impl.cc 352 formats_.api_format.reverse_output_stream().num_frames() == 0 in InitializeLocked()
353 ? formats_.rev_proc_format.num_frames() in InitializeLocked()
354 : formats_.api_format.reverse_output_stream().num_frames(); in InitializeLocked()
357 formats_.api_format.reverse_input_stream().num_frames(), in InitializeLocked()
359 formats_.rev_proc_format.num_frames(), in InitializeLocked()
365 formats_.api_format.reverse_input_stream().num_frames(), in InitializeLocked()
367 formats_.api_format.reverse_output_stream().num_frames()); in InitializeLocked()
376 new AudioBuffer(formats_.api_format.input_stream().num_frames(), in InitializeLocked()
378 capture_nonlocked_.fwd_proc_format.num_frames(), in InitializeLocked()
380 formats_.api_format.output_stream().num_frames())); in InitializeLocked()
[all …]
/external/webrtc/webrtc/modules/audio_processing/test/
debug_dump_test.cc 34 if (!buffer_ref.get() || buffer_ref->num_frames() != config.num_frames() || in MaybeResetBuffer()
36 buffer_ref.reset(new ChannelBuffer<float>(config.num_frames(), in MaybeResetBuffer()
128 input_(new ChannelBuffer<float>(input_config_.num_frames(), in DebugDumpGenerator()
130 reverse_(new ChannelBuffer<float>(reverse_config_.num_frames(), in DebugDumpGenerator()
132 output_(new ChannelBuffer<float>(output_config_.num_frames(), in DebugDumpGenerator()
215 const size_t num_frames = config.num_frames(); in ReadAndDeinterleave() local
218 std::vector<int16_t> signal(channels * num_frames); in ReadAndDeinterleave()
220 audio->Read(num_frames * channels, &signal[0]); in ReadAndDeinterleave()
225 for (size_t i = 0; i < num_frames; ++i) { in ReadAndDeinterleave()
332 ASSERT_EQ(input_config_.num_frames() * sizeof(float), in OnStreamEvent()
[all …]
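
ReadAndDeinterleave() above pulls channels * num_frames interleaved int16 samples from a file and splits them into per-channel float planes. A minimal deinterleave sketch of that step (the container layout is illustrative, not the ChannelBuffer API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Split an interleaved int16 block into per-channel float vectors,
// converting each sample to float along the way.
std::vector<std::vector<float>> Deinterleave(const std::vector<int16_t>& signal,
                                             size_t num_frames, size_t channels) {
  std::vector<std::vector<float>> planes(channels, std::vector<float>(num_frames));
  for (size_t i = 0; i < num_frames; ++i)
    for (size_t c = 0; c < channels; ++c)
      planes[c][i] = static_cast<float>(signal[i * channels + c]);
  return planes;
}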
/external/webrtc/webrtc/modules/audio_device/ios/
audio_device_unittest_ios.cc 96 virtual void Write(const void* source, size_t num_frames) = 0;
97 virtual void Read(void* destination, size_t num_frames) = 0;
127 void Write(const void* source, size_t num_frames) override {} in Write() argument
131 void Read(void* destination, size_t num_frames) override { in Read() argument
133 num_frames * sizeof(int16_t)); in Read()
134 file_pos_ += num_frames; in Read()
177 void Write(const void* source, size_t num_frames) override { in Write() argument
178 ASSERT_EQ(num_frames, frames_per_buffer_); in Write()
198 void Read(void* destination, size_t num_frames) override { in Read() argument
199 ASSERT_EQ(num_frames, frames_per_buffer_); in Read()
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/
ctc_loss_op_test.py 315 num_frames = 12
320 logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
327 logit_length = [num_frames] * batch_size
361 num_frames = 12
362 logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
373 logit_lengths = [num_frames] * batch_size
414 num_frames = 12
415 logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
426 logit_lengths = [num_frames] * batch_size
468 num_frames = 12
[all …]
/external/webrtc/webrtc/modules/audio_device/android/
audio_device_unittest.cc 94 virtual void Write(const void* source, size_t num_frames) = 0;
95 virtual void Read(void* destination, size_t num_frames) = 0;
125 void Write(const void* source, size_t num_frames) override {} in Write() argument
129 void Read(void* destination, size_t num_frames) override { in Read() argument
132 num_frames * sizeof(int16_t)); in Read()
133 file_pos_ += num_frames; in Read()
178 void Write(const void* source, size_t num_frames) override { in Write() argument
179 ASSERT_EQ(num_frames, frames_per_buffer_); in Write()
201 void Read(void* destination, size_t num_frames) override { in Read() argument
202 ASSERT_EQ(num_frames, frames_per_buffer_); in Read()
[all …]
/external/tensorflow/tensorflow/lite/experimental/microfrontend/
audio_microfrontend.cc 106 int num_frames = 0; in Prepare() local
108 num_frames = (input->dims->data[0] - data->state->window.size) / in Prepare()
112 output_size->data[0] = num_frames; in Prepare()
127 int num_frames = 0; in GenerateFeatures() local
129 num_frames = (input->dims->data[0] - data->state->window.size) / in GenerateFeatures()
133 std::vector<std::vector<T>> frame_buffer(num_frames); in GenerateFeatures()
/external/webrtc/webrtc/tools/agc/
activity_metric.cc 111 if (features.num_frames > 0) { in AddAudio()
119 for (size_t n = 0; n < features.num_frames; n++) { in AddAudio()
128 for (size_t n = 0; n < features.num_frames; n++) { in AddAudio()
142 return static_cast<int>(features.num_frames); in AddAudio()
236 int num_frames = 0; in void_main() local
277 num_frames++; in void_main()
