/external/tensorflow/tensorflow/lite/kernels/

cast.cc
    52  void copyCast(const FromT* in, ToT* out, int num_elements) {    in copyCast() [argument]
    53  std::transform(in, in + num_elements, out,    in copyCast()
    58  void copyCast(const std::complex<float>* in, ToT* out, int num_elements) {    in copyCast() [argument]
    59  std::transform(in, in + num_elements, out, [](std::complex<float> a) {    in copyCast()
    66  int num_elements) {    in copyCast() [argument]
    67  std::transform(in, in + num_elements, out,    in copyCast()
    73  int num_elements) {    in copyToTensor() [argument]
    76  copyCast(in, out->data.i64, num_elements);    in copyToTensor()
    79  copyCast(in, out->data.i32, num_elements);    in copyToTensor()
    82  copyCast(in, out->data.uint8, num_elements);    in copyToTensor()
    [all …]
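The cast.cc matches above show TFLite's element-wise cast pattern: a templated copyCast runs std::transform over the flat buffer, a std::complex<float> overload handles complex input, and copyToTensor dispatches on the destination tensor type (data.i64 / data.i32 / data.uint8). A minimal standalone sketch of that pattern; the names and the real-part handling in the complex overload are assumptions for illustration, not the TFLite sources.

// Sketch only: illustrative names, not the TFLite kernel.
#include <algorithm>
#include <complex>
#include <cstdint>
#include <cstdio>
#include <vector>

// Generic element-wise cast between two flat buffers.
template <typename FromT, typename ToT>
void CopyCast(const FromT* in, ToT* out, int num_elements) {
  std::transform(in, in + num_elements, out,
                 [](FromT a) { return static_cast<ToT>(a); });
}

// Complex input: keep only the real part (an assumption for this sketch).
template <typename ToT>
void CopyCast(const std::complex<float>* in, ToT* out, int num_elements) {
  std::transform(in, in + num_elements, out, [](std::complex<float> a) {
    return static_cast<ToT>(a.real());
  });
}

int main() {
  std::vector<float> src = {1.9f, -2.5f, 3.0f};
  std::vector<int32_t> dst(src.size());
  CopyCast(src.data(), dst.data(), static_cast<int>(src.size()));
  for (int32_t v : dst) std::printf("%d ", v);  // prints: 1 -2 3
  std::printf("\n");
  return 0;
}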
neg.cc
    39  void Negate(const T* in_data, int num_elements, T* out_data) {    in Negate() [argument]
    41  for (int i = 0; i < num_elements; ++i) {    in Negate()
    49  const int num_elements = NumElements(input);    in Eval() [local]
    52  Negate(input->data.i64, num_elements, output->data.i64);    in Eval()
    55  Negate(input->data.i32, num_elements, output->data.i32);    in Eval()
    58  Negate(input->data.f, num_elements, output->data.f);    in Eval()
/external/mesa3d/src/gallium/auxiliary/util/

u_idalloc.c
    57  if (new_num_elements > buf->num_elements) {    in util_idalloc_resize()
    63  for (i = buf->num_elements / 32; i < new_num_elements / 32; i++)    in util_idalloc_resize()
    65  buf->num_elements = new_num_elements;    in util_idalloc_resize()
    72  unsigned num_elements = buf->num_elements;    in util_idalloc_alloc() [local]
    74  for (unsigned i = 0; i < num_elements / 32; i++) {    in util_idalloc_alloc()
    84  util_idalloc_resize(buf, num_elements * 2);    in util_idalloc_alloc()
    86  buf->data[num_elements / 32] |= 1 << (num_elements % 32);    in util_idalloc_alloc()
    88  return num_elements;    in util_idalloc_alloc()
    94  assert(id < buf->num_elements);    in util_idalloc_free()
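The u_idalloc matches pack one ID per bit, 32 IDs per 32-bit word: allocation scans for a word with a clear bit, the bitmap is doubled when every bit is taken, and freeing clears the ID's bit. A compact sketch of that bitmask-allocator idea, assuming a std::vector backing store and the GCC/Clang __builtin_ctz intrinsic; it is not Mesa's actual util_idalloc API.

// Sketch only: a simplified bitmask id allocator, not Mesa's util_idalloc.
#include <cassert>
#include <cstdint>
#include <vector>

struct IdAlloc {
  std::vector<uint32_t> words;  // one bit per id, 32 ids per 32-bit word

  // Return the lowest free id, growing the bitmap when every bit is set.
  unsigned alloc() {
    for (unsigned i = 0; i < words.size(); ++i) {
      if (words[i] != 0xffffffffu) {
        unsigned bit = __builtin_ctz(~words[i]);  // lowest clear bit (GCC/Clang builtin)
        words[i] |= 1u << bit;
        return i * 32 + bit;
      }
    }
    unsigned id = static_cast<unsigned>(words.size()) * 32;  // bitmap full: append a word
    words.push_back(1u);                                     // and take its bit 0
    return id;
  }

  void free_id(unsigned id) {
    assert(id / 32 < words.size());
    words[id / 32] &= ~(1u << (id % 32));
  }
};

int main() {
  IdAlloc a;
  unsigned first = a.alloc();   // 0
  unsigned second = a.alloc();  // 1
  a.free_id(first);
  assert(a.alloc() == first);   // freed ids are handed out again
  (void)second;
  return 0;
}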
/external/libchrome/mojo/public/cpp/bindings/lib/

array_internal.h
    53  static uint32_t GetStorageSize(uint32_t num_elements) {    in GetStorageSize()
    54  DCHECK(num_elements <= kMaxNumElements);    in GetStorageSize()
    55  return sizeof(ArrayHeader) + sizeof(StorageType) * num_elements;    in GetStorageSize()
    96  static uint32_t GetStorageSize(uint32_t num_elements) {
    97  return sizeof(ArrayHeader) + ((num_elements + 7) / 8);
   139  for (uint32_t i = 0; i < header->num_elements; ++i) {
   158  for (uint32_t i = 0; i < header->num_elements; ++i) {
   171  header->num_elements, i)
   190  for (uint32_t i = 0; i < header->num_elements; ++i) {
   196  header->num_elements,
    [all …]
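The two GetStorageSize overloads above compute a serialized array's size as a fixed header plus per-element storage; the second overload apparently packs one element per bit and rounds up to whole bytes with (num_elements + 7) / 8. A sketch of that arithmetic with an illustrative header type, not mojo's real ArrayHeader layout.

// Sketch only: illustrative header type, not mojo's ArrayHeader.
#include <cstdint>
#include <cstdio>

struct ArrayHeader {
  uint32_t num_bytes;
  uint32_t num_elements;
};

// General case: header followed by one StorageType per element.
template <typename StorageType>
uint32_t GetStorageSize(uint32_t num_elements) {
  return static_cast<uint32_t>(sizeof(ArrayHeader) + sizeof(StorageType) * num_elements);
}

// Packed variant: one bit per element, rounded up to whole bytes.
uint32_t GetPackedBoolStorageSize(uint32_t num_elements) {
  return static_cast<uint32_t>(sizeof(ArrayHeader)) + ((num_elements + 7) / 8);
}

int main() {
  std::printf("%u\n", GetStorageSize<uint32_t>(10));   // 8 + 4 * 10 = 48
  std::printf("%u\n", GetPackedBoolStorageSize(10));   // 8 + 2 = 10
  return 0;
}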
/external/eigen/test/

umeyama.cpp
    91  void run_test(int dim, int num_elements)    in run_test() [argument]
   109  MatrixX src = MatrixX::Random(dim+1, num_elements);    in run_test()
   110  src.row(dim) = Matrix<Scalar, 1, Dynamic>::Constant(num_elements, Scalar(1));    in run_test()
   114  MatrixX cR_t_umeyama = umeyama(src.block(0,0,dim,num_elements), dst.block(0,0,dim,num_elements));    in run_test()
   121  void run_fixed_size_test(int num_elements)    in run_fixed_size_test() [argument]
   143  MatrixX src = MatrixX::Random(dim+1, num_elements);    in run_fixed_size_test()
   144  src.row(dim) = Matrix<Scalar, 1, Dynamic>::Constant(num_elements, Scalar(1));    in run_fixed_size_test()
   148  Block<MatrixX, Dimension, Dynamic> src_block(src,0,0,dim,num_elements);    in run_fixed_size_test()
   149  Block<MatrixX, Dimension, Dynamic> dst_block(dst,0,0,dim,num_elements);    in run_fixed_size_test()
   162  const int num_elements = internal::random<int>(40,500);    in test_umeyama() [local]
    [all …]
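The test builds homogeneous (dim+1) x num_elements point sets, with a row of ones appended, and checks the transform returned by umeyama() on the dim x num_elements blocks. A hedged usage sketch of Eigen::umeyama, assuming Eigen 3 and a hand-built rotation, translation, and scale rather than the test's random similarity transform.

// Sketch only: assumes Eigen 3; the transform is hand-built, not the test's random one.
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <iostream>

int main() {
  const int dim = 3;
  const int num_elements = 100;
  Eigen::MatrixXd src = Eigen::MatrixXd::Random(dim, num_elements);

  // Apply a known similarity transform: dst = scale * R * src + t.
  Eigen::Matrix3d R = Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ()).toRotationMatrix();
  Eigen::Vector3d t(1.0, -2.0, 0.5);
  const double scale = 1.7;
  Eigen::MatrixXd dst = (scale * (R * src)).colwise() + t;

  // umeyama() recovers the (dim+1) x (dim+1) homogeneous matrix [cR | t].
  Eigen::MatrixXd cR_t = Eigen::umeyama(src, dst, /*with_scaling=*/true);
  std::cout << "rotation/scale error: "
            << (cR_t.topLeftCorner(dim, dim) - scale * R).norm() << "\n";
  std::cout << "translation error: "
            << (cR_t.topRightCorner(dim, 1) - t).norm() << "\n";
  return 0;
}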
/external/libcups/cups/

array.c
    46  int num_elements, /* Number of array elements */    [member]
   232  for (i = a->num_elements, e = a->elements; i > 0; i --, e ++)    in cupsArrayClear()
   241  a->num_elements = 0;    in cupsArrayClear()
   269  return (a->num_elements);    in cupsArrayCount()
   296  if (a->current >= 0 && a->current < a->num_elements)    in cupsArrayCurrent()
   332  for (i = a->num_elements, e = a->elements; i > 0; i --, e ++)    in cupsArrayDelete()
   386  if (a->num_elements)    in cupsArrayDup()
   392  da->elements = malloc((size_t)a->num_elements * sizeof(void *));    in cupsArrayDup()
   411  for (i = 0; i < a->num_elements; i ++)    in cupsArrayDup()
   420  memcpy(da->elements, a->elements, (size_t)a->num_elements * sizeof(void *));    in cupsArrayDup()
    [all …]
/external/tensorflow/tensorflow/core/util/

example_proto_helper.cc
    65  const std::size_t num_elements = shape.num_elements();    in FeatureDenseCopy() [local]
    66  const std::size_t offset = out_index * num_elements;    in FeatureDenseCopy()
    71  if (static_cast<size_t>(values.value_size()) != num_elements) {    in FeatureDenseCopy()
    79  std::copy_n(values.value().data(), num_elements, out_p);    in FeatureDenseCopy()
    84  if (static_cast<size_t>(values.value_size()) != num_elements) {    in FeatureDenseCopy()
    92  std::copy_n(values.value().data(), num_elements, out_p);    in FeatureDenseCopy()
    97  if (static_cast<size_t>(values.value_size()) != num_elements) {    in FeatureDenseCopy()
   106  values.value().data() + num_elements, out_p,    in FeatureDenseCopy()
   121  const int64 num_elements = values.value_size();    in FeatureSparseCopy() [local]
   122  Tensor out(dtype, TensorShape({num_elements}));    in FeatureSparseCopy()
    [all …]
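FeatureDenseCopy writes each example into a fixed-size slot of num_elements values at offset out_index * num_elements, and rejects features whose value count does not match the expected shape. A simplified sketch of that check-then-copy_n pattern, with plain std::vector standing in for TensorFlow tensors and protos.

// Sketch only: std::vector stands in for TensorFlow tensors and protos.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Copies one example's feature values into its fixed-size slot, or fails on a
// size mismatch, mirroring the check-then-copy_n shape of the matches above.
bool DenseCopy(const std::vector<float>& values, std::size_t out_index,
               std::size_t num_elements, std::vector<float>* out) {
  if (values.size() != num_elements) return false;  // wrong number of values
  const std::size_t offset = out_index * num_elements;
  std::copy_n(values.data(), num_elements, out->data() + offset);
  return true;
}

int main() {
  std::vector<float> out(2 * 3, 0.0f);  // 2 examples, 3 values each
  const bool ok = DenseCopy({1.0f, 2.0f, 3.0f}, /*out_index=*/1, /*num_elements=*/3, &out);
  std::printf("ok=%d slot1=[%g %g %g]\n", ok, out[3], out[4], out[5]);  // ok=1 slot1=[1 2 3]
  return 0;
}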
example_proto_fast_parsing.cc
    99  bool GetNumElementsInBytesList(int* num_elements) {    in GetNumElementsInBytesList() [argument]
   106  *num_elements = 0;    in GetNumElementsInBytesList()
   112  ++*num_elements;    in GetNumElementsInBytesList()
   475  LimitedArraySlice(T* begin, size_t num_elements)    in LimitedArraySlice() [argument]
   476  : current_(begin), begin_(begin), end_(begin + num_elements) {}    in LimitedArraySlice()
   615  const std::size_t num_elements = config.dense[d].elements_per_stride;    in FastParseSerializedExample() [local]
   620  output_stats->feature_values_count += num_elements;    in FastParseSerializedExample()
   623  const std::size_t offset = example_index * num_elements;    in FastParseSerializedExample()
   637  LimitedArraySlice<int64> slice(out_p, num_elements);    in FastParseSerializedExample()
   640  return shape_error(num_elements - slice.EndDistance(), "int64");    in FastParseSerializedExample()
    [all …]
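LimitedArraySlice appears to be a bounded output window over a caller-provided buffer: the error path above uses num_elements - slice.EndDistance() to report how many values the input actually produced. The sketch below illustrates one way such a slice can work; it is a simplified stand-in, not TensorFlow's class.

// Sketch only: a simplified stand-in for the bounded output slice above.
#include <cstddef>
#include <cstdio>

template <typename T>
class LimitedSlice {
 public:
  LimitedSlice(T* begin, std::size_t num_elements)
      : data_(begin), capacity_(num_elements), count_(0) {}

  // Writes are silently dropped once the slice is full, but still counted.
  void push_back(const T& value) {
    if (count_ < capacity_) data_[count_] = value;
    ++count_;
  }

  // > 0: room left; 0: exactly full; < 0: how far the producer overshot.
  std::ptrdiff_t EndDistance() const {
    return static_cast<std::ptrdiff_t>(capacity_) - static_cast<std::ptrdiff_t>(count_);
  }

 private:
  T* data_;
  std::size_t capacity_;
  std::size_t count_;
};

int main() {
  long buf[3] = {0, 0, 0};
  LimitedSlice<long> slice(buf, 3);
  for (long v = 1; v <= 5; ++v) slice.push_back(v);  // producer emits 5 values
  std::printf("stored: %ld %ld %ld, overshoot: %td\n",
              buf[0], buf[1], buf[2], -slice.EndDistance());  // stored: 1 2 3, overshoot: 2
  return 0;
}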
/external/tensorflow/tensorflow/core/framework/

tensor_shape_test.cc
    40  EXPECT_EQ(s.num_elements(), 1);    in TEST()
    49  EXPECT_EQ(100, s.num_elements());    in TEST()
    54  EXPECT_EQ(40, s.num_elements());    in TEST()
    60  EXPECT_EQ(5, s.num_elements());    in TEST()
    69  EXPECT_EQ(20000, s.num_elements());    in TEST()
    78  EXPECT_EQ(30, s.num_elements());    in TEST()
    91  ASSERT_EQ(210, s0.num_elements());    in TEST()
    97  ASSERT_EQ(210, s0.num_elements());    in TEST()
   104  ASSERT_EQ(42, s1.num_elements());    in TEST()
   110  ASSERT_EQ(210, s2.num_elements());    in TEST()
    [all …]
/external/tensorflow/tensorflow/compiler/xla/service/gpu/

partition_assignment.cc
    65  int64 num_elements = ShapeUtil::ElementsIn(shape);    in CalculateLaunchDimensions() [local]
    66  if (num_elements <= 1) {    in CalculateLaunchDimensions()
    70  CHECK_EQ(num_elements % unroll_factor, 0);    in CalculateLaunchDimensions()
    71  num_elements = num_elements / unroll_factor;    in CalculateLaunchDimensions()
    85  if (num_elements < threads_per_block) {    in CalculateLaunchDimensions()
    86  threads_per_block = num_elements;    in CalculateLaunchDimensions()
    91  int64 block_count = CeilOfRatio(num_elements, threads_per_block);    in CalculateLaunchDimensions()
    95  num_elements, threads_per_block, block_count);    in CalculateLaunchDimensions()
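CalculateLaunchDimensions divides the element count by the unroll factor (each thread handles unroll_factor elements), clamps threads per block to the element count, and rounds the block count up with CeilOfRatio. A sketch of that arithmetic with an assumed 1024-thread cap in place of the real device query; it is not the XLA GPU heuristic itself.

// Sketch only: illustrative values, not the XLA GPU heuristics.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct LaunchDims {
  int64_t threads_per_block;
  int64_t block_count;
};

LaunchDims CalculateLaunchDims(int64_t num_elements, int unroll_factor,
                               int64_t max_threads_per_block = 1024) {
  assert(num_elements % unroll_factor == 0);
  num_elements /= unroll_factor;  // each thread handles unroll_factor elements

  int64_t threads_per_block = max_threads_per_block;
  if (num_elements < threads_per_block) threads_per_block = num_elements;

  // CeilOfRatio: round up so every element is covered by some block.
  const int64_t block_count = (num_elements + threads_per_block - 1) / threads_per_block;
  return {threads_per_block, block_count};
}

int main() {
  LaunchDims d = CalculateLaunchDims(/*num_elements=*/10000, /*unroll_factor=*/4);
  std::printf("threads=%lld blocks=%lld\n",
              static_cast<long long>(d.threads_per_block),
              static_cast<long long>(d.block_count));  // threads=1024 blocks=3
  return 0;
}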
/external/tensorflow/tensorflow/compiler/xla/tests/

iota_test.cc
    27  std::vector<T> GetR1Expected(const int64 num_elements) {    in GetR1Expected() [argument]
    28  std::vector<T> result(num_elements);    in GetR1Expected()
    40  const int64 num_elements = std::get<1>(spec);    in TEST_P() [local]
    42  Iota(&builder, element_type, num_elements);    in TEST_P()
    44  ComputeAndCompareR1<float>(&builder, GetR1Expected<float>(num_elements), {},    in TEST_P()
    47  ComputeAndCompareR1<uint32>(&builder, GetR1Expected<uint32>(num_elements),    in TEST_P()
    51  ComputeAndCompareR1<int32>(&builder, GetR1Expected<int32>(num_elements),    in TEST_P()
    69  const int64 num_elements = std::get<1>(spec);    in TEST_P() [local]
    73  dimensions.insert(dimensions.begin() + iota_dim, num_elements);    in TEST_P()
    96  const int64 num_elements = std::get<1>(spec);    in TEST_P() [local]
    [all …]
/external/tensorflow/tensorflow/contrib/boosted_trees/kernels/

split_handler_ops.cc
   183  int32 num_elements = partition_boundaries.size() - 1;    in Compute() [local]
   187  num_elements = 0;    in Compute()
   193  TensorShape({num_elements}),    in Compute()
   201  int32 size_output = num_elements;    in Compute()
   203  num_elements > 0) {    in Compute()
   219  if (num_elements == 0) {    in Compute()
   226  &state, normalizer_ratio, num_elements, partition_boundaries,    in Compute()
   233  &state, normalizer_ratio, num_elements, partition_boundaries,    in Compute()
   244  const int num_elements, const std::vector<int32>& partition_boundaries,    in ComputeNormalDecisionTree() [argument]
   252  for (int root_idx = 0; root_idx < num_elements; ++root_idx) {    in ComputeNormalDecisionTree()
    [all …]
/external/tensorflow/tensorflow/python/data/benchmarks/

from_tensor_slices_benchmark.py
    34  num_elements = input_size * num_epochs // batch_size
    44  num_elements=num_elements,
    52  num_elements = num_epochs * reshape_dim[0]
    62  num_elements=num_elements,
    70  num_elements = input_size * num_epochs // batch_size
    80  num_elements=num_elements,
benchmark_base.py
    33  def run_benchmark(self, dataset, num_elements, iters=1):    [argument]
    57  dataset = dataset.skip(num_elements - 1)
    74  return np.median(deltas) / float(num_elements)
    78  num_elements,    [argument]
    83  wall_time = self.run_benchmark(dataset, num_elements, iters)
    87  extras["num_elements"] = num_elements
/external/tensorflow/tensorflow/compiler/xla/

array.h
   105  : sizes_(sizes.begin(), sizes.end()), values_(new T[num_elements()]) {
   121  CHECK(idx == num_elements());
   139  CHECK(idx == num_elements());
   159  CHECK(idx == num_elements());
   176  CHECK(idx == num_elements());
   200  CHECK(idx == num_elements());
   220  CHECK(idx == num_elements());
   248  CHECK(idx == num_elements());
   252  : sizes_(other.sizes_), values_(new T[num_elements()]) {
   253  std::copy(&other.values_[0], &other.values_[0] + num_elements(),
    [all …]
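The array.h matches show the flat-storage layout: the dimension sizes live in sizes_ and a single buffer of num_elements() values backs the whole array. A simplified sketch of that layout with a row-major 2D accessor, using std::vector instead of raw new[]; illustrative only, not xla::Array.

// Sketch only: a simplified flat-storage array, not xla::Array.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

template <typename T>
class NdArray {
 public:
  explicit NdArray(std::vector<int64_t> sizes)
      : sizes_(std::move(sizes)), values_(static_cast<std::size_t>(num_elements())) {}

  // Total element count is the product of all dimension sizes.
  int64_t num_elements() const {
    return std::accumulate(sizes_.begin(), sizes_.end(), int64_t{1},
                           std::multiplies<int64_t>());
  }

  // Row-major 2D accessor, showing how the flat buffer is addressed.
  T& at(int64_t i, int64_t j) { return values_[i * sizes_[1] + j]; }

 private:
  std::vector<int64_t> sizes_;  // declared before values_ so num_elements() is valid above
  std::vector<T> values_;       // one flat buffer of num_elements() values
};

int main() {
  NdArray<float> a({3, 4});
  a.at(2, 1) = 7.0f;
  std::printf("%lld elements, a(2,1) = %g\n",
              static_cast<long long>(a.num_elements()), a.at(2, 1));
  return 0;
}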
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/

reshape_op.cc
    43  const int64 num_dims = sizes_shape.num_elements();    in Compile()
    77  const int64 missing = input_shape.num_elements() / product;    in Compile()
    79  ctx, product * missing == input_shape.num_elements(),    in Compile()
    81  "Input to reshape is a tensor with ", input_shape.num_elements(),    in Compile()
    86  OP_REQUIRES(ctx, shape.num_elements() == input_shape.num_elements(),    in Compile()
    88  input_shape.num_elements(),    in Compile()
    90  shape.num_elements()));    in Compile()
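The reshape kernel infers the one unspecified dimension as input_elements / product of the known dims, and errors out unless the division is exact and the final element counts agree. A small sketch of that inference logic, following the usual -1 convention and detached from the OP_REQUIRES plumbing.

// Sketch only: the inference logic detached from the TensorFlow/XLA op plumbing.
#include <cstdint>
#include <cstdio>
#include <vector>

// Replaces a single -1 entry in `shape` with input_elements / product of the
// known dims; returns false if that is impossible or the counts do not match.
bool InferReshape(int64_t input_elements, std::vector<int64_t>* shape) {
  int64_t product = 1;
  int missing_dim = -1;
  for (int i = 0; i < static_cast<int>(shape->size()); ++i) {
    if ((*shape)[i] == -1) {
      if (missing_dim != -1) return false;  // at most one -1 is allowed
      missing_dim = i;
    } else {
      product *= (*shape)[i];
    }
  }
  if (missing_dim >= 0) {
    if (product == 0) return false;
    const int64_t missing = input_elements / product;
    if (product * missing != input_elements) return false;  // must divide evenly
    (*shape)[missing_dim] = missing;
    product *= missing;
  }
  return product == input_elements;  // final element counts must agree
}

int main() {
  std::vector<int64_t> shape = {2, -1, 4};
  const bool ok = InferReshape(/*input_elements=*/24, &shape);
  std::printf("ok=%d shape=2x%lldx4\n", ok, static_cast<long long>(shape[1]));  // ok=1 shape=2x3x4
  return 0;
}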
/external/tensorflow/tensorflow/python/data/experimental/benchmarks/

parallel_interleave_benchmark.py
    45  def make_dataset(time_us, num_elements):    [argument]
    46  return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
    57  def _benchmark(self, dataset_fn, iters, num_elements):    [argument]
    67  for _ in range(num_elements):
    72  mean_wall_time = np.mean(deltas) / num_elements
    81  self._benchmark(dataset_fn=dataset_fn, iters=10, num_elements=100)
    91  self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
   101  self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
/external/tensorflow/tensorflow/python/profiler/internal/

flops_registry.py
    68  return ops.OpStats("flops", in_shape.num_elements() * ops_per_element)
   121  return ops.OpStats("flops", in_shape.num_elements() * 3 - 1)
   144  return ops.OpStats("flops", out_shape.num_elements() * ops_per_element)
   247  num_flops = (in_shape.num_elements() * reduce_flops
   248  + out_shape.num_elements() * (finalize_flops - reduce_flops))
   321  return ops.OpStats("flops", kernel_area * out_shape.num_elements())
   350  kernel_area * out_backprop_shape.num_elements() * 2)
   377  max_pool_ops = kernel_area * orig_out_shape.num_elements()
   378  return ops.OpStats("flops", max_pool_ops + orig_out_shape.num_elements())
   408  (2 * out_shape.num_elements()
    [all …]
/external/mesa3d/src/gallium/state_trackers/va/

buffer.c
    40  unsigned int size, unsigned int num_elements, void *data,    in vlVaCreateBuffer() [argument]
    55  buf->num_elements = num_elements;    in vlVaCreateBuffer()
    56  buf->data = MALLOC(size * num_elements);    in vlVaCreateBuffer()
    64  memcpy(buf->data, data, size * num_elements);    in vlVaCreateBuffer()
    76  unsigned int num_elements)    in vlVaBufferSetNumElements() [argument]
    94  buf->data = REALLOC(buf->data, buf->size * buf->num_elements,    in vlVaBufferSetNumElements()
    95  buf->size * num_elements);    in vlVaBufferSetNumElements()
    96  buf->num_elements = num_elements;    in vlVaBufferSetNumElements()
   220  unsigned int *size, unsigned int *num_elements)    in vlVaBufferInfo() [argument]
   237  *num_elements = buf->num_elements;    in vlVaBufferInfo()
    [all …]
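vlVaCreateBuffer sizes the backing store as size * num_elements bytes, and vlVaBufferSetNumElements reallocates it from the old byte count to the new one. A simplified sketch of that size-times-count management, with std::vector standing in for the MALLOC/REALLOC calls above.

// Sketch only: std::vector stands in for the MALLOC/REALLOC bookkeeping above.
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

struct SizedBuffer {
  unsigned size = 0;          // bytes per element
  unsigned num_elements = 0;
  std::vector<unsigned char> data;

  void create(unsigned element_size, unsigned count, const void* initial) {
    size = element_size;
    num_elements = count;
    data.resize(static_cast<std::size_t>(size) * num_elements);
    if (initial != nullptr) std::memcpy(data.data(), initial, data.size());
  }

  // Changing the element count resizes the size * num_elements store in place.
  void set_num_elements(unsigned count) {
    num_elements = count;
    data.resize(static_cast<std::size_t>(size) * num_elements);
  }
};

int main() {
  const unsigned initial[2] = {1, 2};
  SizedBuffer buf;
  buf.create(sizeof(unsigned), 2, initial);
  buf.set_num_elements(4);  // grow from 2 to 4 elements, existing bytes are kept
  std::printf("%zu bytes\n", buf.data.size());
  return 0;
}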
/external/webrtc/webrtc/common_audio/

ring_buffer_unittest.cc
    33  static int SetIncrementingData(int* data, int num_elements,    in SetIncrementingData() [argument]
    35  for (int i = 0; i < num_elements; i++) {    in SetIncrementingData()
    41  static int CheckIncrementingData(int* data, int num_elements,    in CheckIncrementingData() [argument]
    43  for (int i = 0; i < num_elements; i++) {    in CheckIncrementingData()
    71  const int num_elements = rand() % buffer_size;    in RandomStressTest() [local]
    76  const int expected_elements = std::min(num_elements, buffer_available);    in RandomStressTest()
    81  num_elements));    in RandomStressTest()
    85  const int expected_elements = std::min(num_elements,    in RandomStressTest()
    93  num_elements));    in RandomStressTest()
/external/tensorflow/tensorflow/python/ops/

list_ops.py
    59  def tensor_list_reserve(element_shape, num_elements, element_dtype, name=None):    [argument]
    62  num_elements=num_elements,
   118  num_elements=-1,
   124  num_elements=-1,    [argument]
   131  num_elements=num_elements,
   229  num_elements = t.shape.dims[0].value
   231  num_elements = None
   241  num_elements=num_elements)
   290  num_elements = gen_list_ops.tensor_list_length(input_list)
   291  dlist = tensor_list_reserve(element_shape, num_elements, dtensor.dtype)
/external/tensorflow/tensorflow/lite/tools/benchmark/

benchmark_tflite_model.cc
   121  void FillRandomValue(T* ptr, int num_elements,    in FillRandomValue() [argument]
   123  for (int i = 0; i < num_elements; ++i) {    in FillRandomValue()
   131  int num_elements = 1;    in FillRandomString() [local]
   133  num_elements *= dim;    in FillRandomString()
   135  for (int i = 0; i < num_elements; ++i) {    in FillRandomString()
   281  int num_elements = 1;    in PrepareInputData() [local]
   284  num_elements *= sizes[i];    in PrepareInputData()
   288  t_data.bytes = sizeof(float) * num_elements;    in PrepareInputData()
   290  FillRandomValue<float>(t_data.data.f, num_elements, []() {    in PrepareInputData()
   296  t_data.bytes = sizeof(int32_t) * num_elements;    in PrepareInputData()
    [all …]
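PrepareInputData multiplies the tensor's dims to get num_elements and then fills the buffer through FillRandomValue with a caller-supplied generator. A standalone sketch of that fill pattern; the shape and distribution are assumed, and this is not the TFLite benchmark tool itself.

// Sketch only: assumed shape and distribution, not the benchmark tool itself.
#include <cstddef>
#include <cstdio>
#include <functional>
#include <random>
#include <vector>

template <typename T>
void FillRandomValue(T* ptr, int num_elements, const std::function<T()>& random_func) {
  for (int i = 0; i < num_elements; ++i) ptr[i] = random_func();
}

int main() {
  const std::vector<int> sizes = {1, 224, 224, 3};  // assumed input shape
  int num_elements = 1;
  for (int dim : sizes) num_elements *= dim;

  std::vector<float> data(static_cast<std::size_t>(num_elements));
  std::mt19937 rng(42);
  std::uniform_real_distribution<float> dist(-0.5f, 0.5f);
  FillRandomValue<float>(data.data(), num_elements, [&]() { return dist(rng); });

  std::printf("filled %d elements, first = %f\n", num_elements, data[0]);
  return 0;
}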
/external/tensorflow/tensorflow/compiler/tests/

add_n_test.py
    35  element_shape=[], element_dtype=dtypes.float32, num_elements=3)
    37  element_shape=[], element_dtype=dtypes.float32, num_elements=3)
    49  element_shape=[], element_dtype=dtypes.float32, num_elements=2)
    51  element_shape=[], element_dtype=dtypes.float32, num_elements=3)
    67  num_elements=3)
    71  num_elements=3)
/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/

stats_dataset_serialization_test.py
    36  def _build_dataset_bytes_stats(self, num_elements):    [argument]
    37  return dataset_ops.Dataset.range(num_elements).map(
    57  def _build_dataset_latency_stats(self, num_elements, tag="record_latency"):    [argument]
    58  return dataset_ops.Dataset.range(num_elements).apply(
    62  num_elements,    [argument]
    65  return dataset_ops.Dataset.range(num_elements).apply(
/external/tensorflow/tensorflow/lite/tools/optimize/

quantization_utils.cc
    34  TfLiteStatus NumElements(const TensorT& tensor, uint64_t* num_elements) {    in NumElements() [argument]
    38  *num_elements = 1;    in NumElements()
    40  *num_elements *= dim;    in NumElements()
   173  uint64_t num_elements;    in SymmetricQuantizeTensor() [local]
   174  TF_LITE_ENSURE_STATUS(utils::NumElements(*tensor, &num_elements));    in SymmetricQuantizeTensor()
   177  quantized_buffer.resize(num_elements);    in SymmetricQuantizeTensor()
   180  tensor_utils::SymmetricQuantizeFloats(float_data, num_elements,    in SymmetricQuantizeTensor()
   192  uint8_buffer + num_elements);    in SymmetricQuantizeTensor()
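NumElements multiplies the tensor's dims into a uint64_t count, and SymmetricQuantizeTensor then quantizes that many float weights. A sketch of both steps, with the quantization simplified to a single scale of max|x| / 127 as a stand-in for tensor_utils::SymmetricQuantizeFloats; it is not the TFLite optimize code.

// Sketch only: a simplified stand-in for NumElements + SymmetricQuantizeFloats.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

// Element count is the product of the tensor's dimensions.
uint64_t NumElements(const std::vector<int>& dims) {
  uint64_t num_elements = 1;
  for (int dim : dims) num_elements *= static_cast<uint64_t>(dim);
  return num_elements;
}

// Symmetric quantization: one scale for the whole tensor, zero point fixed at 0.
void SymmetricQuantize(const float* in, uint64_t num_elements,
                       std::vector<int8_t>* out, float* scale) {
  float max_abs = 0.0f;
  for (uint64_t i = 0; i < num_elements; ++i) max_abs = std::max(max_abs, std::fabs(in[i]));
  *scale = max_abs / 127.0f;
  if (*scale == 0.0f) {  // all-zero tensor: nothing to scale
    out->assign(num_elements, 0);
    return;
  }
  out->resize(num_elements);
  for (uint64_t i = 0; i < num_elements; ++i) {
    (*out)[i] = static_cast<int8_t>(std::lround(in[i] / *scale));
  }
}

int main() {
  const std::vector<int> dims = {2, 3};
  const float weights[] = {0.5f, -1.0f, 0.25f, 0.75f, -0.5f, 1.0f};
  std::vector<int8_t> quantized;
  float scale = 0.0f;
  SymmetricQuantize(weights, NumElements(dims), &quantized, &scale);
  std::printf("scale=%f q[1]=%d q[5]=%d\n", scale, quantized[1], quantized[5]);  // -127 and 127
  return 0;
}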