/external/tensorflow/tensorflow/core/kernels/fuzzing/ |
D | scatter_nd_fuzz.cc |
      83  size_t num_indices = 1;  in FuzzImpl() local
      87  num_indices *= dim;  in FuzzImpl()
      94  for (i = 0; i < num_indices && data_ix < size; i++) {  in FuzzImpl()
      97  for (; i < num_indices; i++) {  in FuzzImpl()
     103  num_indices = 1;  in FuzzImpl()
     107  num_indices *= indices_dims[i];  in FuzzImpl()
     112  num_indices *= flat_shape(i);  in FuzzImpl()
     118  for (i = 0; i < num_indices; i++) {  in FuzzImpl()
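
The FuzzImpl() hits show the usual flattening idiom: num_indices starts at 1 and accumulates the product of the index tensor's dimensions, after which the fill loops run that many times. A minimal standalone sketch of the idiom (the dims vector is a stand-in for the fuzzer's parsed shape, not its real data):

    #include <cstddef>
    #include <vector>

    // Flat element count of an index tensor: the product of its dims.
    // An empty dims list yields 1 (a scalar), matching the seed value.
    size_t NumIndices(const std::vector<size_t>& dims) {
      size_t num_indices = 1;
      for (size_t dim : dims) num_indices *= dim;
      return num_indices;
    }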
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | sparse_index_array.cc |
      76  void SparseIndexArray::Resize(int64 num_indices) {  in Resize() argument
      78  indices_.resize(rank_ * num_indices);  in Resize()
      85  int64 num_indices = index_count();  in Validate() local
      86  if (num_indices > LayoutUtil::MaxSparseElements(shape.layout())) {  in Validate()
      89  if (num_indices < 2) {  in Validate()
      96  for (int64 n = 1; n < num_indices; ++n) {  in Validate()
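
Resize() multiplying by rank_ implies the storage layout: num_indices index tuples of rank_ scalars each, packed into one flat buffer, which Validate() then walks to check element count, ordering, and bounds. A sketch of that layout under those assumptions (SparseIndexArraySketch is an illustrative name, not the XLA class):

    #include <cstdint>
    #include <vector>

    // num_indices index tuples of length rank_, stored back to back.
    class SparseIndexArraySketch {
     public:
      explicit SparseIndexArraySketch(int64_t rank) : rank_(rank) {}
      void Resize(int64_t num_indices) { indices_.resize(rank_ * num_indices); }
      int64_t index_count() const {
        return static_cast<int64_t>(indices_.size()) / rank_;
      }

     private:
      int64_t rank_;
      std::vector<int64_t> indices_;
    };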
|
/external/tensorflow/tensorflow/core/kernels/ |
D | segment_reduction_ops.cc |
      59  const int64 num_indices = segment_ids.NumElements();  in SegmentReductionValidationHelper() local
      60  OP_REQUIRES(context, num_indices == input.dim_size(0),  in SegmentReductionValidationHelper()
      90  const int64 num_indices = segment_ids.NumElements();  in Compute() local
      98  num_indices > 0  in Compute()
      99  ? internal::SubtleMustCopy(segment_vec(num_indices - 1)) + 1  in Compute()
     111  if (num_indices == 0) return;  in Compute()
     130  while (end <= num_indices) {  in Compute()
     135  if (end < num_indices) {  in Compute()
     190  if (end >= num_indices) break;  in Compute()
     228  const int64 num_indices = segment_ids.NumElements();  in ComputeAsync() local
    [all …]
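
Two invariants are visible in these hits: segment_ids must supply exactly one id per input row (line 60), and with sorted ids the output row count is the last id plus one (lines 98-99). A sketch of the row-count computation on a plain vector rather than a Tensor:

    #include <cstdint>
    #include <vector>

    // With sorted, non-negative segment ids, the output needs
    // (largest id + 1) rows; no ids means an empty output.
    int64_t NumOutputRows(const std::vector<int32_t>& segment_ids) {
      const int64_t num_indices = static_cast<int64_t>(segment_ids.size());
      return num_indices > 0 ? segment_ids[num_indices - 1] + 1 : 0;
    }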
|
D | string_split_op.cc |
     200  std::vector<int64> num_indices(batch_size);  in Compute() local
     206  num_indices[i] = n_entries;  in Compute()
     229  for (size_t j = 0; j < num_indices[i]; ++j) {  in Compute()
     273  std::vector<int64> num_indices(batch_size);  in Compute() local
     277  num_indices[i] = n_entries;  in Compute()
     299  for (size_t j = 0; j < num_indices[i]; ++j) {  in Compute()
|
D | tensor_array_ops.cc |
     619  int32 num_indices;  in Compute() local
     623  OP_REQUIRES_OK(ctx, tensor_array->PackOrConcatSize(&num_indices));  in Compute()
     624  indices.resize(num_indices);  in Compute()
     634  num_indices = tensor_indices->NumElements();  in Compute()
     635  indices.resize(num_indices);  in Compute()
     636  std::copy(indices_t.data(), indices_t.data() + num_indices,  in Compute()
     642  if (num_indices == 0) {  in Compute()
     673  output_shape.InsertDim(0, num_indices);  in Compute()
     684  input_tensors_flat.reserve(num_indices);  in Compute()
     692  for (int i = 1; i < num_indices; ++i) {  in Compute()
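
Line 673 shows the shape rule for gather/pack: combining num_indices elements of shape S produces a tensor of shape [num_indices] + S. The same rule on plain vectors (a sketch, not the TensorShape API):

    #include <cstdint>
    #include <vector>

    // Prepend the gathered count as the new leading dimension,
    // mirroring output_shape.InsertDim(0, num_indices).
    std::vector<int64_t> PackedShape(const std::vector<int64_t>& element_shape,
                                     int64_t num_indices) {
      std::vector<int64_t> out(element_shape);
      out.insert(out.begin(), num_indices);
      return out;
    }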
|
D | scatter_nd_op.cc |
      56  bool ValidEmptyOutputShape(int64 num_inputs, int64 num_indices,  in ValidEmptyOutputShape() argument
      58  if (num_indices == 0 && num_updates == 0) {  in ValidEmptyOutputShape()
      62  return (num_inputs != 0 && num_indices != 0 && num_updates != 0);  in ValidEmptyOutputShape()
     644  size_t num_indices = indices.NumElements();  in operator ()() local
     645  indices_host_ = new Index[num_indices];  in operator ()()
     647  auto size = sizeof(Index) * num_indices;  in operator ()()
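
ValidEmptyOutputShape() is small enough to restate in full from the hits at lines 56-62: an empty output shape is acceptable only when there is nothing to scatter at all; otherwise every count must be nonzero:

    #include <cstdint>

    bool ValidEmptyOutputShape(int64_t num_inputs, int64_t num_indices,
                               int64_t num_updates) {
      // No indices and no updates: trivially valid, whatever num_inputs is.
      if (num_indices == 0 && num_updates == 0) return true;
      // Otherwise all three tensors must be non-empty.
      return num_inputs != 0 && num_indices != 0 && num_updates != 0;
    }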
|
D | scatter_nd_op_gpu.cu.cc |
      85  const Eigen::array<int64, IXDIM> batch_strides, const int64 num_indices,  in ScatterNdOpKernel() argument
      89  CUDA_1D_KERNEL_LOOP(index, num_indices) {  in ScatterNdOpKernel()
|
/external/flac/libFLAC/ |
D | metadata_object.c |
     151  FLAC__ASSERT(from->num_indices == 0);  in copy_track_()
     155  FLAC__ASSERT(from->num_indices > 0);  in copy_track_()
     156  …if ((x = safe_malloc_mul_2op_p(from->num_indices, /*times*/sizeof(FLAC__StreamMetadata_CueSheet_In…  in copy_track_()
     158  memcpy(x, from->indices, from->num_indices * sizeof(FLAC__StreamMetadata_CueSheet_Index));  in copy_track_()
     335  object->length += object->data.cue_sheet.tracks[i].num_indices * (  in cuesheet_calculate_length_()
     343  static FLAC__StreamMetadata_CueSheet_Index *cuesheet_track_index_array_new_(unsigned num_indices)  in cuesheet_track_index_array_new_() argument
     345  FLAC__ASSERT(num_indices > 0);  in cuesheet_track_index_array_new_()
     347  return safe_calloc_(num_indices, sizeof(FLAC__StreamMetadata_CueSheet_Index));  in cuesheet_track_index_array_new_()
     365  FLAC__ASSERT(object_array[i].num_indices > 0);  in cuesheet_track_array_delete_()
     404  …FLAC__ASSERT((src->indices != NULL && src->num_indices > 0) || (src->indices == NULL && src->num_i…  in cuesheet_set_track_()
    [all …]
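
copy_track_() pairs an overflow-checked multiply-and-allocate (safe_malloc_mul_2op_p, line 156) with a memcpy of num_indices elements (line 158). A sketch of what such a checked allocator must do; checked_alloc is a hypothetical stand-in, not the FLAC helper:

    #include <cstddef>
    #include <cstdlib>
    #include <limits>

    // Refuse the request if count * elem_size would overflow size_t.
    void* checked_alloc(std::size_t count, std::size_t elem_size) {
      if (count != 0 &&
          elem_size > std::numeric_limits<std::size_t>::max() / count)
        return nullptr;
      return std::malloc(count * elem_size);
    }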
|
D | format.c |
     461  if(cue_sheet->tracks[i].num_indices == 0) {  in FLAC__format_cuesheet_is_legal()
     472  for(j = 0; j < cue_sheet->tracks[i].num_indices; j++) {  in FLAC__format_cuesheet_is_legal()
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | sparse_to_dense.cc |
      91  const int num_indices,  in GetIndicesVector() argument
      98  for (int i = 0; i < num_indices; ++i) {  in GetIndicesVector()
     107  for (int i = 0; i < num_indices; ++i) {  in GetIndicesVector()
     205  const int num_indices = SizeOfDimension(indices, 0);  in SparseToDenseImpl() local
     208  indices_vector.reserve(num_indices);  in SparseToDenseImpl()
     209  TF_LITE_ENSURE_OK(context, GetIndicesVector<TI>(context, indices, num_indices,  in SparseToDenseImpl()
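
SparseToDenseImpl() takes num_indices from the leading dimension of the indices tensor (line 205) and collects one index vector per sparse entry. A sketch over a flat row-major buffer, with the TfLiteTensor plumbing elided:

    #include <cstdint>
    #include <vector>

    // One rank-length index vector per sparse entry, read row by row.
    std::vector<std::vector<int32_t>> CollectIndices(const int32_t* data,
                                                     int num_indices, int rank) {
      std::vector<std::vector<int32_t>> out;
      out.reserve(num_indices);
      for (int i = 0; i < num_indices; ++i)
        out.emplace_back(data + i * rank, data + (i + 1) * rank);
      return out;
    }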
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | dynamic_index_splitter.cc |
      48  int64 num_indices = dynamic_op->operand(0)->shape().rank();  in Run() local
      50  if (num_indices == 0) {  in Run()
      74  for (int64 dim = 0; dim < num_indices; ++dim) {  in Run()
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | gather_op.cc |
      47  int64 num_indices = 1;  in XlaGather() local
      52  num_indices *= indices_shape.dim_size(i);  in XlaGather()
      57  num_indices *= indices_shape.dim_size(i);  in XlaGather()
      62  if (num_indices == 0) {  in XlaGather()
|
D | tensor_array_ops.cc |
     376  const int num_indices = indices_shape.dim_size(0);  in Compile() local
     388  if (status.ok() && num_indices == value_shape.dim_size(0)) {  in Compile()
     390  for (auto i = 0; i < num_indices; i++) {  in Compile()
     415  for (int i = 0; i < num_indices; ++i) {  in Compile()
|
/external/tensorflow/tensorflow/compiler/tf2xla/lib/ |
D | scatter.cc |
      61  int64 num_indices = 1;  in XlaScatter() local
      63  num_indices *= dim;  in XlaScatter()
      67  if (num_indices == 0) {  in XlaScatter()
|
/external/mesa3d/src/compiler/nir/ |
D | nir_intrinsics.h |
     326  #define SYSTEM_VALUE(name, components, num_indices, idx0, idx1, idx2) \  argument
     328  INTRINSIC(load_##name, 0, ARR(0), true, components, 0, num_indices, \
     427  #define LOAD(name, srcs, num_indices, idx0, idx1, idx2, flags) \  argument
     428  INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, num_indices, idx0, idx1, idx2, flags)
     462  #define STORE(name, srcs, num_indices, idx0, idx1, idx2, flags) \  argument
     463  INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, num_indices, idx0, idx1, idx2, flags)
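
These macros feed a table: each intrinsic's metadata, including how many constant indices it carries, is expanded into nir_intrinsic_infos (see nir_intrinsics.c below), and consumers look the count up there instead of encoding it per instruction. A much-reduced sketch of that table-driven shape; the entries and counts are illustrative, not the real NIR data:

    // Each intrinsic records its constant-index count once, centrally.
    struct IntrinsicInfo {
      const char* name;
      unsigned num_indices;
    };

    static const IntrinsicInfo kIntrinsicInfos[] = {
        {"load_input", 2},    // illustrative counts only
        {"store_output", 3},
    };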
|
D | nir_intrinsics.c |
      42  .num_indices = _num_indices, \
|
D | nir_serialize.c |
     487  unsigned num_indices = nir_intrinsic_infos[intrin->intrinsic].num_indices;  in write_intrinsic() local
     500  for (unsigned i = 0; i < num_indices; i++)  in write_intrinsic()
     513  unsigned num_indices = nir_intrinsic_infos[op].num_indices;  in read_intrinsic() local
     526  for (unsigned i = 0; i < num_indices; i++)  in read_intrinsic()
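
Serialization leans on that shared table: write_intrinsic() and read_intrinsic() both take num_indices from nir_intrinsic_infos, so the count never travels in the byte stream. A sketch of the symmetry with the blob modeled as a plain vector (NIR's real blob API is different):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Writer and reader agree on the count out of band, via the table.
    void WriteIndices(std::vector<uint32_t>* blob, const uint32_t* idx,
                      unsigned num_indices) {
      for (unsigned i = 0; i < num_indices; i++) blob->push_back(idx[i]);
    }

    void ReadIndices(const std::vector<uint32_t>& blob, std::size_t offset,
                     uint32_t* idx, unsigned num_indices) {
      for (unsigned i = 0; i < num_indices; i++) idx[i] = blob[offset + i];
    }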
|
/external/tensorflow/tensorflow/python/ops/ragged/ |
D | ragged_batch_gather_ops.py |
     104  num_indices = indices.row_lengths()
     106  adjustments = ragged_util.repeat(params_starts, num_indices, axis=0)
|
D | ragged_getitem.py |
     304  num_indices = sum(1 for idx in key_list if idx is not array_ops.newaxis)
     305  if num_indices > num_remaining_dims + 1:
     307  elif num_indices == num_remaining_dims + 1:
|
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorRef.h |
     200  const std::size_t num_indices = (sizeof...(otherIndices) + 1);  in operator() local
     201  const array<Index, num_indices> indices{{firstIndex, otherIndices...}};  in operator()
     207  const std::size_t num_indices = (sizeof...(otherIndices) + 1);  in coeffRef() local
     208  const array<Index, num_indices> indices{{firstIndex, otherIndices...}};  in coeffRef()
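
Here num_indices is a compile-time constant: sizeof...(otherIndices) + 1 counts the call's arguments, so the indices fit in a fixed-size array with no heap allocation. A standalone sketch of the same pack-counting trick:

    #include <array>
    #include <cstddef>

    // The array size is derived from the parameter pack at compile time.
    template <typename... Indices>
    std::array<std::size_t, sizeof...(Indices) + 1> PackIndices(
        std::size_t first, Indices... rest) {
      return {{first, static_cast<std::size_t>(rest)...}};
    }

    // Usage: auto idx = PackIndices(3, 1, 4);  // std::array of size 3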
|
/external/mesa3d/src/gallium/auxiliary/draw/ |
D | draw_prim_assembler.c |
      92  unsigned *indices, unsigned num_indices)  in copy_verts() argument
      99  for (i = 0; i < num_indices; ++i) {  in copy_verts()
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | segment_reduction_ops_test.py |
     468  def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):  argument
     470  indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
     506  num_indices = len(segment_indices)
     510  shape, num_indices, dtype=dtype)
     762  num_indices = len(segment_indices)
     766  shape, num_indices, dtype=dtypes_lib.float64)
     782  num_indices = len(segment_indices)
     789  shape, num_indices, dtype=dtypes_lib.float64)
|
/external/swiftshader/third_party/SPIRV-Tools/source/val/ |
D | validate_composites.cpp |
      44  const uint32_t num_indices = num_words - word_index;  in GetExtractInsertValueType() local
      46  if (num_indices > kCompositeExtractInsertMaxNumIndices) {  in GetExtractInsertValueType()
      50  << ". Found " << num_indices << " indexes.";  in GetExtractInsertValueType()
|
/external/deqp-deps/SPIRV-Tools/source/val/ |
D | validate_composites.cpp |
      44  const uint32_t num_indices = num_words - word_index;  in GetExtractInsertValueType() local
      46  if (num_indices > kCompositeExtractInsertMaxNumIndices) {  in GetExtractInsertValueType()
      50  << ". Found " << num_indices << " indexes.";  in GetExtractInsertValueType()
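
Both vendored copies of SPIRV-Tools apply the same rule: every operand word remaining after the instruction's fixed header is an index, and the count is capped by kCompositeExtractInsertMaxNumIndices (255 per the SPIR-V universal limits, if memory serves). A sketch of the check with error reporting reduced to a string:

    #include <cstdint>
    #include <sstream>
    #include <string>

    // Indices are whatever operand words remain past the fixed header.
    constexpr uint32_t kMaxNumIndices = 255;

    std::string CheckIndexCount(uint32_t num_words, uint32_t word_index) {
      const uint32_t num_indices = num_words - word_index;
      if (num_indices > kMaxNumIndices) {
        std::ostringstream msg;
        msg << "No more than " << kMaxNumIndices << " indexes allowed. Found "
            << num_indices << " indexes.";
        return msg.str();
      }
      return "";  // empty string means the count is valid
    }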
|
/external/v8/src/runtime/ |
D | runtime-array.cc |
      71  int num_indices = keys.is_null() ? limit : keys->length();  in RemoveArrayHolesGeneric() local
      82  for (int i = 0; i < num_indices; ++i) {  in RemoveArrayHolesGeneric()
     139  for (int i = num_indices - 1; i >= 0; --i) {  in RemoveArrayHolesGeneric()
     348  uint32_t num_indices = keys->length();  in CopyFromPrototype() local
     349  for (uint32_t i = 0; i < num_indices; ++i) {  in CopyFromPrototype()
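
RemoveArrayHolesGeneric() walks the collected keys forward to gather values (line 82) and backward to clear slots (line 139); deleting in descending index order is the standard way to keep not-yet-visited indices valid. An illustrative sketch of that idiom, not V8's actual hole compaction:

    #include <cstddef>
    #include <vector>

    // Erase by index from the back so earlier indices stay meaningful.
    void EraseIndicesDescending(std::vector<int>* v,
                                const std::vector<std::size_t>& sorted_indices) {
      for (std::size_t i = sorted_indices.size(); i-- > 0;)
        v->erase(v->begin() +
                 static_cast<std::ptrdiff_t>(sorted_indices[i]));
    }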
|