/external/tensorflow/tensorflow/core/util/sparse/ |
D | sparse_tensor.h |
    221  static inline int GetSliceIndex(const int dim, const int split_size,  in GetSliceIndex() argument
    223    DCHECK_GT(split_size, 0);  in GetSliceIndex()
    225    if (residual == 0) return dim / split_size;  in GetSliceIndex()
    226    const int offset = residual * (split_size + 1);  in GetSliceIndex()
    228    return dim / (split_size + 1);  in GetSliceIndex()
    230    return residual + ((dim - offset) / split_size);  in GetSliceIndex()
    235  static inline int GetDimensionInSlice(const int dim, const int split_size,  in GetDimensionInSlice() argument
    237    DCHECK_GT(split_size, 0);  in GetDimensionInSlice()
    239    if (residual == 0) return dim % split_size;  in GetDimensionInSlice()
    240    const int offset = residual * (split_size + 1);  in GetDimensionInSlice()
    [all …]
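The excerpt only shows fragments, but the arithmetic is the usual "first residual slices are one element larger" scheme. A minimal standalone sketch, assuming residual = dim_size % num_split and split_size = dim_size / num_split (names suffixed "Sketch" are illustrative, not the TF functions):

    #include <cassert>

    // Which slice does coordinate `dim` land in, when the first `residual`
    // slices hold split_size + 1 elements and the rest hold split_size?
    int GetSliceIndexSketch(int dim, int split_size, int residual) {
      assert(split_size > 0);
      if (residual == 0) return dim / split_size;        // all slices equal
      const int offset = residual * (split_size + 1);    // end of the larger slices
      if (dim < offset) return dim / (split_size + 1);   // inside a larger slice
      return residual + (dim - offset) / split_size;     // inside a smaller slice
    }

    // Companion: offset of `dim` within its slice, mirroring the division above.
    int GetDimensionInSliceSketch(int dim, int split_size, int residual) {
      assert(split_size > 0);
      if (residual == 0) return dim % split_size;
      const int offset = residual * (split_size + 1);
      if (dim < offset) return dim % (split_size + 1);
      return (dim - offset) % split_size;
    }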
|
/external/tensorflow/tensorflow/core/kernels/ |
D | concat_lib_gpu_impl.cu.cc |
    38   GpuDeviceArrayStruct<const T*> input_ptr_data, int split_size,  in concat_fixed_kernel() argument
    46   IntType split = gidx / split_size;  in concat_fixed_kernel()
    48   IntType col_offset = gidx % split_size;  in concat_fixed_kernel()
    52   input_ptr[gidy * split_size + col_offset];  in concat_fixed_kernel()
    141  bool fixed_size, int split_size,  in ConcatGPUImpl() argument
    149  config.thread_per_block, 0, gpu_device.stream(), input_ptrs, split_size,  in ConcatGPUImpl()
    195  int split_size, typename TTypes<T, 2>::Matrix* output);
    202  int split_size, typename TTypes<T, 2>::Matrix* output);
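The indexing in concat_fixed_kernel assumes every input contributes the same column width split_size, so output column gidx maps to input gidx / split_size at column gidx % split_size. A CPU-side sketch of the same mapping (ConcatFixedColumns is a hypothetical helper, not the TF kernel API):

    #include <vector>

    // Concatenate equally wide row-major matrices along the column axis.
    void ConcatFixedColumns(const std::vector<const float*>& input_ptrs,
                            int rows, int split_size, float* output) {
      const int out_cols = static_cast<int>(input_ptrs.size()) * split_size;
      for (int gidy = 0; gidy < rows; ++gidy) {
        for (int gidx = 0; gidx < out_cols; ++gidx) {
          const int split = gidx / split_size;        // which input tensor
          const int col_offset = gidx % split_size;   // column within that input
          output[gidy * out_cols + gidx] =
              input_ptrs[split][gidy * split_size + col_offset];
        }
      }
    }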
|
D | split_op.cc |
    237  auto make_sizes = [&](Eigen::DenseIndex split_size) {  in Compute() argument
    238    return Eigen::DSizes<Eigen::DenseIndex, 2>{split_size, suffix_dim_size};  in Compute()
    240  auto reshape_result = [&](Tensor* result, Eigen::DenseIndex split_size) {  in Compute() argument
    241    return result->shaped<T, 2>({split_size, suffix_dim_size});  in Compute()
    250  auto make_sizes = [&](Eigen::DenseIndex split_size) {  in Compute() argument
    251    return Eigen::DSizes<Eigen::DenseIndex, 3>{prefix_dim_size, split_size,  in Compute()
    254  auto reshape_result = [&](Tensor* result, Eigen::DenseIndex split_size) {  in Compute() argument
    256    {prefix_dim_size, split_size, suffix_dim_size});  in Compute()
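The lambdas above view each output as (prefix_dim_size, split_size, suffix_dim_size): dimensions before the split axis are flattened into a prefix and dimensions after it into a suffix, so extracting one split is a strided copy over the middle dimension. A sketch under that assumption, with SplitAlongAxis as a hypothetical helper and even divisibility assumed:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Copy split `index` (of `num_split` equal splits along `axis`) out of a
    // row-major tensor with the given shape.
    std::vector<float> SplitAlongAxis(const std::vector<float>& input,
                                      const std::vector<int64_t>& shape,
                                      int axis, int64_t num_split, int64_t index) {
      int64_t prefix = 1, suffix = 1;
      for (int d = 0; d < axis; ++d) prefix *= shape[d];
      for (size_t d = axis + 1; d < shape.size(); ++d) suffix *= shape[d];
      const int64_t split_size = shape[axis] / num_split;  // assumes even split
      std::vector<float> out(prefix * split_size * suffix);
      for (int64_t p = 0; p < prefix; ++p) {
        for (int64_t s = 0; s < split_size; ++s) {
          const float* src =
              &input[(p * shape[axis] + index * split_size + s) * suffix];
          std::copy(src, src + suffix, &out[(p * split_size + s) * suffix]);
        }
      }
      return out;
    }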
|
D | ragged_tensor_to_variant_op.cc |
    84   int split_size = -1;  in UnbatchRaggedZerothDim() local
    87   split_size =  in UnbatchRaggedZerothDim()
    92   split_size = ragged_component_splits_vec[j - 1](last_index) + 1;  in UnbatchRaggedZerothDim()
    95   Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({split_size})));  in UnbatchRaggedZerothDim()
    100  for (int k = 1; k < split_size; k++, index[j]++) {  in UnbatchRaggedZerothDim()
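The split_size here follows the row-splits convention: a splits vector for N rows has N + 1 entries, and the row count of the next nesting level is the last value of the splits one level up. A sketch of unbatching one batch element's inner splits under that convention (UnbatchInnerSplits is illustrative; the op works on Tensors, not std::vector):

    #include <cstdint>
    #include <vector>

    // Extract the inner row-splits belonging to batch element `batch_index`,
    // re-based so that the result starts at 0.
    std::vector<int64_t> UnbatchInnerSplits(const std::vector<int64_t>& outer_splits,
                                            const std::vector<int64_t>& inner_splits,
                                            int64_t batch_index) {
      const int64_t row_begin = outer_splits[batch_index];
      const int64_t row_end = outer_splits[batch_index + 1];
      const int64_t split_size = (row_end - row_begin) + 1;  // rows + 1 entries
      std::vector<int64_t> result(split_size);
      const int64_t base = inner_splits[row_begin];          // shift to start at 0
      for (int64_t k = 0; k < split_size; ++k) {
        result[k] = inner_splits[row_begin + k] - base;
      }
      return result;
    }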
|
D | split_v_op.cc |
    302  auto make_sizes = [&](Eigen::DenseIndex split_size) {  in Compute() argument
    303    return Eigen::DSizes<Eigen::DenseIndex, 2>{split_size, suffix_dim_size};  in Compute()
    305  auto reshape_result = [&](Tensor* result, Tlen split_size) {  in Compute() argument
    306    return result->shaped<T, 2>({split_size, suffix_dim_size});  in Compute()
    315  auto make_sizes = [&](Eigen::DenseIndex split_size) {  in Compute() argument
    316    return Eigen::DSizes<Eigen::DenseIndex, 3>{prefix_dim_size, split_size,  in Compute()
    319  auto reshape_result = [&](Tensor* result, Tlen split_size) {  in Compute() argument
    321    {prefix_dim_size, split_size, suffix_dim_size});  in Compute()
|
D | concat_lib_gpu.h |
    62   int split_size, typename TTypes<T, 2>::Matrix* output); \
    67   int split_size, typename TTypes<T, 2>::Matrix* output);
|
D | ragged_tensor_from_variant_op.cc |
    117  int split_size = 1;  in NestedStackRaggedTensors() local
    120  split_size += ragged_components[j].splits(i).NumElements() - 1;  in NestedStackRaggedTensors()
    124  Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({split_size})));  in NestedStackRaggedTensors()
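This is the stacking direction: merging K ragged components produces a splits vector of size 1 + sum over components of (len(splits) - 1), with each component's splits shifted by the values already emitted. A sketch of that merge over plain vectors (StackRowSplits is an illustrative helper, not the op itself):

    #include <cstdint>
    #include <vector>

    // Merge per-component row-splits into one row-splits vector.
    std::vector<int64_t> StackRowSplits(
        const std::vector<std::vector<int64_t>>& component_splits) {
      int64_t split_size = 1;  // the leading 0
      for (const auto& s : component_splits) {
        split_size += static_cast<int64_t>(s.size()) - 1;
      }
      std::vector<int64_t> merged;
      merged.reserve(split_size);
      merged.push_back(0);
      int64_t value_offset = 0;
      for (const auto& s : component_splits) {
        for (size_t i = 1; i < s.size(); ++i) merged.push_back(s[i] + value_offset);
        value_offset += s.back();  // values contributed by this component
      }
      return merged;
    }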
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | all_to_all_decomposer.cc |
    63   int64 split_size =  in ExpandInstruction() local
    75   new_all_to_all_shape.add_dimensions(split_size);  in ExpandInstruction()
    98   slice_limits[split_dim] = split_size;  in ExpandInstruction()
    100  slice_shape.set_dimensions(split_dim, split_size);  in ExpandInstruction()
    110  slice_limits[split_dim] += split_size;  in ExpandInstruction()
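The slicing loop implied by lines 98 and 110 advances a window of width split_size along the split dimension, one slice per step. A sketch of that pattern over a flat rows x dim_size buffer, assuming split_size = dim_size / split_count divides evenly (SliceAlongDim is a hypothetical helper, not the XLA pass):

    #include <cstdint>
    #include <vector>

    // Cut the last dimension of a 2D row-major buffer into split_count slices.
    std::vector<std::vector<float>> SliceAlongDim(const std::vector<float>& data,
                                                  int64_t rows, int64_t dim_size,
                                                  int64_t split_count) {
      const int64_t split_size = dim_size / split_count;
      std::vector<std::vector<float>> slices(split_count);
      for (int64_t s = 0; s < split_count; ++s) {
        const int64_t start = s * split_size;   // slice window advances by split_size
        std::vector<float>& out = slices[s];
        out.resize(rows * split_size);
        for (int64_t r = 0; r < rows; ++r) {
          for (int64_t c = 0; c < split_size; ++c) {
            out[r * split_size + c] = data[r * dim_size + start + c];
          }
        }
      }
      return slices;
    }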
|
/external/tflite-support/tensorflow_lite_support/custom_ops/kernel/sentencepiece/ |
D | sentencepiece_detokenizer_tflite.cc |
    72   const int split_size = input_splits_data[i + 1] - input_splits_data[i];  in Eval() local
    75   input_encoded_data + input_offset + split_size,  in Eval()
    81   input_offset += split_size;  in Eval()
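Here split_size is simply the difference of adjacent row-split values: batch element i owns the codes in [splits[i], splits[i+1]), and the running offset advances by split_size each iteration. A sketch of that traversal, assuming splits[0] == 0; DecodeOne is a hypothetical stand-in for the detokenizer call:

    #include <vector>

    // Invoke DecodeOne(ptr, length) once per batch element described by `splits`.
    template <typename DecodeFn>
    void ForEachSplit(const std::vector<int>& splits, const std::vector<int>& codes,
                      DecodeFn DecodeOne) {
      int input_offset = 0;
      for (size_t i = 0; i + 1 < splits.size(); ++i) {
        const int split_size = splits[i + 1] - splits[i];  // codes in element i
        DecodeOne(codes.data() + input_offset, split_size);
        input_offset += split_size;
      }
    }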
|
D | sentencepiece_detokenizer_op.cc |
    64   const int split_size = input_splits_flat(i + 1) - input_splits_flat(i);  in Compute() local
    66   codes_for_split.reserve(split_size);  in Compute()
    67   for (int j = 0; j < split_size; ++j) {  in Compute()
|
/external/tensorflow/tensorflow/lite/micro/kernels/ |
D | split_v.cc |
    40   int64_t split_size = 0;  in SplitImpl() local
    43   split_size +=  in SplitImpl()
    46   TFLITE_DCHECK_EQ(split_size, input_dims->data[axis_value]);  in SplitImpl()
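The accumulated split_size enforces the SplitV invariant: the outputs' extents along the split axis must sum exactly to the input's extent on that axis (the split.cc and reference_ops.h entries below check the same thing). A sketch of the check with plain asserts standing in for TFLITE_DCHECK_EQ:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Verify that output sizes along `axis` add up to the input size on `axis`.
    void CheckSplitSizes(const std::vector<int>& input_dims,
                         const std::vector<std::vector<int>>& output_dims,
                         int axis) {
      int64_t split_size = 0;
      for (const auto& dims : output_dims) {
        split_size += dims[axis];
      }
      assert(split_size == input_dims[axis]);
    }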
|
D | split.cc |
    42   int64_t split_size = output_dims->data[axis] * output_count;  in SplitImpl() local
    44   TFLITE_DCHECK_EQ(split_size, input_dims->data[axis]);  in SplitImpl()
|
D | split_v_test.cc |
    54   constexpr int split_size = 1;  in TestSplitVFloat() local
    58   input_size + output_size + axis_size + split_size;  in TestSplitVFloat()
|
/external/libgav1/libgav1/src/tile/ |
D | tile.cc |
    2274  const BlockSize split_size = kSubSize[kPartitionSplit][block_size];  in ProcessPartition() local
    2311  if (!ProcessBlock(row4x4, column4x4, split_size, scratch_buffer,  in ProcessPartition()
    2313      !ProcessBlock(row4x4, column4x4 + half_block4x4, split_size,  in ProcessPartition()
    2323      !ProcessBlock(row4x4 + half_block4x4, column4x4, split_size,  in ProcessPartition()
    2326      split_size, scratch_buffer, residual)) {  in ProcessPartition()
    2331  if (!ProcessBlock(row4x4, column4x4, split_size, scratch_buffer,  in ProcessPartition()
    2333      !ProcessBlock(row4x4 + half_block4x4, column4x4, split_size,  in ProcessPartition()
    2343      !ProcessBlock(row4x4, column4x4 + half_block4x4, split_size,  in ProcessPartition()
    2346      split_size, scratch_buffer, residual)) {  in ProcessPartition()
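Here split_size is the block size of a split sub-partition. A schematic of the plain 4-way split case only (the excerpt also covers mixed partitions): the four quadrants are visited at (row, col), (row, col + half), (row + half, col), (row + half, col + half). ProcessBlock below is a stand-in that just reports coordinates, not the decoder's function:

    #include <cstdio>

    // Report one sub-block; a real decoder would parse and reconstruct it here.
    bool ProcessBlock(int row4x4, int column4x4, int size4x4) {
      std::printf("block at (%d, %d), size %dx%d (4x4 units)\n",
                  row4x4, column4x4, size4x4, size4x4);
      return true;
    }

    // Process a PARTITION_SPLIT node: four half-sized sub-blocks.
    bool ProcessSplitPartition(int row4x4, int column4x4, int block4x4) {
      const int half_block4x4 = block4x4 >> 1;
      const int split_size4x4 = half_block4x4;  // split sub-blocks are half-sized
      return ProcessBlock(row4x4, column4x4, split_size4x4) &&
             ProcessBlock(row4x4, column4x4 + half_block4x4, split_size4x4) &&
             ProcessBlock(row4x4 + half_block4x4, column4x4, split_size4x4) &&
             ProcessBlock(row4x4 + half_block4x4, column4x4 + half_block4x4,
                          split_size4x4);
    }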
|
/external/tensorflow/tensorflow/python/ops/ragged/ |
D | ragged_array_ops.py |
    198  split_size = math_ops.cumprod(mask_shape) + 1
    201  masked_splits = math_ops.range(split_size[dim]) * elt_size
|
/external/tensorflow/tensorflow/python/distribute/ |
D | cross_device_ops.py |
    743  split_size = total_grad_size // num_splits
    744  split_size_last = total_grad_size - split_size * (num_splits - 1)
    745  split_sizes = [split_size] * (num_splits - 1) + [split_size_last]
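The gradient buffer is cut into num_splits chunks of total_grad_size // num_splits elements, with the last chunk absorbing the remainder; for total size 10 and 4 splits this gives {2, 2, 2, 4}. The same arithmetic as a small C++ sketch (MakeSplitSizes is an illustrative name):

    #include <cstdint>
    #include <vector>

    // Return num_splits chunk sizes whose sum is total_size; only the last
    // chunk differs, taking whatever the floor division leaves over.
    std::vector<int64_t> MakeSplitSizes(int64_t total_size, int num_splits) {
      const int64_t split_size = total_size / num_splits;
      const int64_t split_size_last = total_size - split_size * (num_splits - 1);
      std::vector<int64_t> split_sizes(num_splits - 1, split_size);
      split_sizes.push_back(split_size_last);
      return split_sizes;
    }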
|
D | tpu_strategy.py |
    533  split_size = partition_dimensions[dim_index]
    534  if dim_size % split_size != 0:
    538  dim_index, split_size))
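Here split_size is the requested partition count for one dimension, and partitioning is rejected unless the dimension divides evenly. A sketch of that validation, assuming one partition count per dimension (CheckPartitionable is an illustrative helper, not the TPU strategy code):

    #include <cstdint>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Reject shapes whose dimensions are not evenly divisible by the
    // requested per-dimension partition counts.
    void CheckPartitionable(const std::vector<int64_t>& shape,
                            const std::vector<int64_t>& partition_dimensions) {
      for (size_t dim_index = 0; dim_index < shape.size(); ++dim_index) {
        const int64_t split_size = partition_dimensions[dim_index];
        if (shape[dim_index] % split_size != 0) {
          throw std::invalid_argument(
              "dimension " + std::to_string(dim_index) +
              " is not divisible by split_size " + std::to_string(split_size));
        }
      }
    }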
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | reference_ops.h |
    842  int64_t split_size = 0;  in Split() local
    850  split_size += output_shapes[i]->Dims(axis);  in Split()
    852  TFLITE_DCHECK_EQ(split_size, input_shape.Dims(axis));  in Split()
|
/external/libaom/libaom/av1/decoder/ |
D | decodeframe.c |
    1022  int min_txs, int split_size, int txs,  in set_inter_tx_size() argument
    1024  for (int idy = 0; idy < tx_size_high_unit[split_size];  in set_inter_tx_size()
    1026  for (int idx = 0; idx < tx_size_wide_unit[split_size];  in set_inter_tx_size()
|