Searched refs:dimensions_size (Results 1 – 25 of 83) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/service/
gather_expander.cc
33 if (start_indices_shape.dimensions_size() == index_vector_dim) { in TransposeIndexVectorDimToLast()
37 if (index_vector_dim == (start_indices_shape.dimensions_size() - 1)) { in TransposeIndexVectorDimToLast()
42 permutation.reserve(start_indices_shape.dimensions_size()); in TransposeIndexVectorDimToLast()
43 for (int64 i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) { in TransposeIndexVectorDimToLast()
63 index_vector_dim == start_indices->shape().dimensions_size(); in CanonicalizeGatherIndices()
73 if (shape.dimensions_size() == index_dims_in_start_indices) { in CanonicalizeGatherIndices()
80 shape.dimensions_size() - index_dims_in_start_indices); in CanonicalizeGatherIndices()
90 batch_dim_bounds.reserve(start_indices_shape.dimensions_size()); in AdjustBatchDimsInAccumulator()
91 for (int64 i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) { in AdjustBatchDimsInAccumulator()
159 bool has_scalar_indices = start_indices->shape().dimensions_size() == 1; in GatherLoopBody()
[all …]
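
Note: the recurring pattern in gather_expander.cc is building a permutation that moves the index vector dimension to the last position, with dimensions_size() supplying the rank. A minimal standalone sketch of that pattern, with plain vectors standing in for xla::Shape (hypothetical helper, not the XLA function itself):

    #include <cstdint>
    #include <vector>

    // Build a permutation that moves `index_vector_dim` to the last position
    // while preserving the relative order of all other dimensions.
    std::vector<int64_t> IndexVectorDimToLast(int64_t rank,
                                              int64_t index_vector_dim) {
      std::vector<int64_t> permutation;
      permutation.reserve(rank);
      for (int64_t i = 0; i < rank; ++i) {
        if (i != index_vector_dim) permutation.push_back(i);
      }
      permutation.push_back(index_vector_dim);
      return permutation;
    }

For rank 4 and index_vector_dim 1 this yields {0, 2, 3, 1}; when the index vector dimension is already last, the expander skips the transpose entirely (the early-return check at line 37 above).
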
convolution_4d_expander_test.cc
47 EXPECT_EQ(root->window().dimensions_size(), 4); in TEST_F()
55 EXPECT_EQ(new_convolution->window().dimensions_size(), 2); in TEST_F()
72 EXPECT_EQ(root->window().dimensions_size(), 4); in TEST_F()
82 EXPECT_EQ(new_convolution->window().dimensions_size(), 3); in TEST_F()
99 EXPECT_EQ(root->window().dimensions_size(), 4); in TEST_F()
107 EXPECT_EQ(new_convolution->window().dimensions_size(), 0); in TEST_F()
124 EXPECT_EQ(root->window().dimensions_size(), 3); in TEST_F()
143 EXPECT_EQ(root->window().dimensions_size(), 4); in TEST_F()
162 EXPECT_EQ(root->window().dimensions_size(), 4); in TEST_F()
scatter_expander.cc
35 if (scatter_indices_shape.dimensions_size() == index_vector_dim) { in TransposeIndexVectorDimToLast()
39 if (index_vector_dim == (scatter_indices_shape.dimensions_size() - 1)) { in TransposeIndexVectorDimToLast()
44 permutation.reserve(scatter_indices_shape.dimensions_size()); in TransposeIndexVectorDimToLast()
45 for (int64 i = 0, e = scatter_indices_shape.dimensions_size(); i < e; i++) { in TransposeIndexVectorDimToLast()
70 index_vector_dim == scatter_indices->shape().dimensions_size(); in CanonicalizeScatterIndices()
80 if (shape.dimensions_size() == index_dims_in_scatter_indices) { in CanonicalizeScatterIndices()
87 shape.dimensions_size() - index_dims_in_scatter_indices); in CanonicalizeScatterIndices()
117 int64 num_scatter_dims = scatter_indices_shape.dimensions_size(); in AdjustScatterDims()
118 if (index_vector_dim < scatter_indices_shape.dimensions_size()) { in AdjustScatterDims()
229 bool has_scalar_indices = scatter_indices->shape().dimensions_size() == 1; in ScatterLoopBody()
[all …]
shape_inference.cc
165 if (window.dimensions_size() != base_shape.rank()) { in InferWindowOutputShape()
168 window.dimensions_size(), base_shape.rank()); in InferWindowOutputShape()
171 std::vector<int64> output_dimensions(window.dimensions_size()); in InferWindowOutputShape()
172 std::vector<bool> output_is_dynamic(window.dimensions_size()); in InferWindowOutputShape()
173 for (int64 i = 0; i < window.dimensions_size(); ++i) { in InferWindowOutputShape()
442 for (int64 i = 0; i < shape->dimensions_size(); ++i) { in InferConcatOpShape()
540 if (operand_shape.rank() != padding_config.dimensions_size()) { in InferPadShape()
566 for (int64 i = 0; i < operand_shape.dimensions_size(); ++i) { in InferPadShape()
839 for (int i = 0; i < smaller_shape.dimensions_size(); ++i) { in InferInDimBroadcastShape()
846 if (dimension_to_match >= larger_shape.dimensions_size()) { in InferInDimBroadcastShape()
[all …]
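
Note: InferWindowOutputShape first checks that there is one window dimension per base-shape dimension, then computes one output extent per dimension. A simplified sketch of that per-dimension bound, ignoring base and window dilation (which the real code handles through window_util) and assuming positive strides:

    #include <cstdint>
    #include <vector>

    // Valid-style bound: pad the base, then count how many stride steps fit
    // a window of `window_size`. Assumes stride > 0.
    int64_t WindowOutputBound(int64_t base, int64_t pad_lo, int64_t pad_hi,
                              int64_t window_size, int64_t stride) {
      const int64_t padded = base + pad_lo + pad_hi;
      return padded < window_size ? 0 : (padded - window_size) / stride + 1;
    }

    // One output extent per base dimension, mirroring the loop over
    // window.dimensions_size() above.
    std::vector<int64_t> InferWindowOutputDims(
        const std::vector<int64_t>& base_dims,
        const std::vector<int64_t>& window_sizes,
        const std::vector<int64_t>& strides) {
      std::vector<int64_t> out(base_dims.size());
      for (size_t i = 0; i < base_dims.size(); ++i) {
        out[i] = WindowOutputBound(base_dims[i], 0, 0, window_sizes[i],
                                   strides[i]);
      }
      return out;
    }
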
indexed_array_analysis.cc
203 std::vector<IndexComponent> simulated_index(a->shape().dimensions_size(), in FoldGatherOfGather()
256 if (dim_numbers.index_vector_dim() != indices->shape().dimensions_size()) { in ComputeArrayForGather()
278 for (int64 i = 0, e = source->shape().dimensions_size(); i < e; i++) { in ComputeArrayForGather()
292 for (int64 i = 0, e = shape.dimensions_size(); i < e; i++) { in ComputeArrayForGather()
478 for (int64 i = 0, e = source_shape.dimensions_size(); i < e; i++) { in ReshapeToRemoveDegenerateDims()
500 for (int64 i = 0, e = shape.dimensions_size(); i < e; i++) { in ReshapeToRemoveDegenerateDims()
536 operand->shape().dimensions_size()); in ReshapeToAddDegenerateDims()
623 for (int64 i = 0, e = shape.dimensions_size(); i < e; i++) { in FoldReshapeOfGather()
892 broadcast_instr->shape().dimensions_size(), IndexComponent::Broadcasted); in ComputeArrayForElementwiseBinaryOp()
hlo_creation_utils.cc
443 CHECK_GE(operand_shape.dimensions_size(), n); in CollapseFirstNDims()
450 new_shape_dims.reserve(operand_shape.dimensions_size() - n + 1); in CollapseFirstNDims()
468 new_shape_dims.reserve(n + operand_shape.dimensions_size()); in PrependDegenerateDims()
476 CHECK_GT(operand->shape().dimensions_size(), 0); in ExpandFirstDimIntoNDims()
481 operand->shape().dimensions_size() - 1); in ExpandFirstDimIntoNDims()
506 operand_shape.dimensions_size() + dims_to_insert.size(); in InsertDegenerateDims()
536 CHECK_EQ(operand->shape().dimensions_size(), 1); in PadVectorWithZeros()
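Note: in hlo_creation_utils.cc the reserve() calls size the new dimension vector directly from dimensions_size(); CollapseFirstNDims, for instance, reserves rank - n + 1 entries because the leading n dimensions fold into one. A shape-arithmetic sketch of that step, with plain vectors standing in for xla::Shape:

    #include <cstdint>
    #include <vector>

    // Fold the first n dimensions into a single one whose extent is their
    // product; the result has (rank - n + 1) dimensions. Assumes 0 < n <= rank.
    std::vector<int64_t> CollapseFirstNDims(const std::vector<int64_t>& dims,
                                            int64_t n) {
      std::vector<int64_t> new_dims;
      new_dims.reserve(dims.size() - n + 1);
      int64_t collapsed = 1;
      for (int64_t i = 0; i < n; ++i) collapsed *= dims[i];
      new_dims.push_back(collapsed);
      for (size_t i = n; i < dims.size(); ++i) new_dims.push_back(dims[i]);
      return new_dims;
    }
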
hlo_sharding.cc
269 return std::vector<int64>(shape.dimensions_size(), 0); in TileOffsetForDevice()
272 CHECK_EQ(shape.dimensions_size(), tile_assignment_.num_dimensions() - 1); in TileOffsetForDevice()
274 CHECK_EQ(shape.dimensions_size(), tile_assignment_.num_dimensions()); in TileOffsetForDevice()
295 CHECK_EQ(shape.dimensions_size() + (ReplicateOnLastTileDim() ? 1 : 0), in TileLimitForDevice()
572 for (int64 i = 0; i < shape.dimensions_size(); ++i) { in TileShape()
hlo_evaluator_typed_visitor.h
1106 CHECK_EQ(window.dimensions_size(), num_spatial_dims);
1282 CHECK_EQ(window.dimensions_size(), num_spatial_dims);
1538 pad->padding_config().dimensions_size());
1588 std::vector<int64> zero_base(evaluated_operand.shape().dimensions_size(),
1590 std::vector<int64> step(evaluated_operand.shape().dimensions_size(), 1);
2067 if (indices.shape().dimensions_size() != index_vector_dim) {
2083 int64 updates_rank = updates_shape.dimensions_size();
2102 int64 updates_rank = updates_shape.dimensions_size();
2132 for (int64 i = 0; i < updates_shape.dimensions_size(); i++) {
2137 for (int64 i = 0; i < input_shape.dimensions_size(); i++) {
[all …]
triangular_solve_expander_test.cc
85 EXPECT_EQ(x_shape.dimensions_size(), 2); in TEST_P()
/external/tensorflow/tensorflow/compiler/xla/client/lib/
sorting.cc
32 int last_dim = input_shape.dimensions_size() - 1; in TopK()
63 std::vector<int64> start_indices(input_shape.dimensions_size(), 0); in TopK()
66 std::vector<int64> strides(input_shape.dimensions_size(), 1); in TopK()
80 int last_dim = input_shape.dimensions_size() - 1; in TopKWithPartitions()
126 std::vector<int64> start_indices(input_shape.dimensions_size(), 0); in TopKWithPartitions()
128 std::vector<int64> strides(input_shape.dimensions_size(), 1); in TopKWithPartitions()
141 std::vector<int64> start_indices(input_shape.dimensions_size(), 0); in TopKWithPartitions()
143 std::vector<int64> strides(input_shape.dimensions_size(), 1); in TopKWithPartitions()
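Note: TopK's recurring pattern is a start-indices vector of zeros and a strides vector of ones, both sized by dimensions_size(), with only the last dimension narrowed after the sort. A sketch of those slice bounds (hypothetical helper; the real code feeds such vectors to xla::Slice):

    #include <cstdint>
    #include <vector>

    struct SliceBounds {
      std::vector<int64_t> starts, limits, strides;
    };

    // Full-extent slice in every dimension except the last, which keeps only
    // the top k entries. Assumes a non-empty dims vector.
    SliceBounds TopKSliceBounds(const std::vector<int64_t>& dims, int64_t k) {
      SliceBounds b;
      b.starts.assign(dims.size(), 0);   // zero start index per dimension
      b.limits = dims;                   // full extent...
      b.limits.back() = k;               // ...except the last dim, clipped to k
      b.strides.assign(dims.size(), 1);  // unit stride per dimension
      return b;
    }
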
quantize.h
124 std::vector<int64> shift_transpose_dimensions(shape.dimensions_size());
128 shape.dimensions_size());
159 std::vector<int64> transpose_dimensions(shape.dimensions_size());
176 std::vector<int64> result_dimensions(shape.dimensions_size());
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
ir_emission_utils.cc
105 dnums.input_feature_dimension() == input_shape.dimensions_size() - 1 && in PotentiallyImplementedAsEigenConvolution()
108 output_shape.dimensions_size() - 1 && in PotentiallyImplementedAsEigenConvolution()
110 kernel_shape.dimensions_size() - 2 && in PotentiallyImplementedAsEigenConvolution()
112 kernel_shape.dimensions_size() - 1; in PotentiallyImplementedAsEigenConvolution()
cpu_layout_assignment.cc
82 std::vector<int64> dimension_order(new_shape.dimensions_size()); in RowMajorShape()
90 std::vector<int64> dimension_order(new_shape.dimensions_size()); in ColMajorShape()
cpu_instruction_fusion.cc
48 hlo->opcode() == HloOpcode::kDot && hlo_shape.dimensions_size() <= 1 && in IsNonComplexNonBatchedMatrixVectorDot()
150 if (output_shape.dimensions_size() <= 1) { in ShouldFuse()
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_conv_padding_legalization.cc
63 MakeNoPaddingConfig(input->shape().dimensions_size()); in MaybePaddedAndSlicedInput()
94 std::vector<int64> start_indices(input->shape().dimensions_size(), 0); in MaybePaddedAndSlicedInput()
97 std::vector<int64> strides(input->shape().dimensions_size(), 1); in MaybePaddedAndSlicedInput()
132 for (size_t i = 0; i < kernel->shape().dimensions_size(); ++i) { in MaybePaddedKernel()
167 for (size_t i = 0; i < new_conv_window.dimensions_size(); ++i) { in CanonicalizeForwardConvolution()
225 for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) { in CanonicalizeBackwardFilterConvolution()
290 for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) { in CanonicalizeBackwardInputConvolution()
351 std::vector<int64> start_indices(new_backward_conv->shape().dimensions_size(), in CanonicalizeBackwardInputConvolution()
356 std::vector<int64> strides(new_backward_conv->shape().dimensions_size(), 1LL); in CanonicalizeBackwardInputConvolution()
357 for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) { in CanonicalizeBackwardInputConvolution()
/external/tensorflow/tensorflow/compiler/xla/
shape.cc
26 dimensions_.reserve(shape_proto.dimensions_size()); in Shape()
35 if (shape_proto.dimensions_size() != in Shape()
45 shape_proto.dimensions_size(), shape_proto.is_dynamic_dimension_size()); in Shape()
61 proto.mutable_dimensions()->Reserve(dimensions_size()); in ToProto()
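Note: on ShapeProto, dimensions_size() is simply the protoc-generated accessor for the repeated dimensions field, and xla::Shape mirrors the same naming for its own dimension list. A minimal mock of that generated API, just to show the accessor trio (size, indexed get, add) the snippets above rely on:

    #include <cstdint>
    #include <vector>

    // Stand-in for what protoc generates from a `repeated int64 dimensions`
    // field (hypothetical minimal mock, not the real ShapeProto).
    class ShapeProtoLike {
     public:
      int dimensions_size() const { return static_cast<int>(dims_.size()); }
      int64_t dimensions(int i) const { return dims_[i]; }
      void add_dimensions(int64_t d) { dims_.push_back(d); }

     private:
      std::vector<int64_t> dims_;
    };

    // Mirrors the reserve-then-copy pattern in Shape's constructor above.
    std::vector<int64_t> CopyDims(const ShapeProtoLike& proto) {
      std::vector<int64_t> out;
      out.reserve(proto.dimensions_size());
      for (int i = 0; i < proto.dimensions_size(); ++i) {
        out.push_back(proto.dimensions(i));
      }
      return out;
    }
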
text_literal_reader.cc
113 if (coordinate_values.size() != shape.dimensions_size()) { in ReadAllLines()
117 shape.dimensions_size(), coordinate_values.size(), line); in ReadAllLines()
index_util.cc
30 DCHECK_EQ(shape.dimensions_size(), multi_index.size()); in MultidimensionalIndexToLinearIndex()
104 std::vector<int64> multi_index(shape.dimensions_size()); in LinearIndexToMultidimensionalIndex()
literal_comparison.cc
174 if (dimension == expected.shape().dimensions_size()) { in Equal()
698 std::vector<int64> multi_index(expected.shape().dimensions_size(), 0); in EqualHelper()
893 if (expected.dimensions_size() != actual.dimensions_size()) { in EqualShapes()
895 expected.dimensions_size(), in EqualShapes()
896 actual.dimensions_size()); in EqualShapes()
898 for (int i = 0; i < expected.dimensions_size(); ++i) { in EqualShapes()
shape_util.cc
330 std::vector<int64> dims(shape.dimensions_size()); in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
331 for (int i = 0; i < shape.dimensions_size(); ++i) { in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
341 for (int i = 0; i < shape.dimensions_size(); ++i) { in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
537 DCHECK_EQ(shape.dimensions_size(), shape.rank()); in ElementsIn()
592 for (int i = 0; i < shape.dimensions_size(); ++i) { in HumanString()
786 if (shape.dimensions_size() != 0) { in ValidateShapeWithOptionalLayoutInternal()
803 if (shape.dimensions_size() != 0) { in ValidateShapeWithOptionalLayoutInternal()
1640 for (int i = 0; i < shape.dimensions_size(); ++i) { in Hash()
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
tensor_list_utils.cc
282 TF_RET_CHECK(dynamic_dims[i].size() == shape.dimensions_size()); in CreateZerosTensorListWithShape()
283 for (int64 dim = 0; dim < shape.dimensions_size(); ++dim) { in CreateZerosTensorListWithShape()
342 for (int64 dim = 0; dim < shape.dimensions_size() - 1; ++dim) { in GetInitializedTensorListForElement()
381 element_part_shape.dimensions_size() + 1, in ExecuteTensorListPushBack()
397 std::vector<xla::XlaOp> start_indices(element_shape.dimensions_size() + 1, in ExecuteTensorListPushBack()
436 std::vector<xla::XlaOp> start_indices(list_part_shape.dimensions_size(), in ExecuteTensorListPopBack()
484 std::vector<xla::XlaOp> start_indices(element_shape.dimensions_size() + 1, in ExecuteTensorListSetItem()
517 std::vector<xla::XlaOp> start_indices(buffer_shape.dimensions_size(), in ExecuteTensorListGetItem()
528 for (int64 i = 1; i < buffer_shape.dimensions_size(); ++i) { in ExecuteTensorListGetItem()
cast_op.cc
165 xla_input_shape.dimensions_size() - 1); in Compile()
191 input_xla_shape.dimensions_size() - 1)); in Compile()
195 {input_xla_shape.dimensions_size() - 1}); in Compile()
conv_op_helpers.cc
50 int64 input_feature_dim = filter_shape.dimensions_size() - 2; in GroupedFilterShapeForDepthwiseConvolution()
51 int64 output_feature_dim = filter_shape.dimensions_size() - 1; in GroupedFilterShapeForDepthwiseConvolution()
69 int num_dims = filter_shape.dimensions_size(); in TransposeFilterForGroupConvolutionBackpropInput()
199 if (input_shape.dimensions_size() != num_dims) { in MakeXlaForwardConvOp()
203 if (filter_shape.dimensions_size() != num_dims) { in MakeXlaForwardConvOp()
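Note: conv_op_helpers.cc derives the filter's feature dimensions from the rank: with the [spatial dims..., input features, output features] filter layout used here, the two feature dimensions are always the last two. A small sketch of that arithmetic:

    #include <cstdint>
    #include <vector>

    struct FilterFeatureDims {
      int64_t input;   // dimensions_size() - 2
      int64_t output;  // dimensions_size() - 1
    };

    FilterFeatureDims GetFilterFeatureDims(
        const std::vector<int64_t>& filter_dims) {
      const int64_t rank = static_cast<int64_t>(filter_dims.size());
      return {rank - 2, rank - 1};
    }
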
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/
loop_emitter.cc
42 CHECK_EQ(dynamic_dims.size(), shape_.dimensions_size()); in LoopEmitter()
101 std::vector<llvm::Value*> array_multi_index(shape_.dimensions_size()); in EmitStaticIndex()
120 std::vector<llvm::Value*> array_multi_index(shape_.dimensions_size()); in EmitDynamicIndex()
/external/tensorflow/tensorflow/compiler/tf2xla/lib/
scatter.cc
140 ? indices_shape.dimensions_size() - 1 in XlaScatter()
141 : indices_shape.dimensions_size()); in XlaScatter()
