
Searched refs:indices_shape (Results 1 – 25 of 66) sorted by relevance


/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
gather_op.cc
36 const xla::XlaOp& indices, const TensorShape& indices_shape, in XlaGather() argument
55 CHECK_GE(indices_shape.dims(), 1); in XlaGather()
56 num_index_dims = indices_shape.dim_size(indices_shape.dims() - 1); in XlaGather()
57 for (int64 i = 0, e = indices_shape.dims() - 1; i < e; i++) { in XlaGather()
58 num_indices *= indices_shape.dim_size(i); in XlaGather()
62 for (int64 i = 0, e = indices_shape.dims(); i < e; i++) { in XlaGather()
63 num_indices *= indices_shape.dim_size(i); in XlaGather()
74 TensorShape indices_shape_no_index_vectors{indices_shape}; in XlaGather()
140 indices_are_nd ? (indices_shape.dims() - 1) : indices_shape.dims(); in XlaGather()
145 dim_numbers.set_index_vector_dim(indices_are_nd ? (indices_shape.dims() - 1) in XlaGather()
[all …]
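The matched lines above compute how many index vectors XlaGather will read from the indices tensor. A minimal numpy sketch of that arithmetic, assuming the same convention (illustrative names, not the tf2xla API):

    import numpy as np

    def gather_index_counts(indices_shape, indices_are_nd):
        # When indices are N-d index vectors, the trailing dimension holds one
        # index vector and the leading dimensions count how many vectors exist.
        if indices_are_nd:
            assert len(indices_shape) >= 1
            num_index_dims = indices_shape[-1]
            num_indices = int(np.prod(indices_shape[:-1], dtype=np.int64))
        else:
            num_index_dims = 1
            num_indices = int(np.prod(indices_shape, dtype=np.int64))
        return num_index_dims, num_indices

    # e.g. gather_index_counts([5, 3, 2], indices_are_nd=True) -> (2, 15)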
scatter_nd_op.cc
35 const TensorShape& indices_shape, in ValidateUpdateShape() argument
37 if (indices_shape.dims() < 1) { in ValidateUpdateShape()
40 indices_shape.DebugString()); in ValidateUpdateShape()
43 const int64 num_index_dims = indices_shape.dim_size(indices_shape.dims() - 1); in ValidateUpdateShape()
44 const int64 batch_dim = indices_shape.dims() - 1; in ValidateUpdateShape()
51 ", indices.shape: ", indices_shape.DebugString(), in ValidateUpdateShape()
66 if (updates_shape.dim_size(d) != indices_shape.dim_size(d)) { in ValidateUpdateShape()
86 TensorShape indices_shape = context->InputShape(0); in Compile() local
99 buffer_shape.num_elements() > 0 || (indices_shape.num_elements() == 0 && in Compile()
103 indices_shape.DebugString())); in Compile()
[all …]
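A hedged sketch of the check ValidateUpdateShape performs on the scatter_nd inputs, simplified from the lines above (error handling and messages differ from the real kernel):

    def validate_update_shape(buffer_shape, indices_shape, updates_shape):
        # indices must have rank >= 1; its last dimension is the number of
        # index coordinates, the leading dimensions are batch dimensions.
        if len(indices_shape) < 1:
            raise ValueError("indices shape must have rank >= 1")
        num_index_dims = indices_shape[-1]
        batch_dim = len(indices_shape) - 1
        # Every batch dimension of updates must match the matching batch
        # dimension of indices.
        for d in range(batch_dim):
            if updates_shape[d] != indices_shape[d]:
                raise ValueError("updates dim %d (%d) != indices dim %d (%d)"
                                 % (d, updates_shape[d], d, indices_shape[d]))
        # The remaining update dimensions must match the buffer dimensions
        # that are not addressed by the index vector.
        if list(updates_shape[batch_dim:]) != list(buffer_shape[num_index_dims:]):
            raise ValueError("inner dims of updates do not match buffer")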
sparse_to_dense_op.cc
31 const TensorShape indices_shape = context->InputShape(0); in Compile() local
32 OP_REQUIRES(context, indices_shape.dims() <= 2, in Compile()
36 indices_shape.DebugString())); in Compile()
38 indices_shape.dims() > 0 ? indices_shape.dim_size(0) : 1; in Compile()
40 indices_shape.dims() > 1 ? indices_shape.dim_size(1) : 1; in Compile()
77 /*indices_are_vectors=*/indices_shape.dims() > 1, in Compile()
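The SparseToDense lines read the number of sparse elements and the index width from indices_shape; a minimal sketch under the same rank conventions (rank 0 scalar index, rank 1 vector of scalar indices, rank 2 matrix of index vectors):

    def sparse_to_dense_dims(indices_shape):
        assert len(indices_shape) <= 2
        num_elems = indices_shape[0] if len(indices_shape) > 0 else 1
        num_index_dims = indices_shape[1] if len(indices_shape) > 1 else 1
        indices_are_vectors = len(indices_shape) > 1
        return num_elems, num_index_dims, indices_are_vectors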
segment_reduction_ops.cc
57 TensorShape indices_shape = ctx->InputShape(1); in Compile() local
62 OP_REQUIRES(ctx, data_shape.dims() >= indices_shape.dims(), in Compile()
67 for (int d = 0; d < indices_shape.dims(); ++d) { in Compile()
69 ctx, (data_shape.dim_size(d) == indices_shape.dim_size(d)), in Compile()
74 " vs. ", indices_shape.dim_size(d))); in Compile()
82 buffer_shape.RemoveDimRange(0, indices_shape.dims()); in Compile()
102 for (int64 i = indices_shape.dims(); i < data_shape.dims(); ++i) { in Compile()
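Both segment-reduction kernels (this one and the TPU copy under core/tpu/kernels/xla below) derive the output buffer shape by stripping the data dimensions covered by the segment indices; a short sketch of that shape logic, with num_segments supplied by the caller:

    def segment_reduction_output_shape(data_shape, indices_shape, num_segments):
        # The leading dims of data must equal indices; the reduction collapses
        # them into a single num_segments dimension.
        assert len(data_shape) >= len(indices_shape)
        for d in range(len(indices_shape)):
            assert data_shape[d] == indices_shape[d], "segment_ids shape mismatch"
        return [num_segments] + list(data_shape[len(indices_shape):])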
dynamic_stitch_op.cc
71 TensorShape indices_shape; in Compile() local
74 &indices_shape)); in Compile()
77 ctx, TensorShapeUtils::StartsWith(data_shape, indices_shape), in Compile()
81 "].shape = ", indices_shape.DebugString())); in Compile()
85 data_shape, indices_shape), in Compile()
88 input_num, "].shape[", indices_shape.dims(), in Compile()
93 "].shape = ", indices_shape.DebugString())); in Compile()
97 {indices_shape.num_elements()}, in Compile()
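DynamicStitch requires each data input's shape to start with the matching indices shape, then flattens the indices to a vector of num_elements; a small numpy sketch of that precondition and flattening (helper name is illustrative):

    import numpy as np

    def check_and_flatten(data, indices):
        data_shape, indices_shape = list(data.shape), list(indices.shape)
        # data.shape must start with indices.shape so that every index
        # addresses a full slice of the remaining dimensions.
        if data_shape[:len(indices_shape)] != indices_shape:
            raise ValueError("data.shape = %s does not start with indices.shape = %s"
                             % (data_shape, indices_shape))
        flat_indices = indices.reshape(indices.size)
        slice_shape = data_shape[len(indices_shape):]
        flat_data = data.reshape([indices.size] + slice_shape)
        return flat_indices, flat_data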
one_hot_op.cc
33 const TensorShape indices_shape = ctx->InputShape(0); in Compile() local
38 const int indices_dims = indices_shape.dims(); in Compile()
68 indices_shape, ctx->Input(0), ctx->Input(2), in Compile()
/external/tensorflow/tensorflow/python/ops/ragged/
ragged_gather_op_test.py
314 dict(params_shape=[3, 4], indices_shape=[], axis=0),
315 dict(params_shape=[3, 4], indices_shape=[5], axis=0),
316 dict(params_shape=[3, 4], indices_shape=[2, 5], axis=0),
318 dict(params_shape=[3, 4], indices_shape=[], axis=1),
319 dict(params_shape=[3, 4], indices_shape=[2], axis=1),
320 dict(params_shape=[3, 4], indices_shape=[2, 5], axis=1),
321 dict(params_shape=[7, 3, 1], indices_shape=[2, 4], axis=1),
322 dict(params_shape=[3, 4, 5, 6], indices_shape=[2, 1, 7], axis=1),
323 dict(params_shape=[7, 3, 5], indices_shape=[], axis=2),
324 dict(params_shape=[7, 3, 5], indices_shape=[2], axis=2),
[all …]
ragged_one_hot_op_test.py
140 dict(indices_shape=[5, 7], depth=6, axis=-1),
141 dict(indices_shape=[5, 7], depth=6, axis=2),
142 dict(indices_shape=[5, 2, 7], depth=3, axis=-1),
143 dict(indices_shape=[5, 2, 7], depth=3, axis=3),
144 dict(indices_shape=[5, 2, 7], depth=3, axis=2),
145 dict(indices_shape=[5, 2, 7, 4], depth=3, axis=-1),
146 dict(indices_shape=[5, 2, 7, 4], depth=3, axis=4),
147 dict(indices_shape=[5, 2, 7, 4], depth=3, axis=3),
148 dict(indices_shape=[5, 2, 7, 4], depth=3, axis=2),
149 dict(indices_shape=[5, 2, 7], depth=3, on_value=True, off_value=False),
[all …]
ragged_gather_ops.py
165 indices_shape = array_ops.shape(indices, out_type=params.row_splits.dtype)
166 shape_cumprod = math_ops.cumprod(indices_shape)
170 target._uniform_row_length = indices_shape[dim + 1]
384 indices_shape = indices.shape
385 indices_ndims = indices_shape.ndims
396 index_size = tensor_shape.dimension_value(indices_shape[-1])
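In the ragged gather path the dense indices shape is turned into nested uniform row lengths via a cumulative product; a rough numpy illustration of the relationship those lines rely on (not the ragged_gather_ops implementation itself):

    import numpy as np

    indices_shape = np.array([2, 3, 4])          # shape of a dense indices tensor
    shape_cumprod = np.cumprod(indices_shape)    # [2, 6, 24]
    # When dimension `dim` of the result is wrapped as a ragged dimension,
    # every row has the same length: the next dense dimension's size.
    dim = 0
    uniform_row_length = indices_shape[dim + 1]  # 3
    nrows = shape_cumprod[dim]                   # 2 rows at this level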
/external/tensorflow/tensorflow/core/tpu/kernels/xla/
segment_reduction_ops.cc
48 TensorShape indices_shape = ctx->InputShape(1); in Compile() local
53 OP_REQUIRES(ctx, data_shape.dims() >= indices_shape.dims(), in Compile()
58 for (int d = 0; d < indices_shape.dims(); ++d) { in Compile()
59 OP_REQUIRES(ctx, (data_shape.dim_size(d) == indices_shape.dim_size(d)), in Compile()
64 indices_shape.dim_size(d))); in Compile()
72 buffer_shape.RemoveDimRange(0, indices_shape.dims()); in Compile()
92 for (int64 i = indices_shape.dims(); i < data_shape.dims(); ++i) { in Compile()
/external/tensorflow/tensorflow/core/kernels/
gather_nd_op.h
63 const TensorShape& indices_shape(indices.shape()); in DoGatherNd()
64 const int64 indices_nd = indices_shape.dim_size(indices_shape.dims() - 1); in DoGatherNd()
68 for (int i = 0; i < indices_shape.dims() - 1; ++i) { in DoGatherNd()
69 N_big *= indices_shape.dim_size(i); in DoGatherNd()
86 for (int i = 0; i < indices_shape.dims() - 1; ++i) { in DoGatherNd()
87 N_result *= indices_shape.dim_size(i); in DoGatherNd()
93 TensorShape result_shape(indices_shape); in DoGatherNd()
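The core gather_nd kernel derives the number of gathered slices and the result shape from indices_shape and the params shape; a minimal numpy model of that computation (simplified, no overflow checks):

    import numpy as np

    def gather_nd_result_shape(params_shape, indices_shape):
        # The trailing dimension of indices indexes the leading `indices_nd`
        # dimensions of params; everything before it is batch.
        indices_nd = indices_shape[-1]
        n_slices = int(np.prod(indices_shape[:-1], dtype=np.int64))
        result_shape = list(indices_shape[:-1]) + list(params_shape[indices_nd:])
        return n_slices, result_shape

    # e.g. gather_nd_result_shape([7, 8, 9], [4, 2]) -> (4, [4, 9])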
sparse_add_op_test.cc
64 const auto indices_shape = TensorShape({4, 2}); in TEST_F() local
71 AddInputFromArray<int64>(indices_shape, indices); \ in TEST_F()
82 Tensor expected_indices(allocator(), DT_INT64, indices_shape); in TEST_F()
104 const auto indices_shape = TensorShape({4, 2}); \
110 AddInputFromArray<int64>(indices_shape, indices); \
161 const auto indices_shape = TensorShape({4, 2}); \
167 auto AddSparseTensor = [indices, indices_shape, shape, \
169 AddInputFromArray<int64>(indices_shape, indices); \
one_hot_op.cc
56 const TensorShape& indices_shape = indices.shape(); in Compute() local
58 const int indices_dims = indices_shape.dims(); in Compute()
85 MultiplyWithoutOverflow(indices_shape.num_elements(), depth_v) >= 0, in Compute()
87 indices_shape.DebugString(), " + [", depth_v, in Compute()
90 TensorShape output_shape = indices_shape; in Compute()
105 prefix_dim_size *= indices_shape.dim_size(i); in Compute()
107 int64 suffix_dim_size = indices_shape.num_elements() / prefix_dim_size; in Compute()
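The dense OneHot kernel inserts the depth dimension into indices_shape at the requested axis and splits the indices elements into a prefix and suffix product; a sketch of the same bookkeeping:

    import numpy as np

    def one_hot_output_shape(indices_shape, depth, axis):
        # Negative axis counts from the end of the output shape.
        axis = axis if axis >= 0 else len(indices_shape) + 1 + axis
        output_shape = list(indices_shape)
        output_shape.insert(axis, depth)
        prefix_dim_size = int(np.prod(indices_shape[:axis], dtype=np.int64))
        total = int(np.prod(indices_shape, dtype=np.int64))
        # Guard against empty shapes; the real kernel handles these separately.
        suffix_dim_size = total // max(prefix_dim_size, 1)
        return output_shape, prefix_dim_size, suffix_dim_size

    # e.g. one_hot_output_shape([5, 7], depth=6, axis=-1) -> ([5, 7, 6], 35, 1)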
sparse_dense_binary_op_shared_test.cc
100 const auto indices_shape = TensorShape({4, 2}); in TEST_F() local
111 AddInputFromArray<int64>(indices_shape, indices); in TEST_F()
129 const auto indices_shape = TensorShape({4, 2}); in TEST_F() local
139 AddInputFromArray<int64>(indices_shape, indices); in TEST_F()
156 const auto indices_shape = TensorShape({4, 2}); in TEST_F() local
166 AddInputFromArray<int64>(indices_shape, indices); in TEST_F()
188 const auto indices_shape = TensorShape({4, 2}); in TEST_F() local
199 AddInputFromArray<int64>(indices_shape, indices); in TEST_F()
sparse_reduce_sum_op_test.cc
54 const auto indices_shape = TensorShape({4, 2}); in TEST_F() local
60 AddInputFromArray<int64>(indices_shape, indices); in TEST_F()
96 const auto indices_shape = TensorShape({4, 2}); in TEST_F() local
102 AddInputFromArray<int64>(indices_shape, indices); in TEST_F()
ragged_gather_op_test.cc
35 const TensorShape& indices_shape, const std::vector<INDEX_TYPE>& indices, in BuildRaggedGatherGraph() argument
42 int64 num_splits = PARAMS_RAGGED_RANK + indices_shape.dims() - 1; in BuildRaggedGatherGraph()
60 AddInputFromArray<INDEX_TYPE>(indices_shape, indices); in BuildRaggedGatherGraph()
/external/tensorflow/tensorflow/python/kernel_tests/array_ops/
gather_op_test.py
114 for indices_shape in (), (0,), (2, 0), (2, 3):
118 indices = np.random.randint(shape[axis], size=indices_shape)
120 indices_shape=indices_shape,
179 for indices_shape in (), (0,), (2, 0), (2, 3):
183 indices = np.random.randint(shape[axis], size=indices_shape)
185 indices_shape=indices_shape,
474 indices_shape=[2, 3, 8, 9, 10],
482 indices_shape=[2, 3, 8, 9, 10],
490 indices_shape=[2, 3, 8, 9, 10],
498 indices_shape=[2, 3, 8, 9, 10],
[all …]
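The gather tests draw random indices of the given indices_shape, bounded by the size of the gathered axis; a standalone numpy illustration of that setup and the resulting output shape (example values are arbitrary):

    import numpy as np

    params_shape, indices_shape, axis = (4, 3, 2), (2, 3), 1
    params = np.random.randn(*params_shape)
    indices = np.random.randint(params_shape[axis], size=indices_shape)
    # Gathering along `axis` replaces that dimension with indices_shape.
    out = np.take(params, indices, axis=axis)
    assert out.shape == params_shape[:axis] + indices_shape + params_shape[axis + 1:]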
/external/tensorflow/tensorflow/compiler/tf2xla/lib/
scatter.cc
41 TF_ASSIGN_OR_RETURN(xla::Shape indices_shape, builder->GetShape(indices)); in XlaScatter()
43 xla::AsInt64Slice(indices_shape.dimensions()); in XlaScatter()
54 xla::ShapeUtil::HumanString(indices_shape), in XlaScatter()
140 ? indices_shape.dimensions_size() - 1 in XlaScatter()
141 : indices_shape.dimensions_size()); in XlaScatter()
190 VLOG(3) << " Indices: " << xla::ShapeUtil::HumanString(indices_shape); in XlaScatter()
/external/tensorflow/tensorflow/python/kernel_tests/
batch_scatter_ops_test.py
57 for indices_shape in (2,), (3, 7), (3, 4, 7):
60 sparse_dim = len(indices_shape) - 1
62 indices_shape[sparse_dim], size=indices_shape, dtype=itype)
64 np.random.randn(*(indices_shape + extra_shape)), vtype)
66 old = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype)
/external/tensorflow/tensorflow/compiler/xla/tests/
test_utils_test.cc
262 const Shape& indices_shape = args[1].shape(); in XLA_TEST_F() local
264 ShapeUtil::Equal(indices_shape, ShapeUtil::MakeShape(S32, {10, 2}))) in XLA_TEST_F()
265 << ShapeUtil::HumanString(indices_shape); in XLA_TEST_F()
300 const Shape& indices_shape = args[1].shape(); in XLA_TEST_F() local
302 ShapeUtil::Equal(indices_shape, ShapeUtil::MakeShape(S32, {10, 2}))) in XLA_TEST_F()
303 << ShapeUtil::HumanString(indices_shape); in XLA_TEST_F()
/external/tensorflow/tensorflow/python/framework/
indexed_slices.py
170 indices_shape = self._indices.shape.merge_with(self._values.shape[:1])
180 dense_shape_dtype, indices_shape)
187 indices_shape = shape[:1]
194 dense_shape_dtype, indices_shape)
215 indices_shape=None): argument
237 self._indices_shape = tensor_shape.as_shape(indices_shape).with_rank(1)
/external/tensorflow/tensorflow/core/ops/
state_ops.cc
118 ShapeHandle indices_shape = c->input(1); in ScatterUpdateShape() local
124 TF_RETURN_IF_ERROR(c->Concatenate(indices_shape, var_subshape, &concat)); in ScatterUpdateShape()
142 ShapeHandle indices_shape; in ScatterNdUpdateShape() local
143 TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &indices_shape)); in ScatterNdUpdateShape()
146 return shape_inference::ScatterNdShapeHelper(c, indices_shape, updates_shape, in ScatterNdUpdateShape()
resource_variable_ops.cc
250 ShapeHandle indices_shape = c->input(1); in __anon4d581ac00402() local
263 c->WithRankAtLeast(indices_shape, batch_dims, &unused)); in __anon4d581ac00402()
275 c->Subshape(indices_shape, batch_dims, &indices_subshape)); in __anon4d581ac00402()
311 ShapeHandle indices_shape = c->input(1); in ResourceScatterUpdateShape() local
317 TF_RETURN_IF_ERROR(c->Concatenate(indices_shape, var_subshape, &concat)); in ResourceScatterUpdateShape()
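Both scatter shape functions above build the expected updates shape by concatenating the indices shape with the variable shape minus its first dimension; a minimal sketch of that rule:

    def expected_scatter_updates_shape(var_shape, indices_shape):
        # updates.shape must equal indices.shape + var.shape[1:], since each
        # index selects one row of the variable to overwrite.
        return list(indices_shape) + list(var_shape[1:])

    # e.g. expected_scatter_updates_shape([10, 4, 3], [2, 5]) -> [2, 5, 4, 3]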
/external/tensorflow/tensorflow/lite/kernels/perception/
max_unpooling_2d.cc
76 const RuntimeShape indices_shape = GetTensorShape(indices); in Prepare() local
78 context, input_shape.DimensionsCount() == indices_shape.DimensionsCount(), in Prepare()
81 TF_LITE_ENSURE_MSG(context, input_shape.Dims(i) == indices_shape.Dims(i), in Prepare()
/external/tensorflow/tensorflow/python/kernel_tests/v1_compat_tests/
scatter_nd_ops_test.py
86 for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
87 num_updates = indices_shape[0]
88 ixdim = indices_shape[-1]
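The test reads the number of updates and the index width straight off indices_shape; a small illustration of generating matching random test data for one ref_shape/indices_shape pair (values are arbitrary, and duplicate indices are not handled here):

    import numpy as np

    ref_shape, indices_shape = (10, 4), (5, 1)
    num_updates = indices_shape[0]    # 5 rows of updates
    ixdim = indices_shape[-1]         # each index addresses 1 leading dim of ref
    indices = np.random.randint(ref_shape[0], size=(num_updates, ixdim))
    updates = np.random.randn(num_updates, *ref_shape[ixdim:])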
