/external/tensorflow/tensorflow/python/framework/ |
D | common_shapes_test.py |
  33  def _assert_incompatible_broadcast(self, shape1, shape2): argument
  34  if shape1.dims is not None and shape2.dims is not None:
  35  zeros1 = np.zeros(shape1.as_list())
  41  self.assertFalse(common_shapes.is_broadcast_compatible(shape1, shape2))
  42  self.assertFalse(common_shapes.is_broadcast_compatible(shape2, shape1))
  44  common_shapes.broadcast_shape(shape1, shape2)
  46  common_shapes.broadcast_shape(shape2, shape1)
  50  def _assert_broadcast(self, expected, shape1, shape2): argument
  51  if shape1.dims is not None and shape2.dims is not None:
  53  zeros1 = np.zeros(shape1.as_list())
  [all …]
|
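The common_shapes_test.py matches above exercise broadcast-compatibility checks. As a rough illustration of what those assertions verify (a minimal NumPy sketch, not the TensorFlow implementation): two shapes are broadcast-compatible when, aligned from the trailing dimension, each pair of dimensions is equal or one of them is 1.

    import numpy as np

    def is_broadcast_compatible(shape1, shape2):
        """Toy re-statement of the NumPy/TF broadcasting rule used in the test."""
        # Align the shapes on their trailing dimensions and compare pairwise.
        for d1, d2 in zip(reversed(shape1), reversed(shape2)):
            if d1 != d2 and d1 != 1 and d2 != 1:
                return False
        return True

    def broadcast_shape(shape1, shape2):
        """Return the broadcast shape, or raise ValueError (mirrors np.broadcast)."""
        return np.broadcast(np.zeros(shape1), np.zeros(shape2)).shape

    assert is_broadcast_compatible((3, 1, 5), (2, 5))
    assert not is_broadcast_compatible((3, 4), (2, 4, 3))
    assert broadcast_shape((3, 1, 5), (2, 5)) == (3, 2, 5)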
/external/tensorflow/tensorflow/python/keras/layers/ |
D | merge.py |
  48  def _compute_elemwise_op_output_shape(self, shape1, shape2): argument
  64  if None in [shape1, shape2]:
  66  elif len(shape1) < len(shape2):
  67  return self._compute_elemwise_op_output_shape(shape2, shape1)
  69  return shape1
  70  output_shape = list(shape1[:-len(shape2)])
  71  for i, j in zip(shape1[-len(shape2):], shape2):
  82  'together with shapes ' + str(shape1) + ' ' + str(shape2))
  489  shape1 = input_shape[0]
  491  if shape1 is None or shape2 is None:
  [all …]
|
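merge.py's _compute_elemwise_op_output_shape combines two (possibly partially unknown) shapes for an element-wise merge layer. The following is a hedged, stand-alone sketch of that rule reconstructed from the fragments above, not the Keras source: unknown dimensions (None) stay unknown, dimensions of 1 broadcast, and mismatched known dimensions are an error.

    def elemwise_output_shape(shape1, shape2):
        """Sketch of combining two shapes for an element-wise op (None = unknown dim)."""
        if shape1 is None or shape2 is None:
            return None
        if len(shape1) < len(shape2):          # make shape1 the longer shape
            return elemwise_output_shape(shape2, shape1)
        if not shape2:                         # scalar second operand
            return shape1
        output_shape = list(shape1[:len(shape1) - len(shape2)])
        for i, j in zip(shape1[len(shape1) - len(shape2):], shape2):
            if i is None or j is None:
                output_shape.append(None)
            elif i == 1:
                output_shape.append(j)
            elif j == 1 or i == j:
                output_shape.append(i)
            else:
                raise ValueError('Operands could not be broadcast together with shapes '
                                 f'{shape1} {shape2}')
        return tuple(output_shape)

    assert elemwise_output_shape((4, None, 3), (3,)) == (4, None, 3)
    assert elemwise_output_shape((5, 1, 6), (5, 4, 6)) == (5, 4, 6)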
/external/tensorflow/tensorflow/compiler/xla/ |
D | shape_util_test.cc |
  86  Shape shape1 = ShapeUtil::MakeShape(F32, {3, 2}); in TEST() local
  88  ASSERT_TRUE(ShapeUtil::Compatible(shape1, shape2)); in TEST()
  144  Shape shape1 = ShapeUtil::MakeShape(BF16, {3, 2}); in TEST() local
  146  ASSERT_TRUE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2)); in TEST()
  150  Shape shape1 = ShapeUtil::MakeShape(BF16, {3, 2}); in TEST() local
  152  ASSERT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2)); in TEST()
  252  Shape shape1 = ShapeUtil::MakeShape(F32, {}); in TEST() local
  255  EXPECT_FALSE(ShapeUtil::Compatible(shape1, shape2)); in TEST()
  256  EXPECT_FALSE(ShapeUtil::Compatible(shape2, shape1)); in TEST()
  257  EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape1, shape2)); in TEST()
  [all …]
|
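The shape_util_test.cc assertions distinguish Compatible (same element type and dimensions) from CompatibleIgnoringFpPrecision (floating-point precision differences such as BF16 vs F32 are tolerated). Purely to illustrate that distinction, and assuming this simplified model of the XLA semantics (layouts and tuple shapes are ignored here), a small Python sketch:

    FLOATING = {'F16', 'BF16', 'F32', 'F64'}

    def compatible(shape1, shape2):
        """Same element type and same dimensions (toy model of ShapeUtil::Compatible)."""
        return shape1 == shape2

    def compatible_ignoring_fp_precision(shape1, shape2):
        """Like compatible(), but any two floating-point element types are interchangeable."""
        (type1, dims1), (type2, dims2) = shape1, shape2
        if dims1 != dims2:
            return False
        if type1 in FLOATING and type2 in FLOATING:
            return True
        return type1 == type2

    assert compatible(('F32', (3, 2)), ('F32', (3, 2)))
    assert compatible_ignoring_fp_precision(('BF16', (3, 2)), ('F32', (3, 2)))
    assert not compatible_ignoring_fp_precision(('BF16', (3, 2)), ('F32', (2, 2)))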
/external/tensorflow/tensorflow/contrib/training/python/training/ |
D | batch_sequences_with_states_test.py |
  52  shape1 = np.array([self.value_length, 6])
  56  array_ops.placeholder_with_default(shape1, shape=[2]))
  260  shape1 = np.array([self.batch_size, num_unroll, 6])
  287  ind1_1, val1_1, shape1)
  289  ind1_2, val1_2, shape1)
  408  shape1 = np.array([self.batch_size, num_unroll, 6])
  435  ind1_1, val1_1, shape1)
  437  ind1_2, val1_2, shape1)
  459  shape1 = np.array([self.batch_size, num_unroll, 6])
  486  ind1_1, val1_1, shape1)
  [all …]
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | encapsulate_subgraphs_pass_test.cc |
  1034  GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately); in TEST() local
  1035  Node* key_constant = KeyPlaceholder("F1", shape1.opts()); in TEST()
  1038  shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true)); in TEST()
  1040  shape1.opts() in TEST()
  1045  shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true)); in TEST()
  1047  AddGraphDefToFunctionLibrary(shape1, "F1_O1", &library_expected)); in TEST()
  1704  GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately); in TEST() local
  1705  Node* key_constant = KeyPlaceholder("F1", shape1.opts()); in TEST()
  1708  shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true)); in TEST()
  1709  Node* e = Unary(ops::NodeOut(recv1, 0), shape1.opts() in TEST()
  [all …]
|
/external/gemmlowp/test/ |
D | benchmark_all_sizes.cc |
  212  bool operator<(const Shape& shape1, const Shape& shape2) { in operator <() argument
  213  return shape1.depth < shape2.depth || in operator <()
  214  (shape1.depth == shape2.depth && in operator <()
  215  (shape1.rows < shape2.rows || in operator <()
  216  (shape1.rows == shape2.rows && shape1.cols < shape2.cols))); in operator <()
|
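The gemmlowp operator< above orders benchmark Shapes lexicographically by (depth, rows, cols). A Python sketch of the same ordering (the namedtuple and field order here are illustrative, not gemmlowp's struct): comparing key tuples expresses the rule without the nested comparison chain.

    from collections import namedtuple

    Shape = namedtuple('Shape', ['rows', 'depth', 'cols'])

    def shape_less(shape1, shape2):
        """Lexicographic order on (depth, rows, cols), matching the snippet above."""
        return (shape1.depth, shape1.rows, shape1.cols) < (shape2.depth, shape2.rows, shape2.cols)

    assert shape_less(Shape(rows=10, depth=4, cols=10), Shape(rows=1, depth=5, cols=1))
    assert not shape_less(Shape(rows=2, depth=4, cols=10), Shape(rows=2, depth=4, cols=9))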
/external/tensorflow/tensorflow/c/eager/ |
D | c_api_debug.cc |
  100  const xla::Shape& shape1 = in TFE_TensorHandleTensorDebugInfo() local
  102  if (shape0.IsTuple() || shape1.IsTuple()) { in TFE_TensorHandleTensorDebugInfo()
  108  if (!xla::ShapeUtil::Equal(shape0, shape1)) { in TFE_TensorHandleTensorDebugInfo()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | set_kernels.cc |
  380  Status CheckShapesMatch(VarDimArray shape1, VarDimArray shape2) { in CheckShapesMatch() argument
  381  if (shape1 != shape2) { in CheckShapesMatch()
  383  str_util::Join(shape1, ","), "] vs [", in CheckShapesMatch()
  391  Status GroupShapeFromInputs(VarDimArray shape1, VarDimArray shape2, in GroupShapeFromInputs() argument
  394  TF_RETURN_IF_ERROR(GroupShape(shape1, &group_shape_1)); in GroupShapeFromInputs()
  434  const auto shape1 = TensorShapeToArray(set1_t.shape()); in ComputeDenseToDense() local
  436  OP_REQUIRES_OK(ctx, GroupShapeFromInputs(shape1, shape2, &group_shape)); in ComputeDenseToDense()
  438  const auto set1_strides = Strides(shape1); in ComputeDenseToDense()
|
D | segment_reduction_ops_test.cc |
  51  TensorShape shape1({num_rows, num_cols}); in BM_SegmentReduction() local
  52  Tensor input1(DT_FLOAT, shape1); in BM_SegmentReduction()
|
D | nn_ops_test.cc |
  825  TensorShape shape1({batch_size, rows, cols, depth}); in BM_AvgPool() local
  826  Tensor input1(DT_FLOAT, shape1); in BM_AvgPool()
  1021  TensorShape shape1({batch_size, rows, cols, depth}); in BM_MaxPool() local
  1022  Tensor input1(DT_FLOAT, shape1); in BM_MaxPool()
  1201  TensorShape shape1({batch_size, rows, cols, depth}); in BM_ReluFloat() local
  1202  Tensor input1(DT_FLOAT, shape1); in BM_ReluFloat()
|
/external/tensorflow/tensorflow/python/ops/ |
D | image_ops_impl.py |
  2723  shape1 = img1.get_shape().with_rank_at_least(3)
  2725  shape1[-3:].assert_is_compatible_with(shape2[-3:])
  2727  if shape1.ndims is not None and shape2.ndims is not None:
  2728  for dim1, dim2 in zip(reversed(shape1.dims[:-3]),
  2732  'Two images are not compatible: %s and %s' % (shape1, shape2))
  2735  shape1, shape2 = array_ops.shape_n([img1, img2])
  2740  math_ops.greater_equal(array_ops.size(shape1), 3),
  2741  [shape1, shape2], summarize=10))
  2743  math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])),
  2744  [shape1, shape2], summarize=10))
  [all …]
|
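image_ops_impl.py checks that two images have rank at least 3 and matching trailing (height, width, channels) dimensions before comparing them. A hedged NumPy sketch of just the static part of that check (the original additionally broadcast-checks any leading batch dimensions and has a dynamic-shape path):

    import numpy as np

    def verify_compatible_image_shapes(img1, img2):
        """Sketch: both images need rank >= 3 and identical last three dimensions."""
        shape1, shape2 = np.shape(img1), np.shape(img2)
        if len(shape1) < 3 or len(shape2) < 3:
            raise ValueError('Images must have rank >= 3, got %s and %s' % (shape1, shape2))
        if shape1[-3:] != shape2[-3:]:
            raise ValueError('Two images are not compatible: %s and %s' % (shape1, shape2))

    verify_compatible_image_shapes(np.zeros((2, 32, 32, 3)), np.zeros((32, 32, 3)))  # passes
    try:
        verify_compatible_image_shapes(np.zeros((32, 32, 3)), np.zeros((16, 16, 3)))
    except ValueError as e:
        print(e)  # Two images are not compatible: ...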
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | shape_optimizer.cc |
  110  const TensorShapeProto& shape1 = prop1[0].shape(); in Optimize() local
  112  int64 result = ComputeSizeRatio(shape1, shape2); in Optimize()
|
D | constant_folding.cc |
  565  BCast::Vec shape1; in MaterializeBroadcastGradientArgs() local
  566  if (!ExtractShape(*shape_node1, properties, &shape1, &min_id)) { in MaterializeBroadcastGradientArgs()
  576  for (auto& id : shape1) { in MaterializeBroadcastGradientArgs()
  591  const int common_dims = std::min(shape1.size(), shape2.size()); in MaterializeBroadcastGradientArgs()
  593  if (shape1[i] >= 0 && shape2[i] >= 0) { in MaterializeBroadcastGradientArgs()
  596  if (shape1[i] != shape2[i]) { in MaterializeBroadcastGradientArgs()
  606  for (int i = common_dims; i < shape1.size(); ++i) { in MaterializeBroadcastGradientArgs()
  607  if (shape1[i] < 0) { in MaterializeBroadcastGradientArgs()
  617  BCast bcast(shape1, shape2); in MaterializeBroadcastGradientArgs()
|
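MaterializeBroadcastGradientArgs in constant_folding.cc compares the two input shapes dimension by dimension so that BroadcastGradientArgs (the reduction axes needed when back-propagating through a broadcast) can sometimes be resolved at graph-optimization time. As a rough sketch of what those reduction axes look like for fully known shapes, assuming the standard right-aligned broadcasting rule (this is not the grappler code, and edge cases such as both dimensions being 1 are an assumption here):

    def broadcast_gradient_args(shape1, shape2):
        """Sketch: axes to reduce over when back-propagating through a broadcast.

        An axis is reduced for an input if that input was broadcast there, i.e. its
        dimension is 1 (or missing on the left) while the other input's is larger.
        """
        rank = max(len(shape1), len(shape2))
        padded1 = [1] * (rank - len(shape1)) + list(shape1)
        padded2 = [1] * (rank - len(shape2)) + list(shape2)
        reduce1, reduce2 = [], []
        for axis, (d1, d2) in enumerate(zip(padded1, padded2)):
            if d1 == 1 and d2 > 1:
                reduce1.append(axis)
            elif d2 == 1 and d1 > 1:
                reduce2.append(axis)
        return reduce1, reduce2

    assert broadcast_gradient_args((2, 3, 5), (1, 5)) == ([], [0, 1])
    assert broadcast_gradient_args((4, 1), (4, 6)) == ([1], [])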
/external/tensorflow/tensorflow/contrib/graph_editor/ |
D | reroute.py |
  63  shape0, shape1 = t0.get_shape(), t1.get_shape()
  64  if not shape0.is_compatible_with(shape1):
  66  shape1))
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | types.h |
  426  inline int MatchingDim(const RuntimeShape& shape1, int index1, in MatchingDim() argument
  428  TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2)); in MatchingDim()
  429  return shape1.Dims(index1); in MatchingDim()
  433  int MatchingDim(const RuntimeShape& shape1, int index1, in MatchingDim() argument
  435  TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2)); in MatchingDim()
  436  return MatchingDim(shape1, index1, args...); in MatchingDim()
|
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/ |
D | types.h |
  410  inline int MatchingDim(const RuntimeShape& shape1, int index1, in MatchingDim() argument
  412  TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2)); in MatchingDim()
  413  return shape1.Dims(index1); in MatchingDim()
  417  int MatchingDim(const RuntimeShape& shape1, int index1, in MatchingDim() argument
  419  TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2)); in MatchingDim()
  420  return MatchingDim(shape1, index1, args...); in MatchingDim()
|
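The two MatchingDim overloads listed above (identical in lite/kernels/internal and the older contrib/lite copy) assert that two RuntimeShapes agree on a given dimension and return that size; the variadic overload chains the check across several shapes. A Python analogue of the idea, offered only as an illustrative sketch:

    def matching_dim(shape1, index1, shape2, index2, *rest):
        """Sketch: assert shape1[index1] == shape2[index2] (== any further pairs)
        and return the common size, mirroring the TFLite MatchingDim helper."""
        assert shape1[index1] == shape2[index2], (shape1, index1, shape2, index2)
        if rest:
            return matching_dim(shape1, index1, *rest)
        return shape1[index1]

    # Typical use: require that the batch dimension of an input matches the output.
    assert matching_dim((8, 224, 224, 3), 0, (8, 1000), 0) == 8
    assert matching_dim((8, 16), 1, (16, 4), 0, (2, 16, 3), 1) == 16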
/external/tensorflow/tensorflow/core/ops/ |
D | io_ops.cc |
  94  ShapeHandle shape0, shape1, shape2; in __anonec23b74b0302() local
  96  TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &shape1)); in __anonec23b74b0302()
  98  TF_RETURN_IF_ERROR(c->Merge(shape1, shape2, &shape0)); in __anonec23b74b0302()
|
/external/tensorflow/tensorflow/lite/toco/ |
D | tooling_util.cc |
  661  bool ShapesAgreeUpToBroadcasting(const Shape& shape0, const Shape& shape1) { in ShapesAgreeUpToBroadcasting() argument
  663  CheckNonEmptyShapeDimensions(shape1); in ShapesAgreeUpToBroadcasting()
  666  const Shape* shorter = &shape1; in ShapesAgreeUpToBroadcasting()
  667  if (shape1.dimensions_count() > shape0.dimensions_count()) { in ShapesAgreeUpToBroadcasting()
  668  longer = &shape1; in ShapesAgreeUpToBroadcasting()
  689  bool ShapesAgreeUpToExtending(const Shape& shape0, const Shape& shape1) { in ShapesAgreeUpToExtending() argument
  691  CheckNonEmptyShapeDimensions(shape1); in ShapesAgreeUpToExtending()
  694  const Shape* shorter = &shape1; in ShapesAgreeUpToExtending()
  695  if (shape1.dimensions_count() > shape0.dimensions_count()) { in ShapesAgreeUpToExtending()
  696  longer = &shape1; in ShapesAgreeUpToExtending()
|
D | tooling_util.h |
  130  bool ShapesAgreeUpToBroadcasting(const Shape& shape0, const Shape& shape1);
  141  bool ShapesAgreeUpToExtending(const Shape& shape0, const Shape& shape1);
|
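tooling_util declares two related checks: ShapesAgreeUpToBroadcasting (trailing dimensions must match or be 1) and ShapesAgreeUpToExtending (the longer shape may differ only by leading 1s). A stand-alone Python sketch of the two predicates as read from the snippets above; treat the exact edge cases as assumptions of the sketch rather than toco's behavior:

    def shapes_agree_up_to_broadcasting(shape0, shape1):
        """Trailing dims, compared back to front, must be equal or one of them 1."""
        shorter, longer = sorted([list(shape0), list(shape1)], key=len)
        for d_short, d_long in zip(reversed(shorter), reversed(longer)):
            if d_short != d_long and d_short != 1 and d_long != 1:
                return False
        return True

    def shapes_agree_up_to_extending(shape0, shape1):
        """Trailing dims must match exactly; extra leading dims of the longer shape must be 1."""
        shorter, longer = sorted([list(shape0), list(shape1)], key=len)
        if list(reversed(longer))[:len(shorter)] != list(reversed(shorter)):
            return False
        return all(d == 1 for d in longer[:len(longer) - len(shorter)])

    assert shapes_agree_up_to_broadcasting((8, 1, 6), (7, 6))
    assert shapes_agree_up_to_extending((1, 1, 4, 5), (4, 5))
    assert not shapes_agree_up_to_extending((2, 4, 5), (4, 5))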
/external/tensorflow/tensorflow/compiler/tests/ |
D | concat_ops_test.py |
  261  for shape1 in (), (3,):
  264  x0 = np.random.randn(*(shape0 + (n0,) + shape1))
  265  x1 = np.random.randn(*(shape0 + (n1,) + shape1))
|
/external/tensorflow/tensorflow/python/ops/distributions/ |
D | util.py |
  700  shape1, shape2, name="prefer_static_broadcast_shape"): argument
  712  with ops.name_scope(name, values=[shape1, shape2]):
  732  shape1_ = get_tensor_shape(shape1)
  737  shape1_ = get_shape_tensor(shape1)
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | reshape_mover_test.cc |
  493  auto shape1 = ShapeUtil::MakeShape(F32, {1, 8, 1, 7}); in TEST_F() local
  498  HloInstruction::CreateParameter(0, shape1, "param0")); in TEST_F()
  500  HloInstruction::CreateParameter(1, shape1, "param1")); in TEST_F()
|
/external/tensorflow/tensorflow/core/grappler/costs/ |
D | graph_properties.cc |
  931  ShapeHandle shape1, ShapeHandle shape2) { in OutputAsUnion() argument
  932  if (shape1.SameHandle(shape2)) { in OutputAsUnion()
  933  return shape1; in OutputAsUnion()
  936  ShapeHandle relaxed = shape1; in OutputAsUnion()
  937  const int rank = ctx->Rank(shape1); in OutputAsUnion()
  942  if (!ctx->Dim(shape1, d).SameHandle(ctx->Dim(shape2, d))) { in OutputAsUnion()
  943  int64 val1 = ctx->Value(ctx->Dim(shape1, d)); in OutputAsUnion()
|
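OutputAsUnion in graph_properties.cc relaxes two candidate shapes for the same output into one: if the handles are identical it returns the shape directly, otherwise it walks the dimensions and keeps only the information on which the two agree. A simplified sketch of that relaxation for plain Python lists (using -1 for an unknown dimension, which is an assumption of this sketch, not grappler's representation):

    UNKNOWN = -1  # stand-in for an unknown dimension in this sketch

    def relax_shapes(shape1, shape2):
        """Keep only what shape1 and shape2 agree on; disagreeing dims become unknown."""
        if shape1 == shape2:
            return list(shape1)
        if shape1 is None or shape2 is None or len(shape1) != len(shape2):
            return None  # ranks disagree: unknown rank
        return [d1 if d1 == d2 else UNKNOWN for d1, d2 in zip(shape1, shape2)]

    assert relax_shapes([4, 32, 32, 3], [4, 32, 32, 3]) == [4, 32, 32, 3]
    assert relax_shapes([4, 32, 32, 3], [8, 32, 32, 3]) == [UNKNOWN, 32, 32, 3]
    assert relax_shapes([4, 3], [4, 3, 1]) is None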
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | concat_op_test.py |
  377  for shape1 in (), (3,):
  380  x0 = np.random.randn(*(shape0 + (n0,) + shape1))
  381  x1 = np.random.randn(*(shape0 + (n1,) + shape1))
|
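Both concat tests above (this one and the XLA copy under compiler/tests) build inputs whose shapes differ only along the concatenation axis: shape0 supplies the leading dims, shape1 the trailing dims, and n0/n1 are the sizes being joined. A small NumPy sketch of the pattern; the variable names follow the test snippet, but the concrete sizes are made up here:

    import numpy as np

    shape0, shape1 = (2,), (3,)   # leading and trailing dims shared by both inputs
    n0, n1 = 4, 5                 # sizes along the axis being concatenated
    axis = len(shape0)            # the axis where the inputs differ

    x0 = np.random.randn(*(shape0 + (n0,) + shape1))
    x1 = np.random.randn(*(shape0 + (n1,) + shape1))

    out = np.concatenate([x0, x1], axis=axis)
    assert out.shape == shape0 + (n0 + n1,) + shape1   # (2, 9, 3)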
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | fusion_test.cc |
  856  Shape shape1 = ShapeUtil::MakeShape(F32, {param1_dim0, param1_dim1}); in BM_ParallelFusion() local
  857  auto param1 = Parameter(&builder, 1, shape1, "param1"); in BM_ParallelFusion()
|