/external/eigen/Eigen/src/Geometry/ |
D | Transform.h |
    24   Dim = Transform::Dim, enumerator
    42   int Dim,
    58   int Dim,
    208  Dim = _Dim, ///< space dimension in which the transformation holds
    210  Rows = int(Mode)==(AffineCompact) ? Dim : HDim
    221  typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
    223  typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
    225  …typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> C…
    229  Block<MatrixType,Dim,HDim> >::type AffinePart;
    233  const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart;
    [all …]
|
D | RotationBase.h |
    32   enum { Dim = _Dim }; enumerator
    37   typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
    38   typedef Matrix<Scalar,Dim,1> VectorType;
    56   …EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Isometry> operator*(const Translation<Scalar,Dim>& t…
    57   { return Transform<Scalar,Dim,Isometry>(*this) * t; }
    80   …EIGEN_DEVICE_FUNC friend inline Transform<Scalar,Dim,Affine> operator*(const DiagonalMatrix<Scalar…
    82   Transform<Scalar,Dim,Affine> res(r);
    89   …EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator*(const Transform<Scalar,Dim,Mode,Opti…
    103  enum { Dim = RotationDerived::Dim };
    104  typedef Matrix<typename RotationDerived::Scalar,Dim,Dim> ReturnType;
    [all …]
|
D | Translation.h |
    35   enum { Dim = _Dim }; enumerator
    39   typedef Matrix<Scalar,Dim,1> VectorType;
    41   typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
    43   typedef Transform<Scalar,Dim,Affine> AffineTransformType;
    45   typedef Transform<Scalar,Dim,Isometry> IsometryTransformType;
    58   eigen_assert(Dim==2); in Translation()
    65   eigen_assert(Dim==3); in Translation()
    106  EIGEN_DEVICE_FUNC inline IsometryTransformType operator*(const RotationBase<Derived,Dim>& r) const
    118  res.matrix().row(Dim).setZero();
    119  res(Dim,Dim) = Scalar(1);
    [all …]
|
D | Scaling.h |
    58   template<int Dim>
    59   inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const;
    62   template<int Dim, int Mode, int Options>
    63   …inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Sca…
    65   Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
    76   template<typename Derived,int Dim>
    77   inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const
    156  template<int Dim>
    157  inline Transform<Scalar,Dim,Affine>
    158  UniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const
    [all …]
|
D | Homogeneous.h |
    96   template<typename Scalar, int Dim, int Mode, int Options> friend
    97   EIGEN_DEVICE_FUNC inline const Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous >
    98   operator* (const Transform<Scalar,Dim,Mode,Options>& lhs, const Homogeneous& rhs)
    101  return Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous>(lhs,rhs);
    222  template<typename Scalar, int Dim, int Mode,int Options>
    223  struct take_matrix_for_product<Transform<Scalar, Dim, Mode, Options> >
    225  typedef Transform<Scalar, Dim, Mode, Options> TransformType;
    230  template<typename Scalar, int Dim, int Options>
    231  struct take_matrix_for_product<Transform<Scalar, Dim, Projective, Options> >
    233  typedef Transform<Scalar, Dim, Projective, Options> TransformType;
    [all …]
|
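Across the Eigen/Geometry headers indexed above, Dim is the compile-time spatial dimension that ties Transform, RotationBase, Translation, UniformScaling, and Homogeneous together: the stored homogeneous matrix is (Dim+1) x (Dim+1) (HDim x HDim), or Dim x HDim in AffineCompact mode. A minimal usage sketch follows, assuming only a stock Eigen 3.x installation; it is illustrative code, not code from the indexed files.

    // Minimal sketch, assuming Eigen 3.x; Dim = 3 throughout.
    #include <Eigen/Geometry>
    #include <iostream>

    int main() {
      // Transform<double,3,Isometry> stores a 4x4 ((Dim+1) x (Dim+1)) homogeneous matrix.
      Eigen::Transform<double, 3, Eigen::Isometry> pose =
          Eigen::Transform<double, 3, Eigen::Isometry>::Identity();
      pose.translate(Eigen::Vector3d(1.0, 2.0, 3.0));                       // a Translation<double,3>
      pose.rotate(Eigen::AngleAxisd(1.5707963, Eigen::Vector3d::UnitZ()));  // a RotationBase-derived type

      // UniformScaling * Isometry promotes the result to Affine, matching the
      // operator* overload listed under Scaling.h (lines 62-65 above).
      Eigen::Transform<double, 3, Eigen::Affine> scaled = Eigen::Scaling(2.0) * pose;

      Eigen::Vector3d p(1.0, 0.0, 0.0);
      std::cout << (scaled * p).transpose() << std::endl;  // transformed 3-D point
      return 0;
    }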
/external/eigen/unsupported/test/ |
D | BVH.cpp |
    17   …e<typename Scalar, int Dim> AlignedBox<Scalar, Dim> bounding_box(const Matrix<Scalar, Dim, 1> &v) … in bounding_box() argument
    22   template<int Dim>
    25   EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(double, Dim)
    27   typedef Matrix<double, Dim, 1> VectorType;
    35   template<int Dim> AlignedBox<double, Dim> bounding_box(const Ball<Dim> &b) in bounding_box()
    36   { return AlignedBox<double, Dim>(b.center.array() - b.radius, b.center.array() + b.radius); } in bounding_box()
    40   template<int Dim>
    44   typedef Matrix<double, Dim, 1> VectorType;
    45   typedef Ball<Dim> BallType;
    46   typedef AlignedBox<double, Dim> BoxType;
    [all …]
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIMachineFunctionInfo.h |
    361  int getDebuggerWorkGroupIDStackObjectIndex(unsigned Dim) const { in getDebuggerWorkGroupIDStackObjectIndex() argument
    362  assert(Dim < 3); in getDebuggerWorkGroupIDStackObjectIndex()
    363  return DebuggerWorkGroupIDStackObjectIndices[Dim]; in getDebuggerWorkGroupIDStackObjectIndex()
    367  void setDebuggerWorkGroupIDStackObjectIndex(unsigned Dim, int ObjectIdx) { in setDebuggerWorkGroupIDStackObjectIndex() argument
    368  assert(Dim < 3); in setDebuggerWorkGroupIDStackObjectIndex()
    369  DebuggerWorkGroupIDStackObjectIndices[Dim] = ObjectIdx; in setDebuggerWorkGroupIDStackObjectIndex()
    373  int getDebuggerWorkItemIDStackObjectIndex(unsigned Dim) const { in getDebuggerWorkItemIDStackObjectIndex() argument
    374  assert(Dim < 3); in getDebuggerWorkItemIDStackObjectIndex()
    375  return DebuggerWorkItemIDStackObjectIndices[Dim]; in getDebuggerWorkItemIDStackObjectIndex()
    379  void setDebuggerWorkItemIDStackObjectIndex(unsigned Dim, int ObjectIdx) { in setDebuggerWorkItemIDStackObjectIndex() argument
    [all …]
|
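In both SIMachineFunctionInfo.h copies (this one and the llvm-7.0 copy under swiftshader further down), Dim is an x/y/z selector, asserted to be less than 3, into per-dimension stack-object index arrays. The following is a standalone sketch of that accessor pattern only; the class and member names are illustrative, not the actual LLVM ones.

    // Standalone sketch of the Dim-indexed accessor pattern; names are illustrative.
    #include <cassert>

    class DebuggerStackObjectIndices {
      int WorkGroupID[3] = {-1, -1, -1};  // one frame-index slot per dimension x/y/z
    public:
      int getWorkGroupIDIndex(unsigned Dim) const {
        assert(Dim < 3 && "Dim selects the x, y, or z component");
        return WorkGroupID[Dim];
      }
      void setWorkGroupIDIndex(unsigned Dim, int ObjectIdx) {
        assert(Dim < 3 && "Dim selects the x, y, or z component");
        WorkGroupID[Dim] = ObjectIdx;
      }
    };

    int main() {
      DebuggerStackObjectIndices Indices;
      Indices.setWorkGroupIDIndex(0, 7);  // record a frame index for dimension x
      return Indices.getWorkGroupIDIndex(0) == 7 ? 0 : 1;
    }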
/external/tensorflow/tensorflow/core/ops/ |
D | image_ops.cc |
    36   TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 2, &unused)); in SetOutputToSizedImage()
    65   return SetOutputToSizedImage(c, c->Dim(input, 0), 1 /* size_input_idx */, in ResizeShapeFn()
    66   c->Dim(input, 3)); in ResizeShapeFn()
    103  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(input, -1), 3, &last_dim)); in ColorspaceShapeFn()
    126  TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 0), c->Dim(scores, 0), &unused)); in NMSShapeFn()
    128  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(boxes, 1), 4, &unused)); in NMSShapeFn()
    152  TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 0), c->Dim(scores, 0), &unused)); in CombinedNMSShapeFn()
    154  TF_RETURN_IF_ERROR(c->Merge(c->Dim(boxes, 1), c->Dim(scores, 1), &unused)); in CombinedNMSShapeFn()
    156  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(boxes, 3), 4, &unused)); in CombinedNMSShapeFn()
    158  DimensionHandle d = c->Dim(boxes, 2); in CombinedNMSShapeFn()
    [all …]
|
D | linalg_ops.cc |
    35   TF_RETURN_IF_ERROR(c->Merge(c->Dim(s, -2), c->Dim(s, -1), &d)); in MakeBatchSquareMatrix()
    73   TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -2), c->Dim(rhs, -2), &m)); in MatrixSolveShapeFn()
    74   DimensionHandle n = c->Dim(lhs, -1); in MatrixSolveShapeFn()
    82   TF_RETURN_IF_ERROR(c->Concatenate(out, c->Vector(c->Dim(rhs, -1)), &out)); in MatrixSolveShapeFn()
    94   TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n)); in SelfAdjointEigV2ShapeFn()
    120  TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n)); in LuShapeFn()
    144  DimensionHandle m = c->Dim(input, -2); in QrShapeFn()
    145  DimensionHandle n = c->Dim(input, -1); in QrShapeFn()
    175  DimensionHandle m = c->Dim(input, -2); in SvdShapeFn()
    176  DimensionHandle n = c->Dim(input, -1); in SvdShapeFn()
    [all …]
|
D | ctc_ops.cc |
    49   TF_RETURN_IF_ERROR(c->Merge(c->Dim(labels_indices, 0), in __anon7dc6794d0102()
    50   c->Dim(labels_values, 0), &unused)); in __anon7dc6794d0102()
    56   c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size)); in __anon7dc6794d0102()
    82   c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size)); in __anon7dc6794d0202()
    112  c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size)); in __anon7dc6794d0302()
|
D | sparse_ops.cc |
    57   c->set_output(0, c->Vector(c->Dim(a_indices, 0))); in __anone6e195410202()
    58   c->set_output(1, c->Vector(c->Dim(b_indices, 0))); in __anone6e195410202()
    79   0, c->Matrix(InferenceContext::kUnknownDim, c->Dim(a_shape, 0))); in __anone6e195410302()
    111  DimensionHandle output_right = c->Dim(b, adjoint_b ? 0 : 1); in __anone6e195410402()
    112  DimensionHandle output_left = c->Dim(a_shape, adjoint_a ? 1 : 0); in __anone6e195410402()
    113  DimensionHandle inner_left = c->Dim(a_shape, adjoint_a ? 0 : 1); in __anone6e195410402()
    114  DimensionHandle inner_right = c->Dim(b, adjoint_b ? 1 : 0); in __anone6e195410402()
    162  TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), -1), 3, &unused)); in __anone6e195410702()
    182  c->WithValue(c->Dim(serialized_sparse, 1), 3, &unused)); in __anone6e195410802()
    236  TF_RETURN_IF_ERROR(c->Merge(c->Dim(ind, 0), c->Dim(val, 0), &num_dim)); in __anone6e195410a02()
    [all …]
|
D | array_ops.cc |
    68   TF_RETURN_IF_ERROR(c->Add(c->Dim(input, i), pad0 + pad1, &dims[i])); in PadKnown()
    79   TF_RETURN_IF_ERROR(c->WithValue(c->Dim(paddings, 1), 2, &unused)); in PadShapeFn()
    83   DimensionHandle n_dim = c->Dim(paddings, 0); in PadShapeFn()
    168  dims[i] = c->Dim(input, in_idx); in TransposeShapeFn()
    201  DimensionHandle dim = c->Dim(out, i); in SetOutputShapeForReshape()
    219  DimensionHandle dim = c->Dim(in, i); in SetOutputShapeForReshape()
    258  DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx); in SetOutputShapeForReshape()
    265  DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx); in SetOutputShapeForReshape()
    304  if (!c->WithValue(c->Dim(c->input(i), 0), 1, &unused).ok()) { in __anondb9326b20202()
    345  while (index < axis) dims.push_back(c->Dim(cur, index++)); in __anondb9326b20302()
    [all …]
|
D | boosted_trees_ops.cc |
    168  TF_RETURN_IF_ERROR(c->Merge(c->Dim(node_ids_shape, 0), in __anon8a3fd5e50602()
    169  c->Dim(gradients_shape, 0), &unused_dim)); in __anon8a3fd5e50602()
    175  TF_RETURN_IF_ERROR(c->Merge(c->Dim(node_ids_shape, 0), in __anon8a3fd5e50602()
    176  c->Dim(bucketized_feature_shape, 0), in __anon8a3fd5e50602()
    207  c->MakeShape({c->Dim(feature_shape, 0), logits_dimension}); in __anon8a3fd5e50702()
    232  auto batch_size = c->MakeShape({c->Dim(feature_shape, 0)}); in __anon8a3fd5e50802()
    278  c->MakeShape({c->Dim(feature_shape, 0), logits_dimension}); in __anon8a3fd5e50a02()
    282  c->set_output(1, c->MakeShape({c->Dim(feature_shape, 0)})); in __anon8a3fd5e50a02()
    284  c->set_output(2, c->MakeShape({c->Dim(feature_shape, 0)})); in __anon8a3fd5e50a02()
    314  auto shape_rank_1 = c->MakeShape({c->Dim(shape_handle, 0)}); in __anon8a3fd5e50b02()
    [all …]
|
D | nn_ops.cc |
    44   DimensionHandle d = c->Dim(input, i); in FractionalPoolShapeFn()
    113  DimensionHandle last_dim = c->Dim(input, 3); in __anon4650176f0302()
    117  TF_RETURN_IF_ERROR(c->Merge(last_dim, c->Dim(vec, 0), &last_dim)); in __anon4650176f0302()
    147  DimensionHandle last_dim = c->Dim(input, 3); in __anon4650176f0402()
    151  TF_RETURN_IF_ERROR(c->Merge(last_dim, c->Dim(vec, 0), &last_dim)); in __anon4650176f0402()
    373  c->WithRank(resized, c->Value(c->Dim(paddings, 0)), &resized)); in CommonFusedConvCalculations()
    382  DimensionHandle dim = c->Dim(resized, i); in CommonFusedConvCalculations()
    411  DimensionHandle batch_size_dim = c->Dim(padded, 0); in CommonFusedConvCalculations()
    412  DimensionHandle in_rows_dim = c->Dim(padded, 1); in CommonFusedConvCalculations()
    413  DimensionHandle in_cols_dim = c->Dim(padded, 2); in CommonFusedConvCalculations()
    [all …]
|
D | cudnn_rnn_ops.cc |
    85   auto seq_length = c->Dim(input_shape, 0); in __anon3a5366b80302()
    86   auto batch_size = c->Dim(input_shape, 1); in __anon3a5366b80302()
    87   auto num_units = c->Dim(input_h_shape, 2); in __anon3a5366b80302()
    128  auto seq_length = c->Dim(input_shape, 0); in __anon3a5366b80402()
    129  auto batch_size = c->Dim(input_shape, 1); in __anon3a5366b80402()
    130  auto num_units = c->Dim(input_h_shape, 2); in __anon3a5366b80402()
    174  auto max_seq_length = c->Dim(input_shape, 0); in __anon3a5366b80502()
    175  auto batch_size = c->Dim(input_shape, 1); in __anon3a5366b80502()
    176  auto num_units = c->Dim(input_h_shape, 2); in __anon3a5366b80502()
|
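In every TensorFlow op registration indexed above, Dim is shape_inference::InferenceContext::Dim, which returns a DimensionHandle for the i-th dimension of a shape; negative indices count from the innermost dimension, as the shape_inference_test.cc entries below exercise. The following is a hedged sketch of the pattern, assuming the TF 1.x op-registration headers; the op name ExampleMatVec is made up for illustration, while the shape-inference calls (WithRank, Dim, Merge, Vector) are the ones used in the files above.

    // Hedged sketch of the c->Dim(...) shape-function pattern; "ExampleMatVec" is hypothetical.
    #include "tensorflow/core/framework/op.h"
    #include "tensorflow/core/framework/shape_inference.h"
    #include "tensorflow/core/lib/core/errors.h"

    using tensorflow::shape_inference::DimensionHandle;
    using tensorflow::shape_inference::InferenceContext;
    using tensorflow::shape_inference::ShapeHandle;

    REGISTER_OP("ExampleMatVec")
        .Input("matrix: float")
        .Input("vec: float")
        .Output("product: float")
        .SetShapeFn([](InferenceContext* c) {
          ShapeHandle matrix, vec;
          TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &matrix));  // matrix must be rank 2
          TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &vec));     // vec must be rank 1
          // Dim(shape, i) yields the i-th dimension; -1 addresses the last one.
          DimensionHandle inner;
          TF_RETURN_IF_ERROR(c->Merge(c->Dim(matrix, -1), c->Dim(vec, 0), &inner));
          c->set_output(0, c->Vector(c->Dim(matrix, 0)));            // output has one dim: rows
          return tensorflow::Status::OK();
        });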
/external/tensorflow/tensorflow/core/framework/ |
D | common_shape_fns.cc |
    223  DimensionHandle output_rows = transpose_a ? c->Dim(a, 1) : c->Dim(a, 0); in MatMulShape()
    224  DimensionHandle output_cols = transpose_b ? c->Dim(b, 0) : c->Dim(b, 1); in MatMulShape()
    227  DimensionHandle inner_a = transpose_a ? c->Dim(a, 0) : c->Dim(a, 1); in MatMulShape()
    228  DimensionHandle inner_b = transpose_b ? c->Dim(b, 1) : c->Dim(b, 0); in MatMulShape()
    251  DimensionHandle bias_dim = c->Dim(bias_shape, 0); in BiasAddShape()
    270  DimensionHandle input_bias_dim = c->Dim(input_shape, 1); in BiasAddShape()
    282  DimensionHandle input_bias_dim = c->Dim(input_shape, -1); in BiasAddShape()
    303  c->set_output(0, c->Vector(c->Dim(input_shape, 1))); in BiasAddGradShape()
    306  c->set_output(0, c->Vector(c->Dim(input_shape, -1))); in BiasAddGradShape()
    319  DimensionHandle vect_dim = c->Dim( in CheckFormatConstraintsOnShape()
    [all …]
|
D | shape_inference_test.cc |
    266  EXPECT_EQ("?", c.DebugString(c.Dim(in0, 0))); in TEST_F()
    267  EXPECT_EQ("?", c.DebugString(c.Dim(in0, -1))); in TEST_F()
    268  EXPECT_EQ("?", c.DebugString(c.Dim(in0, 1000))); in TEST_F()
    274  auto d = c.Dim(in1, 0); in TEST_F()
    276  EXPECT_TRUE(SameHandle(d, c.Dim(in1, -3))); in TEST_F()
    279  d = c.Dim(in1, 1); in TEST_F()
    282  EXPECT_TRUE(SameHandle(d, c.Dim(in1, -2))); in TEST_F()
    284  d = c.Dim(in1, 2); in TEST_F()
    286  EXPECT_TRUE(SameHandle(d, c.Dim(in1, -1))); in TEST_F()
    305  EXPECT_FALSE(SameHandle(c.Dim(c.input(1), 1), c.NumElements(c.input(1)))); in TEST_F()
    [all …]
|
/external/tensorflow/tensorflow/contrib/boosted_trees/ops/ |
D | split_handler_ops.cc |
    54   TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anond11615e50102()
    55   c->Dim(gradients_shape, 0), &unused_dim)); in __anond11615e50102()
    58   TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anond11615e50102()
    59   c->Dim(hessians_shape, 0), &unused_dim)); in __anond11615e50102()
    126  TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anond11615e50202()
    127  c->Dim(gradients_shape, 0), &unused_dim)); in __anond11615e50202()
    130  TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anond11615e50202()
    131  c->Dim(hessians_shape, 0), &unused_dim)); in __anond11615e50202()
    197  TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anond11615e50302()
    198  c->Dim(gradients_shape, 0), &unused_dim)); in __anond11615e50302()
    [all …]
|
D | stats_accumulator_ops.cc |
    80   TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anon72691d0a0202()
    81   c->Dim(gradients_shape, 0), &unused_dim)); in __anon72691d0a0202()
    85   TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anon72691d0a0202()
    86   c->Dim(hessians_shape, 0), &unused_dim)); in __anon72691d0a0202()
    165  TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anon72691d0a0402()
    166  c->Dim(gradients_shape, 0), &unused_dim)); in __anon72691d0a0402()
    169  TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anon72691d0a0402()
    170  c->Dim(hessians_shape, 0), &unused_dim)); in __anon72691d0a0402()
    298  TF_RETURN_IF_ERROR(c->Merge(c->Dim(partition_ids_shape, 0), in __anon72691d0a0702()
    299  c->Dim(gradients_shape, 0), &unused_dim)); in __anon72691d0a0702()
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/ |
D | SIMachineFunctionInfo.h |
    569  int getDebuggerWorkGroupIDStackObjectIndex(unsigned Dim) const { in getDebuggerWorkGroupIDStackObjectIndex() argument
    570  assert(Dim < 3); in getDebuggerWorkGroupIDStackObjectIndex()
    571  return DebuggerWorkGroupIDStackObjectIndices[Dim]; in getDebuggerWorkGroupIDStackObjectIndex()
    575  void setDebuggerWorkGroupIDStackObjectIndex(unsigned Dim, int ObjectIdx) { in setDebuggerWorkGroupIDStackObjectIndex() argument
    576  assert(Dim < 3); in setDebuggerWorkGroupIDStackObjectIndex()
    577  DebuggerWorkGroupIDStackObjectIndices[Dim] = ObjectIdx; in setDebuggerWorkGroupIDStackObjectIndex()
    581  int getDebuggerWorkItemIDStackObjectIndex(unsigned Dim) const { in getDebuggerWorkItemIDStackObjectIndex() argument
    582  assert(Dim < 3); in getDebuggerWorkItemIDStackObjectIndex()
    583  return DebuggerWorkItemIDStackObjectIndices[Dim]; in getDebuggerWorkItemIDStackObjectIndex()
    587  void setDebuggerWorkItemIDStackObjectIndex(unsigned Dim, int ObjectIdx) { in setDebuggerWorkItemIDStackObjectIndex() argument
    [all …]
|
/external/eigen/bench/ |
D | geometry.cpp |
    43   enum {Dim = T::Dim}; enumerator
    57   template<typename Scalar, int Dim, typename Data>
    58   EIGEN_DONT_INLINE void transform(const Transform<Scalar,Dim,Projective>& t, Data& data) in transform() argument
    60   data = (t * data.colwise().homogeneous()).template block<Dim,Data::ColsAtCompileTime>(0,0); in transform()
    63   template<typename T> struct get_dim { enum { Dim = T::Dim }; }; enumerator
    65   struct get_dim<Matrix<S,R,C,O,MR,MC> > { enum { Dim = R }; }; enumerator
    72   Matrix<typename Transformation::Scalar,get_dim<Transformation>::Dim,N> data; in run()
|
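The benchmark's transform() overload for projective transforms (line 60 above) promotes every column of the data block to homogeneous coordinates, applies the Transform, and keeps the top Dim rows. A hedged sketch of the same pattern with concrete sizes, assuming Eigen 3.x:

    // Hedged sketch of the projective-transform pattern from the benchmark; sizes are arbitrary.
    #include <Eigen/Geometry>
    #include <iostream>

    int main() {
      const int Dim = 3, N = 4;
      Eigen::Transform<float, Dim, Eigen::Projective> t(Eigen::Matrix4f::Random());
      Eigen::Matrix<float, Dim, N> data = Eigen::Matrix<float, Dim, N>::Random();

      // Promote columns to homogeneous coordinates, apply t, keep the top Dim rows
      // (like the benchmark, this skips the perspective divide).
      Eigen::Matrix<float, Dim + 1, N> h = t * data.colwise().homogeneous();
      data = h.topRows<Dim>();

      std::cout << data << std::endl;
      return 0;
    }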
/external/eigen/unsupported/Eigen/src/BVH/ |
D | KdBVH.h |
    18   template<typename Scalar, int Dim>
    21   EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar, Dim)
    22   typedef Matrix<Scalar, Dim, 1> VectorType;
    70   enum { Dim = _Dim };
    74   typedef AlignedBox<Scalar, Dim> Volume;
    170  typedef internal::vector_int_pair<Scalar, Dim> VIPair;
    172  typedef Matrix<Scalar, Dim, 1> VectorType;
    195  build(objCenters, from, mid, objBoxes, (dim + 1) % Dim);
    205  build(objCenters, from, mid, objBoxes, (dim + 1) % Dim);
    207  build(objCenters, mid, to, objBoxes, (dim + 1) % Dim);
|
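Both the BVH.cpp test (under unsupported/test above) and KdBVH.h use Dim to fix the dimension of the stored objects, of the AlignedBox<Scalar,Dim> volumes, and of the (dim + 1) % Dim split-axis cycling; the tree boxes each object through an unqualified bounding_box(object) call. A small sketch under that convention, assuming Eigen's unsupported BVH module; the Ball type is modeled on the test's, not copied from it.

    // Small sketch, assuming Eigen's unsupported BVH module is available.
    #include <unsupported/Eigen/BVH>
    #include <Eigen/Geometry>
    #include <vector>

    template<int Dim>
    struct Ball {
      Eigen::Matrix<double, Dim, 1> center;
      double radius;
    };

    // Found by unqualified lookup from inside KdBVH to compute each object's volume.
    template<int Dim>
    Eigen::AlignedBox<double, Dim> bounding_box(const Ball<Dim>& b) {
      return Eigen::AlignedBox<double, Dim>(b.center.array() - b.radius,
                                            b.center.array() + b.radius);
    }

    int main() {
      std::vector<Ball<3> > balls(2);
      balls[0].center = Eigen::Vector3d::Zero();        balls[0].radius = 1.0;
      balls[1].center = Eigen::Vector3d(5.0, 0.0, 0.0); balls[1].radius = 0.5;

      // KdBVH<Scalar, Dim, Object>: Dim also drives the split-axis cycling shown above.
      Eigen::KdBVH<double, 3, Ball<3> > tree(balls.begin(), balls.end());
      return 0;
    }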
/external/tensorflow/tensorflow/contrib/rnn/ops/ |
D | gru_ops.cc |
    40   DimensionHandle batch_size = c->Dim(x, 0); in __anon6fce7f260102()
    41   DimensionHandle cell_size = c->Dim(h_prev, 1); in __anon6fce7f260102()
    119  DimensionHandle batch_size = c->Dim(x, 0); in __anon6fce7f260202()
    120  DimensionHandle cell_size = c->Dim(h_prev, 1); in __anon6fce7f260202()
    121  DimensionHandle twice_cell_size = c->Dim(w_ru, 1); in __anon6fce7f260202()
|
D | lstm_ops.cc |
    50   DimensionHandle batch_size = c->Dim(x, 0); in __anonad8b69780102()
    51   DimensionHandle cell_size = c->Dim(cs_prev, 1); in __anonad8b69780102()
    137  DimensionHandle batch_size = c->Dim(x, 0); in __anonad8b69780202()
    138  DimensionHandle cell_size = c->Dim(cs_prev, 1); in __anonad8b69780202()
    205  DimensionHandle timelen = c->Dim(x, 0); in __anonad8b69780302()
    206  DimensionHandle batch_size = c->Dim(x, 1); in __anonad8b69780302()
    209  c->Divide(c->Dim(b, 0), 4, true /* evenly_divisible */, &cell_size)); in __anonad8b69780302()
|
/external/tensorflow/tensorflow/contrib/image/ops/ |
D | image_ops.cc |
    36   TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 2, &unused)); in SetOutputToSizedImage()
    66   return SetOutputToSizedImage(c, c->Dim(input, 0), 2 /* size_input_idx */, in ResizeShapeFn()
    67   c->Dim(input, 3)); in ResizeShapeFn()
    130  c->set_output(0, c->MakeShape({c->Dim(input, 0)})); in __anone7bf74500302()
    131  c->set_output(1, c->MakeShape({c->Dim(input, 1)})); in __anone7bf74500302()
|