/external/tensorflow/tensorflow/core/framework/ |
D | tensor.h |
    template <typename T, size_t NDIMS>
    typename TTypes<T, NDIMS>::Tensor tensor();

    template <typename T, size_t NDIMS>
    typename TTypes<T, NDIMS>::Tensor bit_casted_tensor();

    template <typename T, size_t NDIMS>
    typename TTypes<T, NDIMS>::Tensor reinterpret_last_dimension();

    template <typename T, size_t NDIMS = 2>
    typename TTypes<T, NDIMS>::Tensor flat_inner_dims();

    template <typename T, size_t NDIMS = 2>
    typename TTypes<T, NDIMS>::Tensor flat_outer_dims();
    [all …]
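The methods above map a Tensor onto typed Eigen views whose rank is fixed at compile time by NDIMS. A minimal usage sketch, assuming a TensorFlow C++ build environment; the shape values are illustrative:

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    void TensorViewSketch() {
      tensorflow::Tensor t(tensorflow::DT_FLOAT,
                           tensorflow::TensorShape({2, 3, 4}));

      // tensor<T, NDIMS>() requires the rank to match NDIMS exactly.
      auto rank3 = t.tensor<float, 3>();  // a TTypes<float, 3>::Tensor
      rank3(0, 1, 2) = 1.0f;

      // flat_inner_dims() collapses all but the last dimension: 6 x 4 here.
      auto inner = t.flat_inner_dims<float, 2>();

      // flat_outer_dims() collapses all but the first dimension: 2 x 12 here.
      auto outer = t.flat_outer_dims<float, 2>();
      (void)inner;
      (void)outer;
    }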
|
D | tensor_shape.h |
    template <int NDIMS, typename IndexType = Eigen::DenseIndex>
    Eigen::DSizes<IndexType, NDIMS> AsEigenDSizes() const;

    template <int NDIMS, typename IndexType = Eigen::DenseIndex>
    Status AsEigenDSizesWithStatus(Eigen::DSizes<IndexType, NDIMS>* out) const;

    template <int NDIMS, typename IndexType = Eigen::DenseIndex>
    Eigen::DSizes<IndexType, NDIMS> AsEigenDSizesWithPadding() const;

    template <int NDIMS, typename IndexType = Eigen::DenseIndex>
    Status AsEigenDSizesWithPaddingWithStatus(
        Eigen::DSizes<IndexType, NDIMS>* out) const;

    void CheckDimsEqual(int NDIMS) const;
    void CheckDimsAtLeast(int NDIMS) const;
    [all …]
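These converters produce the fixed-rank Eigen::DSizes that the kernel functors below take as index arguments. A short sketch, again assuming a TensorFlow build, with illustrative shape values:

    #include "tensorflow/core/framework/tensor_shape.h"

    void DSizesSketch() {
      tensorflow::TensorShape shape({2, 3, 4});

      // CHECK-fails unless shape.dims() == 3.
      Eigen::DSizes<Eigen::DenseIndex, 3> d = shape.AsEigenDSizes<3>();

      // Pads trailing dimensions with 1, giving {2, 3, 4, 1, 1}.
      Eigen::DSizes<Eigen::DenseIndex, 5> padded =
          shape.AsEigenDSizesWithPadding<5>();
      (void)d;
      (void)padded;
    }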
|
D | tensor_slice.h |
    template <int NDIMS>
    void FillIndicesAndSizes(
        const TensorShape& shape,
        Eigen::DSizes<Eigen::DenseIndex, NDIMS>* indices,
        Eigen::DSizes<Eigen::DenseIndex, NDIMS>* sizes) const;

    template <int NDIMS>
    void TensorSlice::FillIndicesAndSizes(
        const TensorShape& shape, Eigen::DSizes<Eigen::DenseIndex, NDIMS>* indices,
        Eigen::DSizes<Eigen::DenseIndex, NDIMS>* sizes) const {
      …
      CHECK_GE(NDIMS, dims()) << "Asking for a " << NDIMS << "-dim slice from "
      …
      for (int d = dims(); d < NDIMS; ++d) {
      …
|
D | tensor_types.h |
    template <typename T, int NDIMS = 1, typename IndexType = Eigen::DenseIndex>
    struct TTypes {
      typedef Eigen::TensorMap<Eigen::Tensor<T, NDIMS, Eigen::RowMajor, IndexType>,
                               Eigen::Aligned>
          Tensor;
      typedef Eigen::TensorMap<
          Eigen::Tensor<const T, NDIMS, Eigen::RowMajor, IndexType>, Eigen::Aligned>
          ConstTensor;

      typedef Eigen::TensorMap<Eigen::Tensor<T, NDIMS, Eigen::RowMajor, IndexType> >
          UnalignedTensor;
      typedef Eigen::TensorMap<
          Eigen::Tensor<const T, NDIMS, Eigen::RowMajor, IndexType> >
          UnalignedConstTensor;

      typedef Eigen::TensorMap<Eigen::Tensor<T, NDIMS, Eigen::RowMajor, int>,
                               Eigen::Aligned>
          Tensor32Bit;
      …
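TTypes<T, NDIMS>::Tensor is just an aligned Eigen::TensorMap over an existing buffer. A standalone Eigen sketch of the same idea; Eigen::Aligned is omitted here because a stack buffer need not be aligned:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      float data[2 * 3] = {0, 1, 2, 3, 4, 5};

      // The unaligned analogue of TTypes<float, 2>::Tensor over `data`.
      Eigen::TensorMap<Eigen::Tensor<float, 2, Eigen::RowMajor>> m(data, 2, 3);
      m(1, 2) = 42.0f;  // writes through to data[5]

      // The analogue of TTypes<float, 2>::ConstTensor.
      Eigen::TensorMap<Eigen::Tensor<const float, 2, Eigen::RowMajor>> cm(
          data, 2, 3);
      return cm(1, 2) == 42.0f ? 0 : 1;
    }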
|
D | numeric_op.h |
    #define NDIM_CASE(NDIMS)                                                        \
      case NDIMS: {                                                                 \
        static_cast<CHILD*>(this)->template Operate<NDIMS>(context, a, b, output); \
        …
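NDIM_CASE implements rank dispatch: a switch maps a runtime rank onto the compile-time NDIMS template argument of Operate<NDIMS>(). A self-contained sketch of the same pattern; DispatchByRank and PrintRank are hypothetical names, not from numeric_op.h:

    #include <cstdio>

    struct PrintRank {
      template <int NDIMS>
      void Operate() {
        std::printf("operating at rank %d\n", NDIMS);
      }
    };

    template <typename Op>
    void DispatchByRank(int rank, Op& op) {
      switch (rank) {
    #define NDIM_CASE(NDIMS)          \
      case NDIMS:                     \
        op.template Operate<NDIMS>(); \
        break
        NDIM_CASE(1);
        NDIM_CASE(2);
        NDIM_CASE(3);
    #undef NDIM_CASE
        default:
          std::printf("unsupported rank %d\n", rank);
      }
    }

    int main() {
      PrintRank op;
      DispatchByRank(2, op);  // prints "operating at rank 2"
    }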
|
/external/tensorflow/tensorflow/core/kernels/ |
D | strided_slice_op.h |
    template <typename Device, typename T, int NDIMS>
    …
      void operator()(const Device& d, typename TTypes<T, NDIMS>::Tensor output,
                      typename TTypes<T, NDIMS>::ConstTensor input,
                      const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& start_indices,
                      const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& stop_indices,
                      const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& strides) {
        …
        Eigen::DSizes<int, NDIMS> start_i, stop_i, strides_i;
        for (int i = 0; i < NDIMS; ++i) {
        …

    template <typename T, int NDIMS, typename Device>
    …
      static void run(const Device& d, typename TTypes<T, NDIMS>::Tensor output) {
    [all …]
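The functor forwards to Eigen's strided slicing: keep the elements between start and stop with a per-dimension stride. A standalone Eigen sketch of the operation itself, with my own example shapes:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<int, 2, Eigen::RowMajor> in(4, 6);
      for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 6; ++j) in(i, j) = 10 * i + j;

      Eigen::DSizes<Eigen::DenseIndex, 2> start(0, 1);
      Eigen::DSizes<Eigen::DenseIndex, 2> stop(4, 6);
      Eigen::DSizes<Eigen::DenseIndex, 2> strides(2, 2);

      // Keeps rows 0, 2 and columns 1, 3, 5 -> a 2 x 3 result.
      Eigen::Tensor<int, 2, Eigen::RowMajor> out =
          in.stridedSlice(start, stop, strides);
      return out(1, 2) == 25 ? 0 : 1;  // out(1, 2) == in(2, 5)
    }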
|
D | broadcast_to_op.h |
    template <int NDIMS>
    void DoBCast32Bit(const Device &device, typename TTypes<T, NDIMS>::Tensor out,
                      typename TTypes<T, NDIMS>::ConstTensor in,
                      const typename Eigen::array<int, NDIMS> &bcast) const {
      …

    template <int NDIMS>
    void DoBCast(
        const Device &device, typename TTypes<T, NDIMS>::Tensor out,
        typename TTypes<T, NDIMS>::ConstTensor in,
        const typename Eigen::array<Eigen::DenseIndex, NDIMS> &bcast) const {
      …

    template <int NDIMS>
    void ReshapeAndBCast(…) const {
      …
      DoBCast32Bit<NDIMS>(
    [all …]
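DoBCast wraps Eigen's broadcast(), which tiles the input by a per-dimension factor. A standalone sketch with illustrative shapes:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<int, 2, Eigen::RowMajor> in(1, 3);
      in.setValues({{1, 2, 3}});

      // Tile 4x along dimension 0, 1x along dimension 1 -> shape {4, 3}.
      Eigen::array<Eigen::DenseIndex, 2> bcast = {4, 1};
      Eigen::Tensor<int, 2, Eigen::RowMajor> out = in.broadcast(bcast);

      return out(3, 2) == 3 ? 0 : 1;  // every row is {1, 2, 3}
    }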
|
D | slice_op.h |
    template <typename Device, typename T, int NDIMS>
    …
      void operator()(const Device& d, typename TTypes<T, NDIMS>::Tensor output,
                      typename TTypes<T, NDIMS>::ConstTensor input,
                      const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& slice_indices,
                      const Eigen::DSizes<Eigen::DenseIndex, NDIMS>& slice_sizes) {
        …
        Eigen::DSizes<int, NDIMS> indices;
        for (int i = 0; i < NDIMS; ++i) {
        …
        Eigen::DSizes<int, NDIMS> sizes;
        for (int i = 0; i < NDIMS; ++i) {
|
D | conv_2d_gpu.h |
    template <typename T, int NDIMS>
    …
        int nthreads, const T* __restrict__ input, Dimension<NDIMS> input_dims,
        T* __restrict__ output, Dimension<NDIMS> output_dims,
        Dimension<NDIMS - 2> padding_left, T padding_value) {
      …
      Index<NDIMS> output_tensor_index =
      …
      Index<NDIMS> input_tensor_index;
      …
      for (int i = 1; i < NDIMS - 1; i++) {
      …
      input_tensor_index[NDIMS - 1] = output_tensor_index[NDIMS - 1];  // channels
      …

    template <typename T, int NDIMS>
    …
        int nthreads, const T* __restrict__ input, Dimension<NDIMS> input_dims,
    [all …]
|
D | conv_2d.h |
    template <typename Device, typename T, typename IndexType, int NDIMS>
    …
        typename TTypes<T, NDIMS, IndexType>::ConstTensor in,
        typename TTypes<T, NDIMS, IndexType>::Tensor out) {
      …
      Eigen::DSizes<IndexType, NDIMS - 2> spatial_dims;
      …
      merged_dims[1] = in.dimension(NDIMS - 2);  // input filters [I]
      merged_dims[2] = in.dimension(NDIMS - 1);  // output filters [O]
      …
      Eigen::DSizes<IndexType, NDIMS> expanded_dims;
      …
      expanded_dims[NDIMS - 1] = merged_dims[1];  // [I]
      …

    template <typename Device, typename T, typename IndexType, int NDIMS>
    …
        typename TTypes<T, NDIMS, IndexType>::ConstTensor in,
    [all …]
|
D | reverse_op.cc |
    template <typename Device, typename T, int NDIMS>
    void HandleReverseCase(…) {
      …
      if (NDIMS == 3 && std::is_same<Device, CPUDevice>::value &&
      …
      typename Eigen::array<bool, NDIMS> axes_di;
      for (int i = 0; i < NDIMS; i++) {
      …
      functor::Reverse<Device, T, NDIMS>()(context->eigen_device<Device>(),
                                           input.tensor<T, NDIMS>(), axes_di,
                                           result->tensor<T, NDIMS>());
    }

    #define HANDLE_REVERSE(NDIMS)                                               \
      case NDIMS:                                                               \
        HandleReverseCase<Device, T, NDIMS>(context, dims.vec<bool>(), output); \
    [all …]
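functor::Reverse is Eigen's reverse() with a bool mask per dimension. A standalone sketch:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<int, 2, Eigen::RowMajor> in(2, 3);
      in.setValues({{1, 2, 3}, {4, 5, 6}});

      Eigen::array<bool, 2> axes = {false, true};  // flip columns only
      Eigen::Tensor<int, 2, Eigen::RowMajor> out = in.reverse(axes);

      return out(0, 0) == 3 ? 0 : 1;  // first row becomes {3, 2, 1}
    }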
|
D | cwise_op_gpu_select.cu.cc |
    template <typename T, int NDIMS>
    struct BCastSelectFunctor<GPUDevice, T, NDIMS> {
      void operator()(…
          typename TTypes<T, NDIMS>::Tensor output_tensor,
          typename TTypes<bool, NDIMS>::ConstTensor cond_tensor,
          typename TTypes<T, NDIMS>::ConstTensor then_tensor,
          typename TTypes<T, NDIMS>::ConstTensor else_tensor,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> cond_bcast,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> then_bcast,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> else_bcast) {
|
D | cwise_op_select.cc |
    #define HANDLE_DIM(NDIMS)                                                  \
      …                                                                        \
        functor::BCastSelectFunctor<Device, T, NDIMS> func;                    \
        …                                                                      \
             output->shaped<T, NDIMS>(bcast.result_shape()),                   \
             cond->template shaped<bool, NDIMS>(cond_bcast.y_reshape()),       \
             then->template shaped<T, NDIMS>(then_bcast.y_reshape()),          \
             else_->template shaped<T, NDIMS>(else_bcast.y_reshape()),         \
             BCast::ToIndexArray<NDIMS>(cond_bcast.y_bcast()),                 \
             BCast::ToIndexArray<NDIMS>(then_bcast.y_bcast()),                 \
             BCast::ToIndexArray<NDIMS>(else_bcast.y_bcast()));                \
      …

    template <typename Device, typename T, int NDIMS>
    [all …]
|
D | cwise_ops_common.h |
    template <typename Functor, int NDIMS>
    struct BinaryFunctor<CPUDevice, Functor, NDIMS, false> {
      …
          typename TTypes<typename Functor::out_type, NDIMS>::Tensor out,
          typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in0,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast0,
          typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in1,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast1,
      …
        if (AllOne<NDIMS>(bcast0) && AllOne<NDIMS>(bcast1)) {
          …
        } else if (AllOne<NDIMS>(bcast0)) {
          …
        } else if (AllOne<NDIMS>(bcast1)) {
    [all …]
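The broadcast path tiles each input by its factor array and applies the binary functor elementwise; the AllOne checks skip the broadcast for an input whose factors are all ones. A standalone Eigen sketch of the general case:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<float, 2, Eigen::RowMajor> a(2, 1), b(1, 3);
      a.setValues({{1}, {2}});
      b.setValues({{10, 20, 30}});

      // Broadcast factors per dimension; {1, 1} would mean "leave as-is".
      Eigen::array<Eigen::DenseIndex, 2> bcast_a = {1, 3};
      Eigen::array<Eigen::DenseIndex, 2> bcast_b = {2, 1};

      Eigen::Tensor<float, 2, Eigen::RowMajor> out =
          a.broadcast(bcast_a) + b.broadcast(bcast_b);

      return out(1, 2) == 32.0f ? 0 : 1;  // 2 + 30
    }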
|
D | cwise_ops_gpu_common.cu.h |
    template <typename Functor, int NDIMS, bool has_errors>
    struct BinaryFunctor<GPUDevice, Functor, NDIMS, has_errors> {
      …
          typename TTypes<typename Functor::out_type, NDIMS>::Tensor out,
          typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in0,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast0,
          typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in1,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast1,
      …
      if ((NDIMS == 2) && Functor::use_bcast_optimization &&
      …
        const bool bcast0_all_one = AllOne<NDIMS>(bcast0);
        const bool bcast1_all_one = AllOne<NDIMS>(bcast1);
|
D | transpose_functor.h |
    template <typename Device, typename T, int NDIMS>
    void TransposeUsingEigen(…) {
      …
      Eigen::array<int, NDIMS> p;
      for (int i = 0; i < NDIMS; ++i) p[i] = perm[i];
      auto x = typename TTypes<T, NDIMS>::ConstTensor(
          …
          in.shape().AsEigenDSizes<NDIMS>());
      auto y = typename TTypes<T, NDIMS>::Tensor(
          …
          out->shape().AsEigenDSizes<NDIMS>());
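TransposeUsingEigen maps both buffers to fixed-rank views and applies Eigen's shuffle() with the permutation. A standalone sketch of the core step:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<int, 2, Eigen::RowMajor> in(2, 3);
      in.setValues({{1, 2, 3}, {4, 5, 6}});

      Eigen::array<int, 2> perm = {1, 0};  // plain 2-D transpose
      Eigen::Tensor<int, 2, Eigen::RowMajor> out = in.shuffle(perm);

      return (out.dimension(0) == 3 && out(2, 1) == 6) ? 0 : 1;
    }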
|
D | padding_fifo_queue.cc |
    template <typename T, int NDIMS>
    … HandleElementToLargerSlice(…) {
      …
      auto element_t = element.tensor<T, NDIMS>();
      auto parent_t = parent->tensor<T, NDIMS + 1>();
      Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_indices;
      …
      Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_size;
      …

    template <int NDIMS>
    … HandleElementToLargerSliceWithRank(…) {
      …
          return HandleElementToLargerSlice<T, NDIMS>(element, parent, index); \
      …

    #define HANDLE_DIMS(NDIMS)                                                   \
      case NDIMS: {                                                              \
        …                                                                        \
            HandleElementToLargerSliceWithRank<NDIMS>(element, parent, index)); \
    [all …]
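The pattern above writes a rank-NDIMS element into one slot of a rank-(NDIMS + 1) batch tensor through a unit-width slice; batch_util.cc below uses the same idiom. A standalone Eigen sketch:

    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      Eigen::Tensor<float, 1, Eigen::RowMajor> element(3);
      element.setValues({7, 8, 9});

      Eigen::Tensor<float, 2, Eigen::RowMajor> parent(4, 3);  // batch of 4
      parent.setZero();

      const Eigen::DenseIndex index = 2;  // batch slot to fill
      Eigen::DSizes<Eigen::DenseIndex, 2> slice_indices(index, 0);
      Eigen::DSizes<Eigen::DenseIndex, 2> slice_size(1, 3);
      parent.slice(slice_indices, slice_size) = element.reshape(slice_size);

      return parent(2, 1) == 8 ? 0 : 1;
    }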
|
D | sparse_tensor_dense_add_op.cc |
    template <typename T, typename Index, int NDIMS>
    struct ScatterNdFunctor<CPUDevice, T, Index, NDIMS, scatter_op::UpdateOp::ADD> {
      void operator()(…
          typename TTypes<T, NDIMS>::Tensor out) {
        Eigen::array<Eigen::DenseIndex, NDIMS> idx;
        …
        for (int d = 0; d < NDIMS; ++d) {
|
D | sparse_tensor_dense_add_op.h |
    template <typename Device, typename T, typename Index, int NDIMS,
              …
      …
          typename TTypes<T, NDIMS>::Tensor out);
|
D | relu_op.h |
    template <int NDIMS>    (five separate matches)
|
D | cwise_ops.h |
    template <typename Device, typename Functor, int NDIMS,
              …
      …
          typename TTypes<typename Functor::out_type, NDIMS>::Tensor out,
          typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in0,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast0,
          typename TTypes<typename Functor::in_type, NDIMS>::ConstTensor in1,
          typename Eigen::array<Eigen::DenseIndex, NDIMS> bcast1,
      …

    template <int NDIMS>
    bool AllOne(const typename Eigen::array<Eigen::DenseIndex, NDIMS>& a) {
      …

    template <typename Device, typename T, int NDIMS>
    …
          typename TTypes<T, NDIMS>::Tensor output_tensor,
    [all …]
|
/external/libopus/celt/tests/ |
D | test_unit_cwrs32.c |
    #define NDIMS (44)
    static const int pn[NDIMS]={
    …
    static const int pkmax[NDIMS]={
    …

    #define NDIMS (22)
    static const int pn[NDIMS]={
    …
    static const int pkmax[NDIMS]={
    …

    for(t=0;t<NDIMS;t++){
|
/external/tensorflow/tensorflow/core/util/ |
D | bcast.h |
    template <typename IndexType, int NDIMS>
    static Eigen::array<IndexType, NDIMS> ToIndexArrayType(
        …
      CHECK_EQ(vec.size(), NDIMS);
      Eigen::array<IndexType, NDIMS> ret;
      for (int i = 0; i < NDIMS; ++i) ret[i] = vec[i];
      …

    template <int NDIMS>
    static Eigen::array<Eigen::DenseIndex, NDIMS> ToIndexArray(
        …
      return ToIndexArrayType<Eigen::DenseIndex, NDIMS>(vec);
    }
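ToIndexArray copies a runtime vector of broadcast factors into the fixed-size Eigen::array the functors expect. A standalone sketch with a hypothetical helper name; the real code uses CHECK_EQ rather than assert:

    #include <unsupported/Eigen/CXX11/Tensor>

    #include <cassert>
    #include <vector>

    template <int NDIMS>
    Eigen::array<Eigen::DenseIndex, NDIMS> ToIndexArraySketch(
        const std::vector<Eigen::DenseIndex>& vec) {
      assert(vec.size() == static_cast<size_t>(NDIMS));
      Eigen::array<Eigen::DenseIndex, NDIMS> ret;
      for (int i = 0; i < NDIMS; ++i) ret[i] = vec[i];
      return ret;
    }

    int main() {
      auto arr = ToIndexArraySketch<3>({2, 1, 4});
      return arr[2] == 4 ? 0 : 1;
    }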
|
D | batch_util.cc |
    template <typename T, int NDIMS>
    … HandleElementToLargerSlice(…) {
      …
      auto element_t = element.tensor<T, NDIMS>();
      auto parent_t = parent->tensor<T, NDIMS + 1>();
      Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_indices;
      …
      Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_size;
      …

    template <int NDIMS>
    … HandleElementToLargerSliceWithRank(…) {
      …
          return HandleElementToLargerSlice<T, NDIMS>(element, parent, index); \
      …

    #define HANDLE_DIMS(NDIMS)                                                   \
      case NDIMS: {                                                              \
        …                                                                        \
            HandleElementToLargerSliceWithRank<NDIMS>(element, parent, index)); \
    [all …]
|
/external/tensorflow/tensorflow/core/util/sparse/ |
D | README.md |
    The shape of `ix` is `N x NDIMS`, and each row corresponds to the …

    Shape must be a `TensorShape` with `dims() == NDIMS`.

    Tensor indices(DT_INT64, TensorShape({N, NDIMS}));
    …
    sp.Reorder<tstring>({1, 2, 0, 3, ...});  // Must provide NDIMS dims.
    …
    Tensor indices(DT_INT64, TensorShape({N, NDIMS}));
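A hedged sketch stitched from the README fragments above: build an N x NDIMS index matrix, matching values, and a shape, then reorder to row-major order. It assumes a recent TensorFlow where int64_t is the index element type; the constructor form mirrors the README, though newer code may prefer sparse::SparseTensor::Create:

    #include "tensorflow/core/util/sparse/sparse_tensor.h"

    void SparseSketch() {
      using tensorflow::Tensor;
      using tensorflow::TensorShape;
      const int N = 2, NDIMS = 2;

      Tensor indices(tensorflow::DT_INT64, TensorShape({N, NDIMS}));
      Tensor values(tensorflow::DT_FLOAT, TensorShape({N}));
      auto ix = indices.matrix<int64_t>();
      ix(0, 0) = 1; ix(0, 1) = 0;  // element at (1, 0)
      ix(1, 0) = 0; ix(1, 1) = 1;  // element at (0, 1)
      values.vec<float>()(0) = 3.0f;
      values.vec<float>()(1) = 4.0f;

      tensorflow::sparse::SparseTensor sp(indices, values,
                                          TensorShape({3, 3}));
      sp.Reorder<float>({0, 1});  // must pass NDIMS dimension indices
    }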
|