/external/tensorflow/tensorflow/core/kernels/
mkl_relu_op.cc
    44:   memory::desc src_md;  [member in tensorflow::MklEltwiseFwdParams]
    49:   MklEltwiseFwdParams(memory::dims src_dims, memory::desc src_md,  [in MklEltwiseFwdParams(), argument]
    52:   src_md(src_md),  [in MklEltwiseFwdParams()]
    65:   static_cast<mkldnn::memory::format>(fwdParams.src_md.data.format);  [in MklEltwiseFwdPrimitive()]
    111:  std::shared_ptr<memory::desc> src_md;  [member]
    129:  src_md(nullptr),  [in EltwiseFwdContext()]
    139:  context_.src_md.reset(new memory::desc(fwdParams.src_md.data));  [in Setup()]
    141:  new memory::primitive_desc(*context_.src_md, cpu_engine_));  [in Setup()]
    145:  prop_kind::forward, fwdParams.alg_kind, *context_.src_md,  [in Setup()]
    174:  static_cast<mkldnn::memory::format>(fwdParams.src_md.data.format);  [in Get()]
    [all …]
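The hits above trace MKL-DNN's (0.x API) eltwise-forward setup: the input's memory::desc is cached as src_md, wrapped in a memory::primitive_desc, and fed to the eltwise forward descriptor together with the algorithm kind. Below is a minimal standalone sketch of that pattern; the shape, buffers, ReLU choice, and the run_relu name are illustrative assumptions, not taken from the TensorFlow sources.

```cpp
// Sketch of the MKL-DNN 0.x eltwise (ReLU) forward pattern seen above.
#include <vector>
#include "mkldnn.hpp"

void run_relu() {
  using namespace mkldnn;
  engine cpu_engine(engine::cpu, 0);

  // Plain NCHW input; the TF kernel derives these from the incoming tensor.
  memory::dims src_dims = {1, 16, 28, 28};
  auto src_md = memory::desc(src_dims, memory::data_type::f32, memory::format::nchw);
  auto src_pd = memory::primitive_desc(src_md, cpu_engine);   // cf. line 141

  std::vector<float> src_buf(1 * 16 * 28 * 28), dst_buf(src_buf.size());
  auto src_mem = memory(src_pd, src_buf.data());

  // Forward descriptor built from src_md, as on line 145 (alg_kind is a parameter there).
  auto relu_desc = eltwise_forward::desc(prop_kind::forward_inference,
                                         algorithm::eltwise_relu, src_md,
                                         0.0f /* negative slope */);
  auto relu_pd = eltwise_forward::primitive_desc(relu_desc, cpu_engine);

  auto dst_mem = memory(relu_pd.dst_primitive_desc(), dst_buf.data());
  auto relu = eltwise_forward(relu_pd, src_mem, dst_mem);

  std::vector<primitive> net = {relu};
  stream(stream::kind::eager).submit(net).wait();
}
```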
mkl_concat_op.cc
    305:  auto src_md = mkl_input_shapes[k].GetMklLayout();  [in Compute(), local]
    306:  srcs[k].SetUsrMem(src_md, &input_tensors[k]);  [in Compute()]
    316:  auto src_md = mkl_input_shapes[k].GetMklLayout();  [in Compute(), local]
    317:  srcs[k].SetUsrMem(src_md, &input_tensors[k]);  [in Compute()]
    319:  if (src_md.data.format != mkl_common_format) {  [in Compute()]
    320:  memory::dims src_dims(src_md.data.dims,  [in Compute()]
    321:  &src_md.data.dims[src_md.data.ndims]);  [in Compute()]
    322:  src_md =  [in Compute()]
    326:  srcs_pd.push_back(memory::primitive_desc(src_md, cpu_engine));  [in Compute()]
    338:  auto src_md =  [in Compute(), local]
    [all …]
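The concat hits show the common-format handling: each input's layout comes from its Mkl shape, inputs whose format differs from mkl_common_format get a rebuilt memory::desc (and, in the real kernel, a reorder) before their memory::primitive_desc joins srcs_pd. A hedged sketch of that pattern in the 0.x API; the two inputs, buffer names, and common_fmt are made up for illustration.

```cpp
// Sketch of the MKL-DNN 0.x concat pattern: reorder inputs to a common format,
// collect their primitive_descs, then concatenate along the channel axis.
#include <vector>
#include "mkldnn.hpp"

void run_concat() {
  using namespace mkldnn;
  engine cpu_engine(engine::cpu, 0);
  const auto common_fmt = memory::format::nchw;   // stands in for mkl_common_format

  memory::dims dims_a = {1, 8, 4, 4}, dims_b = {1, 8, 4, 4};
  std::vector<float> buf_a(128), buf_b(128), buf_b_re(128), dst_buf(256);

  // Input A is already in the common format; input B is NHWC and gets reordered.
  auto md_a = memory::desc(dims_a, memory::data_type::f32, common_fmt);
  auto md_b = memory::desc(dims_b, memory::data_type::f32, memory::format::nhwc);
  auto mem_a = memory({md_a, cpu_engine}, buf_a.data());
  auto mem_b = memory({md_b, cpu_engine}, buf_b.data());

  std::vector<primitive> net;
  std::vector<memory::primitive_desc> srcs_pd;
  std::vector<primitive::at> inputs;

  srcs_pd.push_back(memory::primitive_desc(md_a, cpu_engine));
  inputs.push_back(mem_a);

  // cf. lines 319-326: the format differs from the common one, so build a new
  // desc in the common format and reorder into it before concatenation.
  auto md_b_common = memory::desc(dims_b, memory::data_type::f32, common_fmt);
  auto mem_b_common = memory({md_b_common, cpu_engine}, buf_b_re.data());
  net.push_back(reorder(mem_b, mem_b_common));
  srcs_pd.push_back(memory::primitive_desc(md_b_common, cpu_engine));
  inputs.push_back(mem_b_common);

  auto concat_pd = concat::primitive_desc(1 /* concat over channels */, srcs_pd);
  auto dst_mem = memory(concat_pd.dst_primitive_desc(), dst_buf.data());
  net.push_back(concat(concat_pd, inputs, dst_mem));

  stream(stream::kind::eager).submit(net).wait();
}
```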
mkl_softmax_op.cc
    121:  auto src_md = src_mkl_shape.IsMklTensor()  [in Compute(), local]
    127:  src.SetUsrMem(src_md, &src_tensor);  [in Compute()]
    160:  dst.SetUsrMem(src_md, output_tensor);  [in Compute()]
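These three hits capture the whole softmax data flow: one src_md describes the input, and the same descriptor is reused for the output (dst.SetUsrMem(src_md, ...)). A small sketch of that pattern, with an assumed 2-D batch-by-classes shape:

```cpp
// Sketch of the MKL-DNN 0.x softmax forward pattern; note that the destination
// reuses the source memory::desc, matching the dst.SetUsrMem(src_md, ...) hit.
#include <vector>
#include "mkldnn.hpp"

void run_softmax() {
  using namespace mkldnn;
  engine cpu_engine(engine::cpu, 0);

  memory::dims src_dims = {8, 10};   // batch x classes, illustrative
  auto src_md = memory::desc(src_dims, memory::data_type::f32, memory::format::nc);

  std::vector<float> src_buf(80), dst_buf(80);
  auto src_mem = memory({src_md, cpu_engine}, src_buf.data());
  auto dst_mem = memory({src_md, cpu_engine}, dst_buf.data());  // same layout as src

  auto softmax_desc = softmax_forward::desc(prop_kind::forward_inference, src_md,
                                            1 /* softmax over the class axis */);
  auto softmax_pd = softmax_forward::primitive_desc(softmax_desc, cpu_engine);

  std::vector<primitive> net = {softmax_forward(softmax_pd, src_mem, dst_mem)};
  stream(stream::kind::eager).submit(net).wait();
}
```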
mkl_fused_batch_norm_op.cc
    147:  auto src_md = memory::desc({fwdParams.src_dims}, MklDnnType<T>(),  [in Setup(), local]
    152:  context_.pkind, src_md, fwdParams.eps, context_.flags);  [in Setup()]
    157:  context_.src_mem.reset(new memory({src_md, cpu_engine_}, DummyData));  [in Setup()]
    388:  auto src_md = memory::desc({bwdParams.src_dims}, MklDnnType<T>(),  [in Setup(), local]
    403:  prop_kind::forward_training, src_md, bwdParams.eps,  [in Setup()]
    416:  prop_kind::backward, diff_dst_md, src_md, bwdParams.eps,  [in Setup()]
    423:  context_.src_mem.reset(new memory({src_md, cpu_engine_}, DummyData));  [in Setup()]
    433:  context_.diff_src_mem.reset(new memory({src_md, cpu_engine_}, DummyData));  [in Setup()]
    615:  auto src_md = dnn_shape_src.IsMklTensor()  [in Compute(), local]
    646:  if (src_md.data.format != bn_fwd->GetSrcFmt()) {  [in Compute()]
    [all …]
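The fused batch-norm hits reuse one src_md for the forward descriptor, the backward descriptor (paired with diff_dst_md), and the src/diff_src memory objects. The sketch below covers only that descriptor and memory setup under assumed shapes and flags; mean/variance/scale-shift memories and the primitives themselves are left out.

```cpp
// Sketch of the MKL-DNN 0.x batch-norm descriptor setup mirrored by the hits:
// the same src_md feeds the forward desc, the backward desc, and the src /
// diff_src memory objects.
#include <vector>
#include "mkldnn.hpp"

void setup_batch_norm() {
  using namespace mkldnn;
  engine cpu_engine(engine::cpu, 0);
  const float eps = 1e-3f;                   // stands in for fwdParams.eps / bwdParams.eps

  memory::dims src_dims = {32, 64, 14, 14};  // illustrative NCHW shape
  auto src_md = memory::desc(src_dims, memory::data_type::f32, memory::format::nchw);
  auto diff_dst_md = src_md;                 // gradient tensors share the layout here

  // Forward: desc(prop_kind, src_md, eps, flags), cf. line 152.
  auto bn_fwd_desc = batch_normalization_forward::desc(
      prop_kind::forward_training, src_md, eps, use_scale_shift);
  auto bn_fwd_pd =
      batch_normalization_forward::primitive_desc(bn_fwd_desc, cpu_engine);

  // Backward: desc(prop_kind, diff_dst_md, src_md, eps, flags), cf. line 416;
  // the backward primitive_desc takes the forward one as a hint.
  auto bn_bwd_desc = batch_normalization_backward::desc(
      prop_kind::backward, diff_dst_md, src_md, eps, use_scale_shift);
  auto bn_bwd_pd = batch_normalization_backward::primitive_desc(
      bn_bwd_desc, cpu_engine, bn_fwd_pd);

  // Memory objects built straight from {src_md, engine}, cf. lines 157/423/433
  // (the TF kernels bind a dummy handle first and swap in real buffers later).
  std::vector<float> src_buf(32 * 64 * 14 * 14), diff_src_buf(src_buf.size());
  auto src_mem = memory({src_md, cpu_engine}, src_buf.data());
  auto diff_src_mem = memory({src_md, cpu_engine}, diff_src_buf.data());
}
```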
mkl_conv_grad_filter_ops.cc
    180:  std::shared_ptr<mkldnn::memory::desc> src_md;  [member]
    200:  src_md(nullptr),  [in ConvBwdFilterContext()]
    210:  context_.src_md.reset(new memory::desc(  [in Setup()]
    229:  convolution_direct, *context_.src_md, *context_.diff_filter_md,  [in Setup()]
    236:  convolution_direct, *context_.src_md, *context_.diff_filter_md,  [in Setup()]
    244:  prop_kind::forward, convolution_direct, *context_.src_md,  [in Setup()]
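Here src_md feeds the convolution backward-weights descriptor (built twice in the real kernel, likely with and without a bias gradient) and the forward descriptor that serves as the required primitive_desc hint. A sketch of that setup with assumed shapes, strides, and padding:

```cpp
// Sketch of the MKL-DNN 0.x convolution backward-weights setup: src_md goes
// into the backward-weights desc and into the forward desc used as the hint.
#include "mkldnn.hpp"

void setup_conv_bwd_filter() {
  using namespace mkldnn;
  engine cpu_engine(engine::cpu, 0);
  auto dt = memory::data_type::f32;

  auto src_md         = memory::desc({32, 3, 28, 28},  dt, memory::format::any);
  auto diff_filter_md = memory::desc({16, 3, 3, 3},    dt, memory::format::any);
  auto diff_dst_md    = memory::desc({32, 16, 26, 26}, dt, memory::format::any);
  memory::dims strides = {1, 1}, padding = {0, 0};

  // Backward-weights descriptor, cf. lines 229/236.
  auto bwd_filter_desc = convolution_backward_weights::desc(
      convolution_direct, src_md, diff_filter_md, diff_dst_md,
      strides, padding, padding, padding_kind::zero);

  // Forward descriptor used only as the required hint, cf. line 244.
  auto fwd_desc = convolution_forward::desc(
      prop_kind::forward, convolution_direct, src_md, diff_filter_md,
      diff_dst_md, strides, padding, padding, padding_kind::zero);
  auto fwd_pd = convolution_forward::primitive_desc(fwd_desc, cpu_engine);

  auto bwd_filter_pd = convolution_backward_weights::primitive_desc(
      bwd_filter_desc, cpu_engine, fwd_pd);
}
```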
mkl_conv_ops.cc
    186:   std::shared_ptr<mkldnn::memory::desc> src_md;  [member]
    206:   src_md(nullptr),  [in ConvFwdContext()]
    216:   context_.src_md.reset(new memory::desc(  [in Setup()]
    232:   prop_kind::forward, convolution_direct, *context_.src_md,  [in Setup()]
    238:   prop_kind::forward, convolution_direct, *context_.src_md,  [in Setup()]
    1030:  auto src_md = src_mkl_shape.IsMklTensor()  [in Compute(), local]
    1033:  src.SetUsrMem(src_md, &src_tensor);  [in Compute()]
    1100:  if (src_md.data.format != conv_fwd->GetSrcMemoryFormat()) {  [in Compute()]
    1102:  src.SetUsrMem(src_md, &src_tensor);  [in Compute()]
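The forward-convolution hits combine the cached src_md in Setup() with a runtime format check in Compute(): if the user tensor's layout differs from what the chosen primitive expects, the source is re-registered and reordered. A sketch of that pattern in the 0.x API with illustrative shapes; the reorder branches follow the library's standard user-to-internal-layout idiom.

```cpp
// Sketch of the MKL-DNN 0.x convolution forward pattern: build the descriptor
// with format::any, then reorder the user's NCHW input if the chosen primitive
// expects a different (blocked) layout, cf. the format check at line 1100.
#include <vector>
#include "mkldnn.hpp"

void run_conv_fwd() {
  using namespace mkldnn;
  engine cpu_engine(engine::cpu, 0);
  auto dt = memory::data_type::f32;

  memory::dims src_dims = {1, 3, 28, 28}, filt_dims = {16, 3, 3, 3},
               dst_dims = {1, 16, 26, 26}, strides = {1, 1}, padding = {0, 0};

  // User-side memory in plain NCHW / OIHW layouts.
  std::vector<float> src_buf(1 * 3 * 28 * 28), filt_buf(16 * 3 * 3 * 3);
  auto user_src_md = memory::desc(src_dims, dt, memory::format::nchw);
  auto user_filt_md = memory::desc(filt_dims, dt, memory::format::oihw);
  auto user_src_mem = memory({user_src_md, cpu_engine}, src_buf.data());
  auto user_filt_mem = memory({user_filt_md, cpu_engine}, filt_buf.data());

  // format::any lets the library choose the most efficient layouts.
  auto conv_desc = convolution_forward::desc(
      prop_kind::forward, convolution_direct,
      memory::desc(src_dims, dt, memory::format::any),
      memory::desc(filt_dims, dt, memory::format::any),
      memory::desc(dst_dims, dt, memory::format::any),
      strides, padding, padding, padding_kind::zero);
  auto conv_pd = convolution_forward::primitive_desc(conv_desc, cpu_engine);

  std::vector<primitive> net;

  // Reorder the user tensors only if the primitive expects a different layout.
  auto src_mem = user_src_mem;
  if (memory::primitive_desc(conv_pd.src_primitive_desc()) !=
      user_src_mem.get_primitive_desc()) {
    src_mem = memory(conv_pd.src_primitive_desc());
    net.push_back(reorder(user_src_mem, src_mem));
  }
  auto filt_mem = user_filt_mem;
  if (memory::primitive_desc(conv_pd.weights_primitive_desc()) !=
      user_filt_mem.get_primitive_desc()) {
    filt_mem = memory(conv_pd.weights_primitive_desc());
    net.push_back(reorder(user_filt_mem, filt_mem));
  }

  auto dst_mem = memory(conv_pd.dst_primitive_desc());
  net.push_back(convolution_forward(conv_pd, src_mem, filt_mem, dst_mem));
  stream(stream::kind::eager).submit(net).wait();
}
```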
mkl_lrn_op.cc
    126:  memory::desc src_md = src_dnn_shape.GetCurLayout();  [in Compute(), local]
    133:  src_dnn_data.SetUsrMem(src_md, &src_tensor);  [in Compute()]
    137:  dst_dnn_data.SetUsrMem(src_md);  [in Compute()]
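As with softmax, the LRN hits reuse the input's src_md for the output memory. A sketch with assumed LRN hyper-parameters:

```cpp
// Sketch of the MKL-DNN 0.x LRN forward pattern; as in the hits above, the
// destination is described with the same memory::desc as the source.
#include <vector>
#include "mkldnn.hpp"

void run_lrn() {
  using namespace mkldnn;
  engine cpu_engine(engine::cpu, 0);

  memory::dims src_dims = {1, 16, 28, 28};
  auto src_md = memory::desc(src_dims, memory::data_type::f32, memory::format::nchw);

  std::vector<float> src_buf(1 * 16 * 28 * 28), dst_buf(src_buf.size());
  auto src_mem = memory({src_md, cpu_engine}, src_buf.data());
  auto dst_mem = memory({src_md, cpu_engine}, dst_buf.data());  // same layout as src

  // Illustrative hyper-parameters: local_size, alpha, beta, k.
  auto lrn_desc = lrn_forward::desc(prop_kind::forward_inference,
                                    lrn_across_channels, src_md,
                                    5, 1e-4f, 0.75f, 1.0f);
  auto lrn_pd = lrn_forward::primitive_desc(lrn_desc, cpu_engine);

  std::vector<primitive> net = {lrn_forward(lrn_pd, src_mem, dst_mem)};
  stream(stream::kind::eager).submit(net).wait();
}
```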
mkl_pooling_ops_common.cc
    57:  context_.src_md.reset(new memory::desc({fwdParams.src_dims}, MklDnnType<T>(),  [in Setup()]
    64:  fwdParams.prop_kind, fwdParams.alg_kind, *context_.src_md,  [in Setup()]
mkl_pooling_ops_common.h
    125:  std::shared_ptr<mkldnn::memory::desc> src_md;  [member]
    142:  src_md(nullptr),  [in PoolingFwdContext()]
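Across this .cc/.h pair, src_md is a cached member built from the forward parameters' src_dims and data type, then handed to the pooling forward descriptor. A sketch of that setup, assuming 2x2 max pooling with stride 2:

```cpp
// Sketch of the MKL-DNN 0.x pooling forward setup that the cached src_md feeds
// into. Max pooling, 2x2 window, stride 2; all shapes are illustrative.
#include <vector>
#include "mkldnn.hpp"

void run_max_pool() {
  using namespace mkldnn;
  engine cpu_engine(engine::cpu, 0);
  auto dt = memory::data_type::f32;

  memory::dims src_dims = {1, 16, 28, 28}, dst_dims = {1, 16, 14, 14},
               kernel = {2, 2}, strides = {2, 2}, padding = {0, 0};

  auto src_md = memory::desc(src_dims, dt, memory::format::nchw);
  auto dst_md = memory::desc(dst_dims, dt, memory::format::any);

  std::vector<float> src_buf(1 * 16 * 28 * 28);
  auto src_mem = memory({src_md, cpu_engine}, src_buf.data());

  // desc(prop_kind, alg_kind, src_md, dst_md, strides, kernel, padding, ...),
  // cf. the Setup() hit at mkl_pooling_ops_common.cc line 64.
  auto pool_desc = pooling_forward::desc(prop_kind::forward_inference,
                                         pooling_max, src_md, dst_md,
                                         strides, kernel, padding, padding,
                                         padding_kind::zero);
  auto pool_pd = pooling_forward::primitive_desc(pool_desc, cpu_engine);
  auto dst_mem = memory(pool_pd.dst_primitive_desc());

  std::vector<primitive> net = {pooling_forward(pool_pd, src_mem, dst_mem)};
  stream(stream::kind::eager).submit(net).wait();
}
```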