Searched refs:Relu (Results 1 – 25 of 50) sorted by relevance


/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_ReluGrad.pbtxt:7 The backpropagated gradients to the corresponding Relu operation.
13 The features passed as input to the corresponding Relu operation, OR
23 summary: "Computes rectified linear gradients for a Relu operation."
api_def_Relu.pbtxt:2 graph_op_name: "Relu"
api_def_SparseMatMul.pbtxt:13 in the input gradient when that gradient comes from a Relu.
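The ReluGrad entries above describe the usual ReLU backward rule: the backpropagated gradient is passed through only where the corresponding forward-pass feature was positive. A minimal standalone sketch of that rule (plain C++, not the TensorFlow kernel itself; names are illustrative):

#include <cstddef>
#include <vector>

// Illustrative ReluGrad: propagate the incoming gradient only where the
// forward-pass feature was positive, zero elsewhere.
std::vector<float> ReluGradSketch(const std::vector<float>& gradients,
                                  const std::vector<float>& features) {
  std::vector<float> out(gradients.size());
  for (std::size_t i = 0; i < gradients.size(); ++i) {
    out[i] = features[i] > 0.0f ? gradients[i] : 0.0f;
  }
  return out;
}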
/external/tensorflow/tensorflow/core/api_def/java_api/
api_def_Relu.pbtxt:2 graph_op_name: "Relu"
4 name: "nn.Relu"
/external/tensorflow/tensorflow/core/kernels/
relu_op.cc:82 void Relu<GPUDevice, T>::operator()( \
85 extern template struct Relu<GPUDevice, T>; \
147 void Relu<GPUDevice, qint8>::operator()(
150 extern template struct Relu<GPUDevice, qint8>;
203 functor::Relu<Device, qint8> func; in Operate()
relu_op_gpu.cu.cc:124 struct Relu<Device, qint8> { struct
149 template struct functor::Relu<GPUDevice, T>; \
162 template struct functor::Relu<GPUDevice, qint8>; member in tensorflow::functor
unary_ops_composition.cc:176 auto relu = functor::Relu<Eigen::DefaultDevice, T>(); \
252 REGISTER_COMPUTE_FN(Relu); in UnaryOpsCompositionSupport()
317 REGISTER_COMPUTE_FN(Relu); in UnaryOpsCompositionSupport()
381 REGISTER_COMPUTE_FN(Relu); in UnaryOpsCompositionSupport()
relu_op_functor.h:28 struct Relu { struct
conv_ops_fused_impl.h:128 struct Relu { struct
233 using WithBiasAddAndRelu = BiasAddOutputKernel<T, Relu>;
237 using WithFusedBatchNormAndRelu = FusedBatchNormOutputKernel<T, Relu>;
mkl_fused_ops_test.cc:166 auto with_relu = ops::Relu(root.WithOpName("with_relu"), with_bias); in RunConv2DWithBiasAndRelu()
590 auto with_relu = ops::Relu(root.WithOpName("with_relu"), with_bias); in RunMklPadWithFusedConv2DAndBiasRelu()
conv_ops_test.cc:643 ops::Relu with_relu = ops::Relu(root.WithOpName("with_relu"), with_bias); in RunConv2DWithBiasAndRelu()
701 ops::Relu with_relu = in RunConv2DWithBatchNormAndRelu()
702 ops::Relu(root.WithOpName("with_relu"), with_fused_batch_norm.y); in RunConv2DWithBatchNormAndRelu()
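Across these kernel files, Relu is an element-wise functor (clamp at zero) that the fused convolution kernels reuse as an output stage (WithBiasAddAndRelu, WithFusedBatchNormAndRelu). A minimal sketch of that element-wise behaviour, assuming a plain std::vector rather than the Eigen tensors the real functor operates on:

#include <algorithm>
#include <vector>

// Illustrative stand-in for functor::Relu: apply max(x, 0) to every element.
struct ReluSketch {
  void operator()(std::vector<float>& activations) const {
    for (float& v : activations) v = std::max(v, 0.0f);
  }
};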
/external/tensorflow/tensorflow/core/graph/
quantize_training_test.cc:92 Node* relu = test::graph::Relu(g, a); in TEST_F()
143 Node* relu = test::graph::Relu(g, a); in TEST_F()
192 Node* relu = test::graph::Relu(g, a); in TEST_F()
246 Node* relu = test::graph::Relu(g, a); in TEST_F()
294 Node* relu = test::graph::Relu(graph, const_a); in TEST_F()
331 Node* relu = test::graph::Relu(graph, const_a); in TEST_F()
373 Node* relu = test::graph::Relu(g, a); in TEST_F()
467 Node* relu = test::graph::Relu(g, a); in TEST_F()
testlib.h:199 Node* Relu(Graph* g, Node* in);
/external/tensorflow/tensorflow/core/grappler/costs/graph_properties_testdata/
large_function_graph.pbtxt:443 name: "InceptionV2/InceptionV2/Conv2d_1a_7x7/Relu"
470 name: "InceptionV2/InceptionV2/Conv2d_1a_7x7/Relu"
471 op: "Relu"
586 key: "InceptionV2/InceptionV2/Conv2d_1a_7x7/Relu"
587 value: "InceptionV2/InceptionV2/Conv2d_1a_7x7/Relu:activations:0"
/external/tensorflow/tensorflow/lite/g3doc/convert/
cmdline_examples.md:160 Relu,InceptionV1/InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/Relu,InceptionV1/InceptionV1/Mixed_3b…
180 …InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu,InceptionV1/InceptionV1/Mixed_3b/Branch_2/Conv2d_…
201 Relu,InceptionV1/InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/Relu,InceptionV1/InceptionV1/Mixed_3b…
209 MatMul, BiasAdd, Relu...), it is typically represented as a single "fused" op
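The cmdline_examples.md hit above notes that an op sequence such as MatMul, BiasAdd, Relu is typically represented as a single "fused" op in the converted model. A hedged sketch of what that fusion amounts to, folding the bias add and the ReLU into one pass over the matmul output (illustrative only, not the TFLite kernel):

#include <cstddef>
#include <vector>

// Illustrative fused BiasAdd+Relu epilogue: one traversal of the output
// instead of two separate ops materializing an intermediate tensor.
void BiasAddReluSketch(std::vector<float>& matmul_out,
                       const std::vector<float>& bias) {
  const std::size_t channels = bias.size();
  for (std::size_t i = 0; i < matmul_out.size(); ++i) {
    const float v = matmul_out[i] + bias[i % channels];
    matmul_out[i] = v > 0.0f ? v : 0.0f;
  }
}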
/external/tensorflow/tensorflow/contrib/receptive_field/
README.md:85 g.as_graph_def(), 'input_image', 'InceptionResnetV2/Conv2d_7b_1x1/Relu')
90 the node `'InceptionResnetV2/Conv2d_7b_1x1/Relu'` is computed from a region
101 `'InceptionResnetV2/Conv2d_7b_1x1/Relu'` is centered in the original image in
171 --output_node InceptionResnetV2/Conv2d_7b_1x1/Relu
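The receptive_field README hits describe how the value at InceptionResnetV2/Conv2d_7b_1x1/Relu is computed from a region of the input image. The standard recurrence for that region's size, assuming a simple chain of conv/pool layers (the layer stack below is an illustrative example, not InceptionResnetV2's actual one):

#include <cstdio>

int main() {
  struct Layer { int kernel; int stride; };
  const Layer layers[] = {{7, 2}, {3, 2}, {3, 1}};  // assumed example stack
  int rf = 1;    // receptive field size, in input pixels
  int jump = 1;  // cumulative stride of the layer's output grid
  for (const Layer& l : layers) {
    rf += (l.kernel - 1) * jump;
    jump *= l.stride;
  }
  std::printf("receptive field = %d, effective stride = %d\n", rf, jump);
  return 0;
}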
/external/tensorflow/tensorflow/core/api_def/python_api/
api_def_Relu.pbtxt:2 graph_op_name: "Relu"
/external/tensorflow/tensorflow/tools/graph_transforms/
quantize_nodes_test.cc:404 Output relu_op = Relu(root.WithOpName("relu_op"), constant_op); in TestQuantizeRelu()
767 Output relu_op = Relu(root.WithOpName("relu_op"), dequantize_op); in TestRemoveRedundantQuantizationWithMultipleOutputs()
791 Output relu_op = Relu(root.WithOpName("relu_op"), placeholder_op); in TestQuantizePlaceholders()
1093 Output relu_op = Relu(root.WithOpName("relu_op"), bias_add_op); in TestHoistFakeQuants()
1284 Output a_relu_op = Relu(root.WithOpName("a_relu_op"), a_op); in TestMergeDuplicatesNested()
1290 Output b_relu_op = Relu(root.WithOpName("b_relu_op"), b_op); in TestMergeDuplicatesNested()
1334 Output a_relu_op = Relu(root.WithOpName("a_relu_op"), a_op); in TestMergeDuplicatesInOut()
1340 Output b_relu_op = Relu(root.WithOpName("b_relu_op"), b_op); in TestMergeDuplicatesInOut()
1407 Relu(root.WithOpName("excluded_relu_op"), excluded_reshape_op); in TestExcludeNonFloat()
1413 Relu(root.WithOpName("included_relu_op"), included_reshape_op); in TestExcludeNonFloat()
/external/tensorflow/tensorflow/contrib/fused_conv/ops/
fused_conv2d_bias_activation_op.cc:147 Must be "Relu" or "None".
/external/tensorflow/tensorflow/contrib/specs/
README.md:15 - built-in layers are capitalized, not CamelCase (Relu, Fs, etc.)
17 - less common operations are longer (Relu, Conc, etc.)
50 - `Relu` = tf.nn.relu
/external/tensorflow/tensorflow/core/grappler/costs/
analytical_cost_estimator_test.cc:70 auto relu = ops::Relu(s.WithOpName("relu"), bias); in CreateMiniGraph()
/external/tensorflow/tensorflow/cc/gradients/
nn_grad_test.cc:43 using ops::Relu;
144 auto y = Relu(scope_, x); in TEST_F()
/external/tensorflow/tensorflow/contrib/specs/python/
specs_ops.py:118 Relu = Fun(nn_ops.relu) variable
/external/tensorflow/tensorflow/core/profiler/g3doc/
profile_memory.md:32 Relu 8462.80MB (30.83%, 7.03%)
/external/tensorflow/tensorflow/core/grappler/optimizers/
remapper_test.cc:185 auto relu = ops::Relu(s.WithOpName("relu"), bias_add); in TEST_F()
333 auto relu = ops::Relu(s.WithOpName("relu"), batch_norm.y); in TEST_F()
