/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Native XLA implementations of XLA Relu Ops

#include "tensorflow/compiler/tf2xla/kernels/relu_op.h"

#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/literal.h"

namespace xla {
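// Elementwise Relu: max(0, x), built against a zero scalar of x's element
// type.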
XlaOp Relu(XlaOp x) { return Max(ScalarLike(x, 0), x); }

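// Elementwise Relu6: min(max(0, x), 6), expressed as a single Clamp.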
XlaOp Relu6(XlaOp x) {
  auto zero = ScalarLike(x, 0);
  auto six = ScalarLike(x, 6);
  return Clamp(zero, x, six);
}
}  // namespace xla
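
// A minimal usage sketch of the helpers above (hypothetical; the builder and
// parameter set-up below are illustrative, not part of this file):
//
//   xla::XlaBuilder b("relu_example");
//   auto x = xla::Parameter(&b, /*parameter_number=*/0,
//                           xla::ShapeUtil::MakeShape(xla::F32, {4}), "x");
//   auto y = xla::Relu6(x);  // emits Clamp(0, x, 6) into b's computation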

namespace tensorflow {
namespace {

class ReluOp : public XlaOpKernel {
 public:
  explicit ReluOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
  // Computes elementwise max(x, 0) over the input tensor.
  void Compile(XlaOpKernelContext* ctx) override {
    ctx->SetOutput(0, xla::Relu(ctx->Input(0)));
  }
};
REGISTER_XLA_OP(Name("Relu"), ReluOp);
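// REGISTER_XLA_OP makes each kernel in this file visible to the tf2xla
// bridge for every registered XLA backend; if a kernel needed to be limited
// to particular dtypes, a TypeConstraint could be added to the registration.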

class Relu6Op : public XlaOpKernel {
 public:
  explicit Relu6Op(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
  // Clamps the input tensor elementwise between 0 and 6.
  void Compile(XlaOpKernelContext* ctx) override {
    ctx->SetOutput(0, xla::Relu6(ctx->Input(0)));
  }
};
REGISTER_XLA_OP(Name("Relu6"), Relu6Op);

class LeakyReluOp : public XlaOpKernel {
 public:
  explicit LeakyReluOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
    OP_REQUIRES_OK(ctx, ctx->GetAttr("alpha", &alpha_));
  }
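  // Computes LeakyRelu(x) = x if x > 0, alpha * x otherwise, elementwise.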
  void Compile(XlaOpKernelContext* ctx) override {
    auto features = ctx->Input("features");
    auto prod_with_alpha = features * xla::ScalarLike(features, alpha_);
    auto gt_zero = xla::Gt(features, xla::ScalarLike(features, 0));
    auto output = xla::Select(gt_zero, features, prod_with_alpha);
    ctx->SetOutput(0, output);
  }
  float alpha_;
};
REGISTER_XLA_OP(Name("LeakyRelu"), LeakyReluOp);

class ReluGradOp : public XlaOpKernel {
 public:
  explicit ReluGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
  // Return the lhs (incoming gradient) if the rhs (input feature) > 0,
  // otherwise return 0.
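  // (This is dRelu(x)/dx: 1 for x > 0 and 0 elsewhere, including at x == 0,
  // where the zero subgradient is used.)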
  void Compile(XlaOpKernelContext* ctx) override {
    xla::XlaBuilder* b = ctx->builder();
    const TensorShape shape = ctx->InputShape(0);
    const auto zero =
        xla::Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());
    const auto pred = xla::Gt(ctx->Input(1), zero);
    ctx->SetOutput(0, xla::Select(pred, ctx->Input(0), zero));
  }
};
REGISTER_XLA_OP(Name("ReluGrad"), ReluGradOp);

class Relu6GradOp : public XlaOpKernel {
 public:
  explicit Relu6GradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
  // Return the lhs (incoming gradient) if 0 < rhs (input feature) < 6,
  // otherwise return 0.
  void Compile(XlaOpKernelContext* ctx) override {
    xla::XlaBuilder* b = ctx->builder();
    const TensorShape shape = ctx->InputShape(0);
    const auto zero =
        xla::Broadcast(XlaHelpers::Zero(b, input_type(0)), shape.dim_sizes());
    const auto six = xla::Broadcast(
        XlaHelpers::IntegerLiteral(b, input_type(0), 6), shape.dim_sizes());
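    // Pass the gradient through only where 0 < feature < 6; Relu6 is flat,
    // with zero gradient, outside that interval.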
    auto out = xla::Select(
        xla::And(xla::Lt(ctx->Input(1), six), xla::Gt(ctx->Input(1), zero)),
        ctx->Input(0), zero);
    ctx->SetOutput(0, out);
  }
};
REGISTER_XLA_OP(Name("Relu6Grad"), Relu6GradOp);

class LeakyReluGradOp : public XlaOpKernel {
 public:
  explicit LeakyReluGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
    OP_REQUIRES_OK(ctx, ctx->GetAttr("alpha", &alpha_));
  }
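  // Returns the incoming gradient where features > 0, and alpha times the
  // incoming gradient otherwise.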
  void Compile(XlaOpKernelContext* ctx) override {
    auto gradients = ctx->Input("gradients");
    auto features = ctx->Input("features");
    auto output =
        xla::Select(xla::Gt(features, xla::ScalarLike(features, 0)), gradients,
                    gradients * xla::ScalarLike(gradients, alpha_));
    ctx->SetOutput(0, output);
  }
  float alpha_;
};
REGISTER_XLA_OP(Name("LeakyReluGrad"), LeakyReluGradOp);

}  // namespace
}  // namespace tensorflow