/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/hexagon/builders/arithmetic_builder.h"

#include <stdint.h>

#include <limits>

#include "hexagon/hexagon_nn_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ArithmeticOpBuilder::PopulateSubGraph(
    const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
    TfLiteContext* context) {
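  // Builds the arithmetic op with its quantized inputs, then appends a
  // Requantize op so the result lands in the output tensor's quantized range.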
  // First input data tensor.
  int tensor_id = inputs->data[0];
  const auto& input1_tensor = context->tensors[tensor_id];
  AddInput(graph_builder_->GetHexagonTensorId(tensor_id));

  // Second input data tensor.
  tensor_id = inputs->data[1];
  const auto& input2_tensor = context->tensors[tensor_id];
  AddInput(graph_builder_->GetHexagonTensorId(tensor_id));

  // Inputs min/max
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, input1_tensor));
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, input2_tensor));

  // Output details.
  TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
      context->tensors[outputs->data[0]], &output_min_, &output_max_));
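  // Wrap the output min/max in const nodes so they can be passed as tensor
  // inputs to the ops below.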
  auto* output_min_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_min_), sizeof(output_min_));
  auto* output_max_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_max_), sizeof(output_max_));
  int output_batch_size, output_height_size, output_width_size,
      output_depth_size;
  GetDims(&output_batch_size, &output_height_size, &output_width_size,
          &output_depth_size, context->tensors[outputs->data[0]].dims);

  if (op_node_.op_type == OP_QuantizedAdd_8p8to8 && output_max_ != 0) {
    // Hexagon's QuantizedAdd supports output min/max as input.
    AddInput(TensorID(output_min_const->GetID(), 0));
    AddInput(TensorID(output_max_const->GetID(), 0));
  }

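  // QuantizedMul_8x8to32 produces a 32-bit result, so it is requantized down
  // to 8 bits in the output tensor's range.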
  if (op_node_.op_type == OP_QuantizedMul_8x8to32) {
    const auto& math_out = AddOutput(sizeof(int), 4,
                                     {output_batch_size, output_height_size,
                                      output_width_size, output_depth_size});
    const auto& math_out_min = AddOutput(sizeof(float), 4, kScalarShape);
    const auto& math_out_max = AddOutput(sizeof(float), 4, kScalarShape);

    auto* requantize_op = graph_builder_->AddNode(GetTFLiteNodeID());
    requantize_op->SetOpType(OP_Requantize_32to8);
    requantize_op->AddInput(math_out);
    requantize_op->AddInput(math_out_min);
    requantize_op->AddInput(math_out_max);
    requantize_op->AddInput(TensorID(output_min_const->GetID(), 0));
    requantize_op->AddInput(TensorID(output_max_const->GetID(), 0));
    node_output_ =
        requantize_op->AddOutput(sizeof(uint8_t), 4,
                                 {output_batch_size, output_height_size,
                                  output_width_size, output_depth_size});
    requantize_op->AddOutput(sizeof(float), 4, kScalarShape);
    requantize_op->AddOutput(sizeof(float), 4, kScalarShape);
  } else {
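    // The op's result is already 8-bit; requantize it from the op's computed
    // range to the output tensor's expected range.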
    auto result_out = AddOutput(sizeof(uint8_t), 4,
                                {output_batch_size, output_height_size,
                                 output_width_size, output_depth_size});
    auto result_min = AddOutput(sizeof(float), 4, kScalarShape);
    auto result_max = AddOutput(sizeof(float), 4, kScalarShape);

    auto* requantize_op = graph_builder_->AddNode(GetTFLiteNodeID());
    requantize_op->SetOpType(OP_Requantize_8to8);
    requantize_op->AddInput(result_out);
    requantize_op->AddInput(result_min);
    requantize_op->AddInput(result_max);
    requantize_op->AddInput(TensorID(output_min_const->GetID(), 0));
    requantize_op->AddInput(TensorID(output_max_const->GetID(), 0));
    node_output_ =
        requantize_op->AddOutput(sizeof(uint8_t), 4,
                                 {output_batch_size, output_height_size,
                                  output_width_size, output_depth_size});
    requantize_op->AddOutput(sizeof(float), 4, kScalarShape);
    requantize_op->AddOutput(sizeof(float), 4, kScalarShape);
  }

  return kTfLiteOk;
}

TfLiteStatus ArithmeticOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
                                                  TfLiteContext* context) {
  // Should be only 1 output.
  graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
                                  node_output_.second);
  return kTfLiteOk;
}

ArithmeticOpBuilder::~ArithmeticOpBuilder() {}

OpBuilder* CreateArithmeticBuilder(GraphBuilder* graph_builder, int op_type) {
  return new ArithmeticOpBuilder(graph_builder, op_type);
}

}  // namespace hexagon
}  // namespace delegates
}  // namespace tflite