1 /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <random>
#include <vector>
17
18 #include <gmock/gmock.h>
19 #include <gtest/gtest.h>
20 #include "absl/flags/flag.h"
21 #include "absl/flags/parse.h"
22 #include "tensorflow/lite/c/common.h"
23 #include "tensorflow/lite/delegates/hexagon/hexagon_delegate.h"
24 #include "tensorflow/lite/interpreter.h"
25 #include "tensorflow/lite/interpreter_builder.h"
26 #include "tensorflow/lite/kernels/kernel_util.h"
27 #include "tensorflow/lite/kernels/register.h"
28 #include "tensorflow/lite/kernels/test_util.h"
29 #include "tensorflow/lite/model_builder.h"
30 #include "tensorflow/lite/testing/util.h"
31 #include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
32 #include "tensorflow/lite/tools/logging.h"
33
34 ABSL_FLAG(std::string, model_file_path, "", "Path to the test model file.");
35 ABSL_FLAG(std::string, model_input_shapes, "",
36 "List of different input shapes for testing, the input will "
37 "resized for each one in order and tested. They Should be "
38 "separated by : and each shape has dimensions separated by ,");
39 ABSL_FLAG(int, max_batch_size, -1,
40 "Maximum batch size for a single run by hexagon.");
41 ABSL_FLAG(double, error_epsilon, 0.2,
42 "Maximum error allowed while diffing the output.");
43
44 namespace tflite {
45 namespace {
46 // Returns a randomly generated data of size 'num_elements'.
GetData(int num_elements)47 std::vector<uint8> GetData(int num_elements) {
48 std::vector<uint8> result(num_elements);
49 std::random_device random_engine;
50 std::uniform_int_distribution<uint32_t> distribution(0, 254);
51 std::generate_n(result.data(), num_elements, [&]() {
52 return static_cast<uint8>(distribution(random_engine));
53 });
54 return result;
55 }
56
57 // Returns the total number of elements.
// Returns the product of all dimensions in 'shape' (1 for an empty shape).
int NumElements(const std::vector<int>& shape) {
  int product = 1;
  for (auto it = shape.begin(); it != shape.end(); ++it) {
    product *= *it;
  }
  return product;
}
63
64 // Returns true if 'control' and 'exp' values match up to 'epsilon'
DiffOutput(const std::vector<float> & control,const std::vector<float> & exp,double epsilon)65 bool DiffOutput(const std::vector<float>& control,
66 const std::vector<float>& exp, double epsilon) {
67 if (control.size() != exp.size()) {
68 TFLITE_LOG(ERROR) << "Mismatch size Expected" << control.size() << " got "
69 << exp.size();
70 return false;
71 }
72 bool has_diff = false;
73 for (int i = 0; i < control.size(); ++i) {
74 if (abs(control[i] - exp[i]) > epsilon) {
75 TFLITE_LOG(ERROR) << control[i] << " " << exp[i];
76 has_diff = true;
77 }
78 }
79 return !has_diff;
80 }
81
DiffOutput(const std::vector<float> & control,const std::vector<float> & exp)82 bool DiffOutput(const std::vector<float>& control,
83 const std::vector<float>& exp) {
84 return DiffOutput(control, exp, absl::GetFlag(FLAGS_error_epsilon));
85 }
86 } // namespace
87
// Wraps a TFLite model, its interpreter, and (optionally) a Hexagon delegate
// so the test can run the same graph with and without delegation and compare
// the outputs.
class TestModel {
 public:
  // Starts with a null delegate and a no-op deleter; ApplyDelegate() swaps in
  // the real delegate together with its matching deleter.
  TestModel() : delegate_(nullptr, [](TfLiteDelegate* delegate) {}) {}

  // Initialize the model by reading the model from file and build
  // interpreter.
  void Init() {
    model_ = tflite::FlatBufferModel::BuildFromFile(
        absl::GetFlag(FLAGS_model_file_path).c_str());
    ASSERT_TRUE(model_ != nullptr);

    resolver_.reset(new ops::builtin::BuiltinOpResolver());
    InterpreterBuilder(*model_, *resolver_)(&interpreter_);
    ASSERT_TRUE(interpreter_ != nullptr);
  }

  // Add Hexagon delegate to the graph with dynamic batch support enabled.
  // 'input_batch_dimensions'/'output_batch_dimensions' list which dimension
  // carries the batch for the respective tensors.
  void ApplyDelegate(int max_batch_size,
                     const std::vector<int>& input_batch_dimensions,
                     const std::vector<int>& output_batch_dimensions) {
    // NOTE(review): the two TfLiteIntArrays are not freed here — presumably
    // ownership transfers to the delegate through 'options'; confirm against
    // the Hexagon delegate documentation.
    TfLiteIntArray* input_batch_dim =
        TfLiteIntArrayCreate(input_batch_dimensions.size());
    TfLiteIntArray* output_batch_dim =
        TfLiteIntArrayCreate(output_batch_dimensions.size());
    for (int i = 0; i < input_batch_dimensions.size(); ++i)
      input_batch_dim->data[i] = input_batch_dimensions[i];
    for (int i = 0; i < output_batch_dimensions.size(); ++i)
      output_batch_dim->data[i] = output_batch_dimensions[i];
    ::TfLiteHexagonDelegateOptions options = {0};
    options.enable_dynamic_batch_size = true;
    options.max_batch_size = max_batch_size;
    options.input_batch_dimensions = input_batch_dim;
    options.output_batch_dimensions = output_batch_dim;
    TfLiteDelegate* delegate = TfLiteHexagonDelegateCreate(&options);
    ASSERT_TRUE(delegate != nullptr);
    delegate_ = std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>(
        delegate, [](TfLiteDelegate* delegate) {
          TfLiteHexagonDelegateDelete(delegate);
        });
    ASSERT_TRUE(interpreter_->ModifyGraphWithDelegate(delegate_.get()) ==
                kTfLiteOk);
  }

  // Resizes the first input to 'input_shape', copies 'input_data' into the
  // input tensor, and invokes the interpreter.
  void Run(const std::vector<int>& input_shape,
           const std::vector<uint8>& input_data) {
    // Resize Inputs.
    auto interpreter_inputs = interpreter_->inputs();
    interpreter_->ResizeInputTensor(interpreter_inputs[0], input_shape);
    ASSERT_EQ(kTfLiteOk, interpreter_->AllocateTensors());

    TfLiteTensor* input_tensor =
        interpreter_->tensor(interpreter_->inputs()[0]);
    memcpy(input_tensor->data.raw, input_data.data(),
           input_data.size() * sizeof(uint8));

    ASSERT_EQ(kTfLiteOk, interpreter_->Invoke());
  }

  // Returns the output at 'output_index' dequantized to float using the
  // tensor's affine quantization parameters: scale * (q - zero_point).
  std::vector<float> GetOutput(int output_index) {
    auto* tensor = interpreter_->output_tensor(output_index);
    uint8* data = interpreter_->typed_output_tensor<uint8>(output_index);
    std::vector<float> result;
    // This resolves to the NumElements(TfLiteTensor*) overload from
    // kernel_util.h, not the local shape-vector helper above.
    result.resize(NumElements(tensor));
    const auto scale =
        reinterpret_cast<TfLiteAffineQuantization*>(tensor->quantization.params)
            ->scale->data[0];
    const auto zero_point =
        reinterpret_cast<TfLiteAffineQuantization*>(tensor->quantization.params)
            ->zero_point->data[0];
    for (int i = 0; i < result.size(); ++i) {
      result[i] = scale * (data[i] - zero_point);
    }
    return result;
  }

 private:
  // Declared first so it is destroyed last: members are destroyed in reverse
  // declaration order, so the interpreter is torn down before the delegate it
  // was modified with.
  std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)> delegate_;
  std::unique_ptr<FlatBufferModel> model_;
  std::unique_ptr<tflite::OpResolver> resolver_;
  std::unique_ptr<Interpreter> interpreter_;
};
169
ParseInputShapes()170 std::vector<std::vector<int>> ParseInputShapes() {
171 std::vector<string> str_input_shapes;
172 benchmark::util::SplitAndParse(absl::GetFlag(FLAGS_model_input_shapes), ':',
173 &str_input_shapes);
174 std::vector<std::vector<int>> input_shapes(str_input_shapes.size());
175 for (int i = 0; i < str_input_shapes.size(); ++i) {
176 benchmark::util::SplitAndParse(str_input_shapes[i], ',', &input_shapes[i]);
177 }
178 return input_shapes;
179 }
180
// Runs the reference (CPU) and Hexagon-delegated models over every shape in
// --model_input_shapes and reports how many runs produced mismatching output.
TEST(HexagonDynamicBatch, MultipleResizes) {
  auto reference_model = std::make_unique<TestModel>();
  auto delegated_model = std::make_unique<TestModel>();
  reference_model->Init();
  delegated_model->Init();
  delegated_model->ApplyDelegate(absl::GetFlag(FLAGS_max_batch_size), {0}, {0});
  int failed_runs = 0;
  int total_runs = 0;
  for (const auto& shape : ParseInputShapes()) {
    const auto input = GetData(NumElements(shape));
    reference_model->Run(shape, input);
    delegated_model->Run(shape, input);
    const bool outputs_match =
        DiffOutput(reference_model->GetOutput(0), delegated_model->GetOutput(0));
    if (!outputs_match) {
      TFLITE_LOG(ERROR) << "Failed for input " << total_runs;
      ++failed_runs;
    }
    ++total_runs;
  }
  if (failed_runs == 0) {
    TFLITE_LOG(INFO) << "All Tests PASSED";
  } else {
    TFLITE_LOG(INFO) << "Failed " << failed_runs << " out of " << total_runs;
  }
}
208 } // namespace tflite
209
main(int argc,char ** argv)210 int main(int argc, char** argv) {
211 ::tflite::LogToStderr();
212 absl::ParseCommandLine(argc, argv);
213 testing::InitGoogleTest();
214
215 TfLiteHexagonInit();
216 int return_val = RUN_ALL_TESTS();
217 TfLiteHexagonTearDown();
218 return return_val;
219 }
220