/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <math.h>

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/examples/hello_world/model.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
#include "tensorflow/lite/schema/schema_generated.h"

TF_LITE_MICRO_TESTS_BEGIN

TF_LITE_MICRO_TEST(LoadModelAndPerformInference) {
  // Define the input and the expected output
  float x = 0.0f;
  float y_true = sin(x);

  // Set up logging
  tflite::MicroErrorReporter micro_error_reporter;
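  // MicroErrorReporter forwards log messages to the platform's DebugLog()
  // hook, so errors reported below appear on the device's debug output.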

  // Map the model into a usable data structure. This doesn't involve any
  // copying or parsing; it's a very lightweight operation.
  const tflite::Model* model = ::tflite::GetModel(g_model);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(&micro_error_reporter,
                         "Model provided is schema version %d not equal "
                         "to supported version %d.\n",
                         model->version(), TFLITE_SCHEMA_VERSION);
  }

  // This pulls in all the operation implementations we need
  tflite::AllOpsResolver resolver;
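  // Note: AllOpsResolver is convenient for a test, but a real application
  // would typically use MicroMutableOpResolver to register only the ops the
  // model actually uses, which saves code space.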

  constexpr int kTensorArenaSize = 2000;
  uint8_t tensor_arena[kTensorArenaSize];
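  // The arena size is an estimate for this particular model; if
  // AllocateTensors() fails below, this value likely needs to be increased.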

  // Build an interpreter to run the model with
  tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                       kTensorArenaSize,
                                       &micro_error_reporter);
  // Allocate memory from the tensor_arena for the model's tensors
  TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);

  // Obtain a pointer to the model's input tensor
  TfLiteTensor* input = interpreter.input(0);

  // Make sure the input has the properties we expect
  TF_LITE_MICRO_EXPECT_NE(nullptr, input);
  // The property "dims" tells us the tensor's shape. It has one element for
  // each dimension. Our input is a 2D tensor containing 1 element, so "dims"
  // should have size 2.
  TF_LITE_MICRO_EXPECT_EQ(2, input->dims->size);
  // The value of each element gives the length of the corresponding
  // dimension. Our input is a 2D tensor of shape [1, 1], so both dimensions
  // should have length 1.
  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[1]);
  // The input is an 8-bit integer value
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, input->type);

  // Get the input quantization parameters
  float input_scale = input->params.scale;
  int input_zero_point = input->params.zero_point;

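  // TfLite uses affine quantization: a real value r maps to an integer
  // q = r / scale + zero_point, and back via r = (q - zero_point) * scale.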
  // Quantize the input from floating-point to integer
  int8_t x_quantized = x / input_scale + input_zero_point;
  // Place the quantized input in the model's input tensor
  input->data.int8[0] = x_quantized;

  // Run the model and check that it succeeds
  TfLiteStatus invoke_status = interpreter.Invoke();
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, invoke_status);

  // Obtain a pointer to the output tensor and make sure it has the
  // properties we expect. Its shape and type should match the input tensor.
  TfLiteTensor* output = interpreter.output(0);
  TF_LITE_MICRO_EXPECT_EQ(2, output->dims->size);
  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[0]);
  TF_LITE_MICRO_EXPECT_EQ(1, output->dims->data[1]);
  TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt8, output->type);

  // Get the output quantization parameters
  float output_scale = output->params.scale;
  int output_zero_point = output->params.zero_point;

  // Obtain the quantized output from the model's output tensor
  int8_t y_pred_quantized = output->data.int8[0];
  // Dequantize the output from integer to floating-point
  float y_pred = (y_pred_quantized - output_zero_point) * output_scale;

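  // The tolerance below allows for both the model's approximation error and
  // the precision lost to int8 quantization.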
  // Check if the output is within a small range of the expected output
  float epsilon = 0.05f;
  TF_LITE_MICRO_EXPECT_NEAR(y_true, y_pred, epsilon);

  // Run inference on several more values and confirm the expected outputs.
  // Each iteration quantizes the input, invokes the model, and dequantizes
  // the output before comparing it against the true sine value.
  const float x_values[] = {1.f, 3.f, 5.f};
  for (float x_test : x_values) {
    y_true = sin(x_test);
    input->data.int8[0] = x_test / input_scale + input_zero_point;
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, interpreter.Invoke());
    y_pred = (output->data.int8[0] - output_zero_point) * output_scale;
    TF_LITE_MICRO_EXPECT_NEAR(y_true, y_pred, epsilon);
  }
}

TF_LITE_MICRO_TESTS_END
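
// A sketch of how to run this test from the TensorFlow repository root,
// assuming the Make-based TFLM build that shipped with this example:
//
//   make -f tensorflow/lite/micro/tools/make/Makefile test_hello_world_test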