/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"

#include <gtest/gtest.h>

#include <cmath>
#include <cstdlib>
#include <map>
#include <string>
#include <tuple>
#include <vector>

using namespace android::nn::test_wrapper;
using namespace test_helper;

namespace {

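// Matrix dimensions used by the tests. In NNAPI, a dimension value of 0 in an
// operand's dimension array means the extent is unspecified at model build
// time, so UNKNOWN_SIZE marks a dimension left unknown at compile time.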
const uint32_t INTENDED_SIZE = 3;
const uint32_t OTHER_SIZE = 2;
const uint32_t UNKNOWN_SIZE = 0;

// We test four basic scenarios for each tensor dimension:
//     INTENDED_AT_COMPILE_AND_EXECUTE: set the dimension at compile
//         (addOperand) time to INTENDED_SIZE, use the same size at execution
//         (setInput/setOutput) time. This should always work.
//
//     INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE: set the dimension at compile
//         (addOperand) time to INTENDED_SIZE, give no size at execution time.
//         This should always work.
//
//     UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE: don't set the dimension at
//         compile (addOperand) time, use INTENDED_SIZE at execute
//         (setInput/setOutput) time. Note that for constants this just means
//         using an unknown dimension at addOperand time, as there is no type
//         parameter to setOperandValue. This should work for inputs and
//         outputs and give an error for constants at compile time.
//
//     UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE: don't set the dimension at compile
//         (addOperand) time, use OTHER_SIZE at execute (setInput/setOutput)
//         time. This should give an error at execute time (as the constant
//         value will have a different size).
//
// All relevant combinations of the basic scenarios are then iterated over in
// TestAll. Note that we don't want to just use googletest's parameterized
// tests (TEST_P) as the 16k combinations generated too many lines of output
// for the test infrastructure to handle correctly. However, running all 16k
// in one test makes the ASAN version take so long that the automatic test
// runner thinks the command has become unresponsive, so we split on the first
// level.
enum class DimensionKind {
    INTENDED_AT_COMPILE_AND_EXECUTE,
    INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
    UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
    UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE
};
typedef std::tuple<DimensionKind, DimensionKind> OperandParams;
std::vector<DimensionKind> ioDimensionValues = {
        DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE,
        DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE};
std::vector<DimensionKind> constantDimensionValues = {
        DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE};
std::vector<OperandParams> Combine(const std::vector<DimensionKind>& firsts,
                                   const std::vector<DimensionKind>& seconds);
auto ioValues = Combine(ioDimensionValues, ioDimensionValues);
auto constantValues = Combine(constantDimensionValues, constantDimensionValues);

class UnknownDimensionsTest : public ::testing::TestWithParam<OperandParams> {
   protected:
    template <class T, Type TensorType>
    void TestOne(const OperandParams& paramsForInput0, const OperandParams& paramsForInput1,
                 const OperandParams& paramsForConst, const OperandParams& paramsForOutput);
    template <class T, Type TensorType>
    void TestAll();

    template <typename T>
    void CompareResults(std::map<int, std::vector<T>>& expected,
                        std::map<int, std::vector<T>>& actual);
};

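// CompareResults is specialized per element type so each type can use an
// appropriate tolerance: float32 combines an absolute and a relative
// tolerance, quantized uint8 allows an off-by-one difference, and float16
// scales the tolerance up to account for its reduced precision.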
template <>
void UnknownDimensionsTest::CompareResults<float>(std::map<int, std::vector<float>>& golden,
                                                  std::map<int, std::vector<float>>& test) {
    size_t totalNumberOfErrors = 0;
    float fpAtol = 1e-5f, fpRtol = 1e-5f;
    compare_<float>(golden, test,
                    [&totalNumberOfErrors, fpAtol, fpRtol](float expected, float actual) {
                        // Compute the range based on both absolute tolerance and relative
                        // tolerance
                        float fpRange = fpAtol + fpRtol * std::abs(expected);
                        if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                            EXPECT_NEAR(expected, actual, fpRange);
                        }
                        if (std::abs(expected - actual) > fpRange) {
                            totalNumberOfErrors++;
                        }
                    });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

template <>
void UnknownDimensionsTest::CompareResults<uint8_t>(std::map<int, std::vector<uint8_t>>& golden,
                                                    std::map<int, std::vector<uint8_t>>& test) {
    size_t totalNumberOfErrors = 0;
    compare_<uint8_t>(golden, test, [&totalNumberOfErrors](uint8_t expected, uint8_t actual) {
        if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
            EXPECT_NEAR(expected, actual, 1);
        }
        if (std::abs(expected - actual) > 1) {
            totalNumberOfErrors++;
        }
    });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

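// 0.0009765625f is 2^-10, the spacing between adjacent float16 values in
// [1.0, 2.0), so the tolerances below correspond to roughly 5 ULP at unit
// scale.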
template <>
void UnknownDimensionsTest::CompareResults<_Float16>(std::map<int, std::vector<_Float16>>& golden,
                                                     std::map<int, std::vector<_Float16>>& test) {
    size_t totalNumberOfErrors = 0;
    float fpAtol = 5.0f * 0.0009765625f, fpRtol = 5.0f * 0.0009765625f;
    compare_<_Float16>(golden, test,
                       [&totalNumberOfErrors, fpAtol, fpRtol](_Float16 expected, _Float16 actual) {
                           // Compute the range based on both absolute tolerance and relative
                           // tolerance
                           float fpRange = fpAtol + fpRtol * std::abs(static_cast<float>(expected));
                           if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                               EXPECT_NEAR(expected, actual, fpRange);
                           }
                           if (std::abs(static_cast<float>(expected - actual)) > fpRange) {
                               totalNumberOfErrors++;
                           }
                       });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

template <class T, Type TensorType>
void UnknownDimensionsTest::TestOne(const OperandParams& paramsForInput0,
                                    const OperandParams& paramsForInput1,
                                    const OperandParams& paramsForConst,
                                    const OperandParams& paramsForOutput) {
    typedef T IntendedMatrix[INTENDED_SIZE][INTENDED_SIZE];
    static const IntendedMatrix ones = { { 1, 1, 1 }, { 1, 1, 1 }, { 1, 1, 1 } };
    static const IntendedMatrix twos = { { 2, 2, 2 }, { 2, 2, 2 }, { 2, 2, 2 } };
    static const IntendedMatrix fives = { { 5, 5, 5 }, { 5, 5, 5 }, { 5, 5, 5 } };
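    // Non-quantized tensor types must be created with a scale of 0, while
    // quantized types need a non-zero scale, so use 1.0 for
    // TENSOR_QUANT8_ASYMM.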
    const float scale = TensorType == Type::TENSOR_QUANT8_ASYMM ? 1.f : 0.f;

    Model model;
    std::string input0Scope("Input 0:"), input1Scope("Input 1:"), constantScope("Constant:"),
            outputScope("Output:");

    auto getDimForCompile = [](DimensionKind kind, std::string* scope) {
        switch (kind) {
            case DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_AND_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE");
                return UNKNOWN_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE");
                return UNKNOWN_SIZE;
        }
    };
    auto addOperand = [&model, &getDimForCompile, scale](OperandParams params,
                                                         std::string* scope = nullptr) {
        OperandType matrixTypeWithPotentiallyUnknownDims(
                TensorType,
                {getDimForCompile(std::get<0>(params), scope),
                 getDimForCompile(std::get<1>(params), scope)},
                scale);
        return model.addOperand(&matrixTypeWithPotentiallyUnknownDims);
    };
    auto inputOpd0 = addOperand(paramsForInput0, &input0Scope);
    auto inputOpd1 = addOperand(paramsForInput1, &input1Scope);
    auto intermediateOpd0 = addOperand(OperandParams{
            // Dimensions for intermediate operand actually deduced at execution time
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE});
    auto constantOpd0 = addOperand(paramsForConst, &constantScope);
    auto outputOpd0 = addOperand(paramsForOutput, &outputScope);

    // Make the gtest failure easier to read
    SCOPED_TRACE(input0Scope);
    SCOPED_TRACE(input1Scope);
    SCOPED_TRACE(constantScope);
    SCOPED_TRACE(outputScope);

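    // ANEURALNETWORKS_ADD takes a third, scalar input that selects a fused
    // activation function; ANEURALNETWORKS_FUSED_NONE applies no activation.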
    OperandType scalarType(Type::INT32, {});
    int32_t activation(ANEURALNETWORKS_FUSED_NONE);
    auto activationOpd0 = model.addOperand(&scalarType);

    model.setOperandValue(activationOpd0, &activation, sizeof(activation));
    model.setOperandValue(constantOpd0, twos, sizeof(twos));
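    // The graph computes output = (input0 + input1) + constant. With inputs of
    // all ones and all twos and a constant of all twos, the expected result is
    // the all-fives matrix.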
    model.addOperation(ANEURALNETWORKS_ADD, {inputOpd0, inputOpd1, activationOpd0},
                       {intermediateOpd0});
    model.addOperation(ANEURALNETWORKS_ADD, {intermediateOpd0, constantOpd0, activationOpd0},
                       {outputOpd0});
    model.identifyInputsAndOutputs({inputOpd0, inputOpd1}, {outputOpd0});
    if (std::get<0>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
        std::get<1>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);
    } else {
        ASSERT_FALSE(model.isValid());
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(model.finish(), Result::NO_ERROR);
        return;
    }

    Compilation compilation(&model);
    ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

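    // Pre-fill the output buffer with a value other than the expected fives so
    // that an output that was never written is detected.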
    IntendedMatrix actual = { { 10, 10, 10 }, { 10, 10, 10 }, { 10, 10, 10 } };
    Execution execution(&compilation);

    OperandType matrixTypeIntended(TensorType, {INTENDED_SIZE, INTENDED_SIZE}, scale);
    OperandType matrixTypeFirstOther(TensorType, {OTHER_SIZE, INTENDED_SIZE}, scale);
    OperandType matrixTypeSecondOther(TensorType, {INTENDED_SIZE, OTHER_SIZE}, scale);
    OperandType matrixTypeBothOther(TensorType, {OTHER_SIZE, OTHER_SIZE}, scale);
    bool allAreIntendedSizeAtExecution = true;

    // Helper to return the appropriate "type" parameter to setInput/setOutput
    // based on OperandParams
    auto typeAtSet = [&](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE &&
            second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeBothOther.operandType;
        } else if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeFirstOther.operandType;
        } else if (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeSecondOther.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE) {
            return &matrixTypeIntended.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
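            // A null type passed to setInput/setOutput means the operand type
            // given to addOperand is used unchanged.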
            return static_cast<ANeuralNetworksOperandType*>(nullptr);
        } else {
            return &matrixTypeIntended.operandType;
        }
    };
    // Helper to return the appropriate "size" parameter to setInput/setOutput
    // based on OperandParams
    auto sizeAtSet = [](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        size_t firstDim = (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE)
                                  ? OTHER_SIZE
                                  : INTENDED_SIZE;
        size_t secondDim = (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE)
                                   ? OTHER_SIZE
                                   : INTENDED_SIZE;
        return firstDim * secondDim * sizeof(fives[0][0]);
    };
    ASSERT_EQ(execution.setInput(0, ones, sizeAtSet(paramsForInput0), typeAtSet(paramsForInput0)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, twos, sizeAtSet(paramsForInput1), typeAtSet(paramsForInput1)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setOutput(0, actual, sizeAtSet(paramsForOutput),
                                  typeAtSet(paramsForOutput)),
              Result::NO_ERROR);

    if (allAreIntendedSizeAtExecution) {
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);
    } else {
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(execution.compute(), Result::NO_ERROR);
        return;
    }

    typedef std::vector<T> vec;
    typedef std::map<int, vec> Operands;
    constexpr size_t count = sizeof(fives) / sizeof(fives[0][0]);
    Operands expected_opds{{0, vec{&fives[0][0], &fives[0][0] + count}}};
    Operands actual_opds{{0, vec{&actual[0][0], &actual[0][0] + count}}};
    CompareResults(expected_opds, actual_opds);
}

std::vector<OperandParams> Combine(const std::vector<DimensionKind>& firsts,
                                   const std::vector<DimensionKind>& seconds) {
    std::vector<OperandParams> ret;
    for (auto first : firsts) {
        for (auto second : seconds) {
            ret.push_back({first, second});
        }
    }
    return ret;
}

template <class T, Type TensorType>
void UnknownDimensionsTest::TestAll() {
    const OperandParams paramsForInput0 = GetParam();
    for (auto paramsForInput1 : ioValues) {
        for (auto paramsForConst : constantValues) {
            for (auto paramsForOutput : ioValues) {
                TestOne<T, TensorType>(paramsForInput0, paramsForInput1, paramsForConst,
                                       paramsForOutput);
            }
        }
    }
}

TEST_P(UnknownDimensionsTest, Float) {
    TestAll<float, Type::TENSOR_FLOAT32>();
}

TEST_P(UnknownDimensionsTest, Quantized) {
    TestAll<uint8_t, Type::TENSOR_QUANT8_ASYMM>();
}

TEST_P(UnknownDimensionsTest, Float16) {
    TestAll<_Float16, Type::TENSOR_FLOAT16>();
}

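// Only the parameters for input 0 are used as the gtest parameter; the
// remaining combinations are iterated inside TestAll to keep the number of
// reported test cases manageable (see the scenario comment near the top of
// this file).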
INSTANTIATE_TEST_CASE_P(UnknownCombinationsTest, UnknownDimensionsTest,
                        ::testing::ValuesIn(ioValues));
}  // end namespace