/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <cmath>
#include <functional>
#include <string>
#include <tuple>
#include <vector>

#include "TestNeuralNetworksWrapper.h"

using namespace android::nn::test_wrapper;

namespace {

const uint32_t INTENDED_SIZE = 3;
const uint32_t OTHER_SIZE = 2;
const uint32_t UNKNOWN_SIZE = 0;
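// In NNAPI operand types, a dimension value of 0 means the dimension is unknown.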

// We test three basic scenarios for each tensor dimension:
//     INTENDED_AT_COMPILE_AND_EXECUTE: set the dimension at compile
//     (addOperand) time to INTENDED_SIZE, use the same size at execution
//     (setInput/setOutput) time. This should always work.
//
//     INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE: set the dimension at compile
//     (addOperand) time to INTENDED_SIZE, give no size at execution time.
//     This should always work.
//
//     UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE: don't set the dimension at
//     compile (addOperand) time, use INTENDED_SIZE at execute
//     (setInput/setOutput) time. Note that for constants this just means using
//     an unknown dimension at addOperand time, as there is no type parameter
//     to setOperandValue. This should work for inputs and outputs and give an
//     error for constants at compile time.
//
//     UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE: don't set the dimension at compile
//     (addOperand) time, use OTHER_SIZE at execute (setInput/setOutput) time.
//     This should give an error at execute time (as the constant value will
//     have a different size).
//
// All relevant combinations of the basic scenarios are then iterated over in
// TestAll. Note that we don't simply use googletest's parameterized tests
// (TEST_P), as the 16k combinations generated too many lines of output for the
// test infrastructure to handle correctly. However, running all 16k in one
// test makes the ASAN version take so long that the automatic test runner
// thinks the command has become unresponsive, so we split on the first level.
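//
// The "16k" figure comes from 16 (input0) x 16 (input1) x 4 (constant) x
// 16 (output) operand combinations; after splitting on the first level, each
// TEST_P instantiation runs 16 x 4 x 16 = 1024 combinations per compute mode.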
enum class DimensionKind {
    INTENDED_AT_COMPILE_AND_EXECUTE,
    INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
    UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
    UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE
};
typedef std::tuple<DimensionKind, DimensionKind> OperandParams;
std::vector<DimensionKind> ioDimensionValues = {
        DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE,
        DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE};
std::vector<DimensionKind> constantDimensionValues = {
        DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE,
        DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE};
std::vector<OperandParams> Combine(const std::vector<DimensionKind>& firsts,
                                   const std::vector<DimensionKind>& seconds);
auto ioValues = Combine(ioDimensionValues, ioDimensionValues);
auto constantValues = Combine(constantDimensionValues, constantDimensionValues);
std::vector<Execution::ComputeMode> computeModes = {
        Execution::ComputeMode::SYNC,
        Execution::ComputeMode::FENCED};
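// TestAll exercises every operand combination with each of the compute modes above.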

class UnknownDimensionsTest : public ::testing::TestWithParam<OperandParams> {
   protected:
    template <class T, Type TensorType>
    void TestOne(const OperandParams& paramsForInput0, const OperandParams& paramsForInput1,
                 const OperandParams& paramsForConst, const OperandParams& paramsForOutput,
                 Execution::ComputeMode computeMode);
    template <class T, Type TensorType>
    void TestAll();

    template <typename T>
    void CompareResults(const std::vector<T>& expected, const std::vector<T>& actual);
};
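// The TEST_P parameter above supplies the OperandParams for input 0; TestAll
// sweeps the remaining operands and the compute modes.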

template <typename T>
void CompareGeneric(const std::vector<T>& golden, const std::vector<T>& test,
                    std::function<void(T, T)> cmp) {
    ASSERT_EQ(golden.size(), test.size());
    for (uint32_t i = 0; i < golden.size(); i++) {
        SCOPED_TRACE(testing::Message() << "When comparing element " << i);
        cmp(golden[i], test[i]);
    }
}

constexpr size_t gMaximumNumberOfErrorMessages = 10;
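// Cap the number of EXPECT_NEAR failures reported per comparison so a badly
// failing combination does not flood the log; every mismatch is still counted.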

template <>
void UnknownDimensionsTest::CompareResults<float>(const std::vector<float>& golden,
                                                  const std::vector<float>& test) {
    size_t totalNumberOfErrors = 0;
    float fpAtol = 1e-5f, fpRtol = 1e-5f;
    CompareGeneric<float>(golden, test,
                          [&totalNumberOfErrors, fpAtol, fpRtol](float expected, float actual) {
                              // Compute the range based on both absolute tolerance and relative
                              // tolerance
                              float fpRange = fpAtol + fpRtol * std::abs(expected);
                              if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                                  EXPECT_NEAR(expected, actual, fpRange);
                              }
                              if (std::abs(expected - actual) > fpRange) {
                                  totalNumberOfErrors++;
                              }
                          });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

template <>
void UnknownDimensionsTest::CompareResults<uint8_t>(const std::vector<uint8_t>& golden,
                                                    const std::vector<uint8_t>& test) {
    size_t totalNumberOfErrors = 0;
    CompareGeneric<uint8_t>(golden, test, [&totalNumberOfErrors](uint8_t expected, uint8_t actual) {
        if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
            EXPECT_NEAR(expected, actual, 1);
        }
        if (std::abs(expected - actual) > 1) {
            totalNumberOfErrors++;
        }
    });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

template <>
void UnknownDimensionsTest::CompareResults<_Float16>(const std::vector<_Float16>& golden,
                                                     const std::vector<_Float16>& test) {
    size_t totalNumberOfErrors = 0;
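    // 0.0009765625f is 2^-10, the relative precision step of an FP16 value, so
    // the tolerances below allow roughly five units of FP16 precision.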
    float fpAtol = 5.0f * 0.0009765625f, fpRtol = 5.0f * 0.0009765625f;
    CompareGeneric<_Float16>(
            golden, test,
            [&totalNumberOfErrors, fpAtol, fpRtol](_Float16 expected, _Float16 actual) {
                // Compute the range based on both absolute tolerance and relative
                // tolerance
                float fpRange = fpAtol + fpRtol * std::abs(static_cast<float>(expected));
                if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                    EXPECT_NEAR(expected, actual, fpRange);
                }
                if (std::abs(static_cast<float>(expected - actual)) > fpRange) {
                    totalNumberOfErrors++;
                }
            });
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}

template <class T, Type TensorType>
void UnknownDimensionsTest::TestOne(const OperandParams& paramsForInput0,
                                    const OperandParams& paramsForInput1,
                                    const OperandParams& paramsForConst,
                                    const OperandParams& paramsForOutput,
                                    Execution::ComputeMode computeMode) {
    typedef T IntendedMatrix[INTENDED_SIZE][INTENDED_SIZE];
    static const IntendedMatrix ones = {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}};
    static const IntendedMatrix twos = {{2, 2, 2}, {2, 2, 2}, {2, 2, 2}};
    static const IntendedMatrix fives = {{5, 5, 5}, {5, 5, 5}, {5, 5, 5}};
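    // With ANEURALNETWORKS_FUSED_NONE the graph built below computes
    // (input0 + input1) + constant == ones + twos + twos == fives elementwise.
    // The quantized tensor type needs a nonzero scale (1.f here); the
    // non-quantized tensor types use a scale of 0.f.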
    const float scale = TensorType == Type::TENSOR_QUANT8_ASYMM ? 1.f : 0.f;

    Model model;
    std::string input0Scope("Input 0:"), input1Scope("Input 1:"), constantScope("Constant:"),
            outputScope("Output:");

    auto getDimForCompile = [](DimensionKind kind, std::string* scope) {
        switch (kind) {
            case DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_AND_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE:
                if (scope) scope->append(" INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE");
                return INTENDED_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE");
                return UNKNOWN_SIZE;
            case DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE:
                if (scope) scope->append(" UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE");
                return UNKNOWN_SIZE;
        }
    };
    auto addOperand = [&model, &getDimForCompile, scale](OperandParams params,
                                                         std::string* scope = nullptr) {
        OperandType matrixTypeWithPotentiallyUnknownDims(
                TensorType,
                {getDimForCompile(std::get<0>(params), scope),
                 getDimForCompile(std::get<1>(params), scope)},
                scale);
        return model.addOperand(&matrixTypeWithPotentiallyUnknownDims);
    };
    auto inputOpd0 = addOperand(paramsForInput0, &input0Scope);
    auto inputOpd1 = addOperand(paramsForInput1, &input1Scope);
    auto intermediateOpd0 = addOperand(OperandParams{
            // Dimensions for the intermediate operand are deduced at execution time
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE,
            DimensionKind::UNKNOWN_AT_COMPILE_INTENDED_AT_EXECUTE});
    auto constantOpd0 = addOperand(paramsForConst, &constantScope);
    auto outputOpd0 = addOperand(paramsForOutput, &outputScope);

    // Make the gtest failure easier to read
    SCOPED_TRACE(input0Scope);
    SCOPED_TRACE(input1Scope);
    SCOPED_TRACE(constantScope);
    SCOPED_TRACE(outputScope);

    OperandType scalarType(Type::INT32, {});
    int32_t activation(ANEURALNETWORKS_FUSED_NONE);
    auto activationOpd0 = model.addOperand(&scalarType);

    model.setOperandValue(activationOpd0, &activation, sizeof(activation));
    model.setOperandValue(constantOpd0, twos, sizeof(twos));
    model.addOperation(ANEURALNETWORKS_ADD, {inputOpd0, inputOpd1, activationOpd0},
                       {intermediateOpd0});
    model.addOperation(ANEURALNETWORKS_ADD, {intermediateOpd0, constantOpd0, activationOpd0},
                       {outputOpd0});
    model.identifyInputsAndOutputs({inputOpd0, inputOpd1}, {outputOpd0});
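    // setOperandValue has no type parameter, so a constant operand must be fully
    // specified at addOperand time; any unknown dimension on the constant makes
    // the model invalid.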
    if (std::get<0>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
        std::get<1>(paramsForConst) == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
        ASSERT_TRUE(model.isValid());
        ASSERT_EQ(model.finish(), Result::NO_ERROR);
    } else {
        ASSERT_FALSE(model.isValid());
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(model.finish(), Result::NO_ERROR);
        return;
    }

    Compilation compilation(&model);
    ASSERT_EQ(compilation.finish(), Result::NO_ERROR);

    IntendedMatrix actual = {{10, 10, 10}, {10, 10, 10}, {10, 10, 10}};
    Execution execution(&compilation);

    OperandType matrixTypeIntended(TensorType, {INTENDED_SIZE, INTENDED_SIZE}, scale);
    OperandType matrixTypeFirstOther(TensorType, {OTHER_SIZE, INTENDED_SIZE}, scale);
    OperandType matrixTypeSecondOther(TensorType, {INTENDED_SIZE, OTHER_SIZE}, scale);
    OperandType matrixTypeBothOther(TensorType, {OTHER_SIZE, OTHER_SIZE}, scale);
    bool allAreIntendedSizeAtExecution = true;

    // Helper to return appropriate "type" parameter to setInput/setOutput based
    // on OperandParams
    auto typeAtSet = [&](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE &&
            second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeBothOther.operandType;
        } else if (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeFirstOther.operandType;
        } else if (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE) {
            allAreIntendedSizeAtExecution = false;
            return &matrixTypeSecondOther.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_AND_EXECUTE) {
            return &matrixTypeIntended.operandType;
        } else if (first == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE &&
                   second == DimensionKind::INTENDED_AT_COMPILE_NOT_SET_AT_EXECUTE) {
            return static_cast<ANeuralNetworksOperandType*>(nullptr);
        } else {
            return &matrixTypeIntended.operandType;
        }
    };
    // Helper to return appropriate "size" parameter to setInput/setOutput based
    // on OperandParams
    auto sizeAtSet = [](OperandParams params) {
        auto first = std::get<0>(params), second = std::get<1>(params);
        size_t firstDim = (first == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE)
                                  ? OTHER_SIZE
                                  : INTENDED_SIZE;
        size_t secondDim = (second == DimensionKind::UNKNOWN_AT_COMPILE_OTHER_AT_EXECUTE)
                                  ? OTHER_SIZE
                                  : INTENDED_SIZE;
        return firstDim * secondDim * sizeof(fives[0][0]);
    };
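    // A null "type" tells setInput/setOutput to reuse the dimensions given at
    // addOperand time; a non-null type supplies the dimensions that were left
    // unknown. The byte size passed must match the dimensions actually used.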
    ASSERT_EQ(execution.setInput(0, ones, sizeAtSet(paramsForInput0), typeAtSet(paramsForInput0)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, twos, sizeAtSet(paramsForInput1), typeAtSet(paramsForInput1)),
              Result::NO_ERROR);
    ASSERT_EQ(
            execution.setOutput(0, actual, sizeAtSet(paramsForOutput), typeAtSet(paramsForOutput)),
            Result::NO_ERROR);

    if (allAreIntendedSizeAtExecution) {
        ASSERT_EQ(execution.compute(computeMode), Result::NO_ERROR);
    } else {
        // There is no contract (yet) for specific errors in NeuralNetworks.h,
        // so we just assert on not being successful.
        ASSERT_NE(execution.compute(), Result::NO_ERROR);
        return;
    }

    constexpr size_t count = sizeof(fives) / sizeof(fives[0][0]);
    std::vector<T> expected_opds(&fives[0][0], &fives[0][0] + count);
    std::vector<T> actual_opds(&actual[0][0], &actual[0][0] + count);
    CompareResults(expected_opds, actual_opds);
}

std::vector<OperandParams> Combine(const std::vector<DimensionKind>& firsts,
                                   const std::vector<DimensionKind>& seconds) {
    std::vector<OperandParams> ret;
    for (auto first : firsts) {
        for (auto second : seconds) {
            ret.push_back({first, second});
        }
    }
    return ret;
}

template <class T, Type TensorType>
void UnknownDimensionsTest::TestAll() {
    const OperandParams paramsForInput0 = GetParam();
    for (auto paramsForInput1 : ioValues) {
        for (auto paramsForConst : constantValues) {
            for (auto paramsForOutput : ioValues) {
                for (auto computeMode : computeModes) {
                    TestOne<T, TensorType>(paramsForInput0, paramsForInput1, paramsForConst,
                                           paramsForOutput, computeMode);
                }
            }
        }
    }
}

TEST_P(UnknownDimensionsTest, Float) {
    TestAll<float, Type::TENSOR_FLOAT32>();
}

TEST_P(UnknownDimensionsTest, Quantized) {
    TestAll<uint8_t, Type::TENSOR_QUANT8_ASYMM>();
}

TEST_P(UnknownDimensionsTest, Float16) {
    TestAll<_Float16, Type::TENSOR_FLOAT16>();
}

INSTANTIATE_TEST_SUITE_P(UnknownCombinationsTest, UnknownDimensionsTest,
                         ::testing::ValuesIn(ioValues));
}  // end namespace