/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "HalInterfaces.h"
#include "Manager.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksExtensions.h"
#include "NeuralNetworksWrapperExtensions.h"
#include "TestNeuralNetworksWrapper.h"
#include "TypeManager.h"
#include "Utils.h"
#include "ValidateHal.h"

#include <gtest/gtest.h>

#include "FibonacciDriver.h"
#include "FibonacciExtension.h"

#include <vector>

namespace android {
namespace nn {
namespace {

using ::android::nn::test_wrapper::ExtensionModel;
using ::android::nn::test_wrapper::ExtensionOperandParams;
using ::android::nn::test_wrapper::ExtensionOperandType;
using ::android::nn::test_wrapper::Type;

class FibonacciExtensionTest : public ::testing::Test {
   protected:
    virtual void SetUp() {
        if (DeviceManager::get()->getUseCpuOnly()) {
            // This test requires the use of a custom driver.
            GTEST_SKIP();
        }

        // Real world extension tests should run against actual hardware
        // implementations, but there is no hardware supporting the test
        // extension. Hence the sample software driver.
        DeviceManager::get()->forTest_registerDevice(sample_driver::FibonacciDriver::kDriverName,
                                                     new sample_driver::FibonacciDriver());
        // Discover extensions provided by registered devices.
        TypeManager::get()->forTest_reset();

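        // Find the device that supports the Fibonacci extension and the CPU device among
        // all registered devices; each must be present exactly once.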
        uint32_t numDevices = 0;
        ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
        ANeuralNetworksDevice* fibonacciDevice = nullptr;
        ANeuralNetworksDevice* cpuDevice = nullptr;
        for (uint32_t i = 0; i < numDevices; i++) {
            ANeuralNetworksDevice* device = nullptr;
            EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
            bool supportsFibonacciExtension;
            ASSERT_EQ(
                    ANeuralNetworksDevice_getExtensionSupport(
                            device, EXAMPLE_FIBONACCI_EXTENSION_NAME, &supportsFibonacciExtension),
                    ANEURALNETWORKS_NO_ERROR);
            if (supportsFibonacciExtension) {
                ASSERT_EQ(fibonacciDevice, nullptr) << "Found multiple Fibonacci drivers";
                fibonacciDevice = device;
            } else if (DeviceManager::get()->forTest_isCpuDevice(device)) {
                ASSERT_EQ(cpuDevice, nullptr) << "Found multiple CPU drivers";
                cpuDevice = device;
            }
        }
        ASSERT_NE(fibonacciDevice, nullptr) << "Expecting Fibonacci driver to be available";
        ASSERT_NE(cpuDevice, nullptr) << "Expecting CPU driver to be available";
        mDevices = {fibonacciDevice, cpuDevice};
    }

    virtual void TearDown() {
        if (mExecution) {
            ANeuralNetworksExecution_free(mExecution);
        }
        if (mCompilation) {
            ANeuralNetworksCompilation_free(mCompilation);
        }
        DeviceManager::get()->forTest_reInitializeDeviceList();
        TypeManager::get()->forTest_reset();
    }

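    // Queries ANeuralNetworksModel_getSupportedOperationsForDevices for mModel on mDevices
    // and compares the per-operation result against |expected|.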
    void checkSupportedOperations(const std::vector<bool>& expected) {
        const uint32_t kMaxNumberOperations = 256;
        EXPECT_LE(expected.size(), kMaxNumberOperations);
        bool supported[kMaxNumberOperations] = {false};
        EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(
                          mModel.getHandle(), mDevices.data(), mDevices.size(), supported),
                  ANEURALNETWORKS_NO_ERROR);
        for (size_t i = 0; i < expected.size(); ++i) {
            SCOPED_TRACE(::testing::Message() << "i = " << i);
            EXPECT_EQ(supported[i], expected[i]);
        }
    }

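    // Compiles mModel for mDevices and creates an execution, leaving the handles in
    // mCompilation and mExecution for the test body (and for TearDown to free).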
    void prepareForExecution() {
        ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                              mDevices.size(), &mCompilation),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution),
                  ANEURALNETWORKS_NO_ERROR);
    }

    std::vector<ANeuralNetworksDevice*> mDevices;
    ANeuralNetworksExecution* mExecution = nullptr;
    ANeuralNetworksCompilation* mCompilation = nullptr;
    ExtensionModel mModel;
};

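// Appends an operation that copies |input| to |output| unchanged: an ADD of |input|
// with a constant zero tensor and no fused activation.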
void addNopOperation(ExtensionModel* model, ExtensionOperandType inputType, uint32_t input,
                     uint32_t output) {
    // Our NOP operation is ADD, which has no extension type support.
    ASSERT_EQ(inputType.operandType.type, ANEURALNETWORKS_TENSOR_FLOAT32);
    ASSERT_EQ(inputType.dimensions.size(), 1u);

    uint32_t inputZeros = model->addOperand(&inputType);
    uint32_t inputSize = inputType.dimensions[0];
    uint32_t inputLength = sizeof(float) * inputSize;
    const float kZeros[100] = {};
    ASSERT_GE(sizeof(kZeros), inputLength);
    model->setOperandValue(inputZeros, &kZeros, inputLength);

    ExtensionOperandType scalarType(Type::INT32, {});
    uint32_t activation = model->addOperand(&scalarType);
    int32_t kNoActivation = ANEURALNETWORKS_FUSED_NONE;
    model->setOperandValue(activation, &kNoActivation, sizeof(kNoActivation));

    model->addOperation(ANEURALNETWORKS_ADD, {input, inputZeros, activation}, {output});
}

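// Builds a model containing a single EXAMPLE_FIBONACCI operation. When |addNopOperations|
// is true, the Fibonacci operation is sandwiched between two NOP (ADD) operations so that
// it consumes and produces temporary operands instead of model inputs and outputs.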
void createModel(ExtensionModel* model, ExtensionOperandType inputType,
                 ExtensionOperandType outputType, bool addNopOperations) {
    uint32_t fibonacciInput = model->addOperand(&inputType);
    uint32_t fibonacciOutput = model->addOperand(&outputType);

    uint32_t modelInput = addNopOperations ? model->addOperand(&inputType) : fibonacciInput;
    uint32_t modelOutput = addNopOperations ? model->addOperand(&outputType) : fibonacciOutput;

    if (addNopOperations) {
        addNopOperation(model, inputType, modelInput, fibonacciInput);
    }
    model->addOperation(
            model->getExtensionOperationType(EXAMPLE_FIBONACCI_EXTENSION_NAME, EXAMPLE_FIBONACCI),
            {fibonacciInput}, {fibonacciOutput});
    if (addNopOperations) {
        addNopOperation(model, outputType, fibonacciOutput, modelOutput);
    }

    model->identifyInputsAndOutputs({modelInput}, {modelOutput});
    model->finish();
    ASSERT_TRUE(model->isValid());
}

TEST_F(FibonacciExtensionTest, ModelWithExtensionOperandTypes) {
    constexpr uint32_t N = 10;
    constexpr double scale = 0.5;
    constexpr int64_t zeroPoint = 10;

    ExtensionOperandType inputType(static_cast<Type>(mModel.getExtensionOperandType(
                                           EXAMPLE_FIBONACCI_EXTENSION_NAME, EXAMPLE_INT64)),
                                   {});
    ExtensionOperandType outputType(
            static_cast<Type>(mModel.getExtensionOperandType(EXAMPLE_FIBONACCI_EXTENSION_NAME,
                                                             EXAMPLE_TENSOR_QUANT64_ASYMM)),
            {N},
            ExtensionOperandParams(ExampleQuant64AsymmParams{
                    .scale = scale,
                    .zeroPoint = zeroPoint,
            }));
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({true});
    prepareForExecution();

    int64_t input = N;
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
              ANEURALNETWORKS_NO_ERROR);

    int64_t output[N] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
              ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);

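    // The output holds the first N Fibonacci numbers, quantized as value / scale + zeroPoint.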
    EXPECT_EQ(output[0], 1 / scale + zeroPoint);
    EXPECT_EQ(output[1], 1 / scale + zeroPoint);
    EXPECT_EQ(output[2], 2 / scale + zeroPoint);
    EXPECT_EQ(output[3], 3 / scale + zeroPoint);
    EXPECT_EQ(output[4], 5 / scale + zeroPoint);
    EXPECT_EQ(output[5], 8 / scale + zeroPoint);
    EXPECT_EQ(output[6], 13 / scale + zeroPoint);
    EXPECT_EQ(output[7], 21 / scale + zeroPoint);
    EXPECT_EQ(output[8], 34 / scale + zeroPoint);
    EXPECT_EQ(output[9], 55 / scale + zeroPoint);
}

TEST_F(FibonacciExtensionTest, ModelWithTemporaries) {
    constexpr uint32_t N = 10;

    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {N});
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/true);
    checkSupportedOperations({true, true, true});
    prepareForExecution();

    float input[] = {N};
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
              ANEURALNETWORKS_NO_ERROR);

    float output[N] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
              ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);

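    // The output holds the first N Fibonacci numbers as plain float values.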
    EXPECT_EQ(output[0], 1);
    EXPECT_EQ(output[1], 1);
    EXPECT_EQ(output[2], 2);
    EXPECT_EQ(output[3], 3);
    EXPECT_EQ(output[4], 5);
    EXPECT_EQ(output[5], 8);
    EXPECT_EQ(output[6], 13);
    EXPECT_EQ(output[7], 21);
    EXPECT_EQ(output[8], 34);
    EXPECT_EQ(output[9], 55);
}

TEST_F(FibonacciExtensionTest, InvalidInputType) {
    ExtensionOperandType inputType(Type::TENSOR_INT32, {1});  // Unsupported type.
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({false});  // The driver reports that it doesn't support the operation.
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(FibonacciExtensionTest, InvalidOutputType) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_INT32, {1});  // Unsupported type.
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({false});  // The driver reports that it doesn't support the operation.
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(FibonacciExtensionTest, InvalidInputValue) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({true});
    prepareForExecution();

    float input[] = {-1};  // Invalid input value.
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
              ANEURALNETWORKS_NO_ERROR);

    float output[1] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
              ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_OP_FAILED);
}

TEST_F(FibonacciExtensionTest, InvalidNumInputs) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    uint32_t input1 = mModel.addOperand(&inputType);
    uint32_t input2 = mModel.addOperand(&inputType);  // Extra input.
    uint32_t output = mModel.addOperand(&outputType);
    mModel.addOperation(
            mModel.getExtensionOperationType(EXAMPLE_FIBONACCI_EXTENSION_NAME, EXAMPLE_FIBONACCI),
            {input1, input2}, {output});
    mModel.identifyInputsAndOutputs({input1, input2}, {output});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(FibonacciExtensionTest, InvalidNumOutputs) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    uint32_t input = mModel.addOperand(&inputType);
    uint32_t output1 = mModel.addOperand(&outputType);
    uint32_t output2 = mModel.addOperand(&outputType);  // Extra output.
    mModel.addOperation(
            mModel.getExtensionOperationType(EXAMPLE_FIBONACCI_EXTENSION_NAME, EXAMPLE_FIBONACCI),
            {input}, {output1, output2});
    mModel.identifyInputsAndOutputs({input}, {output1, output2});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(FibonacciExtensionTest, InvalidOperation) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    uint32_t input = mModel.addOperand(&inputType);
    uint32_t output = mModel.addOperand(&outputType);
    mModel.addOperation(mModel.getExtensionOperationType(
                                EXAMPLE_FIBONACCI_EXTENSION_NAME,
                                EXAMPLE_FIBONACCI + 1),  // This operation should not exist.
                        {input}, {output});
    mModel.identifyInputsAndOutputs({input}, {output});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

}  // namespace
}  // namespace nn
}  // namespace android