/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TestNeuralNetworksWrapper.h"

//#include <android-base/logging.h>
#include <android/hardware_buffer.h>
#include <gtest/gtest.h>
#include <unistd.h>

#include <cstdio>
#include <cstring>

using namespace android::nn::test_wrapper;

namespace {

typedef float Matrix3x4[3][4];
typedef float Matrix4[4];

const int32_t kNoActivation = ANEURALNETWORKS_FUSED_NONE;

class TrivialTest : public ::testing::Test {
   protected:
    virtual void SetUp() {}

    const Matrix3x4 matrix1 = {{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}, {9.f, 10.f, 11.f, 12.f}};
    const Matrix3x4 matrix2 = {{100.f, 200.f, 300.f, 400.f},
                               {500.f, 600.f, 700.f, 800.f},
                               {900.f, 1000.f, 1100.f, 1200.f}};
    const Matrix4 matrix2b = {100.f, 200.f, 300.f, 400.f};
    const Matrix3x4 matrix3 = {
            {20.f, 30.f, 40.f, 50.f}, {21.f, 22.f, 23.f, 24.f}, {31.f, 32.f, 33.f, 34.f}};
    const Matrix3x4 expected2 = {{101.f, 202.f, 303.f, 404.f},
                                 {505.f, 606.f, 707.f, 808.f},
                                 {909.f, 1010.f, 1111.f, 1212.f}};
    const Matrix3x4 expected2b = {{101.f, 202.f, 303.f, 404.f},
                                  {105.f, 206.f, 307.f, 408.f},
                                  {109.f, 210.f, 311.f, 412.f}};
    const Matrix3x4 expected2c = {{100.f, 400.f, 900.f, 1600.f},
                                  {500.f, 1200.f, 2100.f, 3200.f},
                                  {900.f, 2000.f, 3300.f, 4800.f}};

    const Matrix3x4 expected3 = {{121.f, 232.f, 343.f, 454.f},
                                 {526.f, 628.f, 730.f, 832.f},
                                 {940.f, 1042.f, 1144.f, 1246.f}};
    const Matrix3x4 expected3b = {
            {22.f, 34.f, 46.f, 58.f}, {31.f, 34.f, 37.f, 40.f}, {49.f, 52.f, 55.f, 58.f}};
};

// Create a model that can add two tensors using a one node graph.
void CreateAddTwoTensorModel(Model* model) {
    OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4});
    OperandType scalarType(Type::INT32, {});
    auto a = model->addOperand(&matrixType);
    auto b = model->addOperand(&matrixType);
    auto c = model->addOperand(&matrixType);
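    // ANEURALNETWORKS_ADD takes a scalar third input selecting the fused activation;
    // FUSED_NONE leaves the sum unmodified.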
    auto d = model->addConstantOperand(&scalarType, kNoActivation);
    model->addOperation(ANEURALNETWORKS_ADD, {a, b, d}, {c});
    model->identifyInputsAndOutputs({a, b}, {c});
    ASSERT_TRUE(model->isValid());
    model->finish();
}

// Create a model that can add three tensors using a two node graph,
// with one tensor set as part of the model.
void CreateAddThreeTensorModel(Model* model, const Matrix3x4 bias) {
    OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4});
    OperandType scalarType(Type::INT32, {});
    auto a = model->addOperand(&matrixType);
    auto b = model->addOperand(&matrixType);
    auto c = model->addOperand(&matrixType);
    auto d = model->addOperand(&matrixType);
    auto e = model->addOperand(&matrixType);
    auto f = model->addConstantOperand(&scalarType, kNoActivation);
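    // Operand e is a model constant holding the bias matrix; the graph computes
    // b = a + c, then d = b + e.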
    model->setOperandValue(e, bias, sizeof(Matrix3x4));
    model->addOperation(ANEURALNETWORKS_ADD, {a, c, f}, {b});
    model->addOperation(ANEURALNETWORKS_ADD, {b, e, f}, {d});
    model->identifyInputsAndOutputs({c, a}, {d});
    ASSERT_TRUE(model->isValid());
    model->finish();
}

// Check that the values are the same. Exact comparison works only because the test
// values are exactly representable; for arbitrary floats we would accept values that
// are close rather than exact.
int CompareMatrices(const Matrix3x4& expected, const Matrix3x4& actual) {
    int errors = 0;
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 4; j++) {
            if (expected[i][j] != actual[i][j]) {
                printf("expected[%d][%d] != actual[%d][%d], %f != %f\n", i, j, i, j,
                       static_cast<double>(expected[i][j]), static_cast<double>(actual[i][j]));
                errors++;
            }
        }
    }
    return errors;
}

TEST_F(TrivialTest, AddTwo) {
    Model modelAdd2;
    CreateAddTwoTensorModel(&modelAdd2);

    // Test the one node model.
    Matrix3x4 actual;
    memset(&actual, 0, sizeof(actual));
    Compilation compilation(&modelAdd2);
    compilation.finish();
    Execution execution(&compilation);
    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
    ASSERT_EQ(CompareMatrices(expected2, actual), 0);
}

TEST_F(TrivialTest, AddTwoWithHardwareBufferInput) {
    Model modelAdd2;
    CreateAddTwoTensorModel(&modelAdd2);

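    // For AHARDWAREBUFFER_FORMAT_BLOB buffers, width is the size in bytes and
    // height and layers must both be 1.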
    AHardwareBuffer_Desc desc{
            .width = sizeof(matrix1),
            .height = 1,
            .layers = 1,
            .format = AHARDWAREBUFFER_FORMAT_BLOB,
            .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
    };
    AHardwareBuffer* matrix1Buffer = nullptr;
    ASSERT_EQ(AHardwareBuffer_allocate(&desc, &matrix1Buffer), 0);
    Memory matrix1Memory(matrix1Buffer);
    ASSERT_TRUE(matrix1Memory.isValid());

    // Test the one node model.
    Matrix3x4 actual;
    memset(&actual, 0, sizeof(actual));
    Compilation compilation(&modelAdd2);
    compilation.finish();
    Execution execution(&compilation);
    ASSERT_EQ(execution.setInputFromMemory(0, &matrix1Memory, 0, sizeof(Matrix3x4)),
              Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);

    // Set the value for matrix1Buffer.
    void* bufferPtr = nullptr;
    ASSERT_EQ(AHardwareBuffer_lock(matrix1Buffer, desc.usage, -1, NULL, &bufferPtr), 0);
    memcpy((uint8_t*)bufferPtr, matrix1, sizeof(matrix1));
    int synFenceFd = -1;
    ASSERT_EQ(AHardwareBuffer_unlock(matrix1Buffer, &synFenceFd), 0);
    if (synFenceFd > 0) {
        // If a valid sync fence is returned by AHardwareBuffer_unlock, use
        // ANeuralNetworksExecution_startComputeWithDependencies to wait on it.
        ANeuralNetworksEvent* eventBufferUnlock;
        ANeuralNetworksEvent* eventToSignal;
        ASSERT_EQ(ANeuralNetworksEvent_createFromSyncFenceFd(synFenceFd, &eventBufferUnlock),
                  ANEURALNETWORKS_NO_ERROR);
        close(synFenceFd);
        ANeuralNetworksExecution* executionHandle = execution.getHandle();
        ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(
                          executionHandle, &eventBufferUnlock, 1, 0, &eventToSignal),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksEvent_wait(eventToSignal), ANEURALNETWORKS_NO_ERROR);
        ANeuralNetworksEvent_free(eventBufferUnlock);
        ANeuralNetworksEvent_free(eventToSignal);
    } else {
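        // No fence was returned, so the CPU writes are already visible and a plain
        // synchronous compute is sufficient.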
        ASSERT_EQ(execution.compute(), Result::NO_ERROR);
    }

    ASSERT_EQ(CompareMatrices(expected2, actual), 0);
    AHardwareBuffer_release(matrix1Buffer);
}

TEST_F(TrivialTest, AddThree) {
    Model modelAdd3;
    CreateAddThreeTensorModel(&modelAdd3, matrix3);

    // Test the two node model.
    Matrix3x4 actual;
    memset(&actual, 0, sizeof(actual));
    Compilation compilation2(&modelAdd3);
    compilation2.finish();
    Execution execution2(&compilation2);
    ASSERT_EQ(execution2.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution2.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution2.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution2.compute(), Result::NO_ERROR);
    ASSERT_EQ(CompareMatrices(expected3, actual), 0);

    // Test it a second time to make sure the model is reusable.
    memset(&actual, 0, sizeof(actual));
    Compilation compilation3(&modelAdd3);
    compilation3.finish();
    Execution execution3(&compilation3);
    ASSERT_EQ(execution3.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution3.setInput(1, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution3.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution3.compute(), Result::NO_ERROR);
    ASSERT_EQ(CompareMatrices(expected3b, actual), 0);
}

TEST_F(TrivialTest, FencedAddThree) {
    Model modelAdd3;
    CreateAddThreeTensorModel(&modelAdd3, matrix3);
    Compilation compilation(&modelAdd3);
    compilation.finish();

    Matrix3x4 output1, output2;
    memset(&output1, 0, sizeof(output1));
    memset(&output2, 0, sizeof(output2));

    // Start the first execution
    Execution execution1(&compilation);
    ASSERT_EQ(execution1.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution1.setInput(1, matrix2, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution1.setOutput(0, output1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ANeuralNetworksEvent* event1;
    ANeuralNetworksExecution* execution1_handle = execution1.getHandle();
    ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution1_handle, nullptr, 0,
                                                                    0, &event1),
              ANEURALNETWORKS_NO_ERROR);

    // Start the second execution which will wait for the first one.
    Execution execution2(&compilation);
    ASSERT_EQ(execution2.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution2.setInput(1, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution2.setOutput(0, output2, sizeof(Matrix3x4)), Result::NO_ERROR);
    ANeuralNetworksEvent* event2;
    ANeuralNetworksExecution* execution2_handle = execution2.getHandle();
    ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution2_handle, &event1, 1,
                                                                    0, &event2),
              ANEURALNETWORKS_NO_ERROR);
    // Wait for the second event.
    ASSERT_EQ(ANeuralNetworksEvent_wait(event2), ANEURALNETWORKS_NO_ERROR);

    // Check the results for both executions.
    ASSERT_EQ(CompareMatrices(expected3, output1), 0);
    ASSERT_EQ(CompareMatrices(expected3b, output2), 0);

    // Free the event objects
    ANeuralNetworksEvent_free(event1);
    ANeuralNetworksEvent_free(event2);
}

TEST_F(TrivialTest, BroadcastAddTwo) {
    Model modelBroadcastAdd2;
    OperandType scalarType(Type::INT32, {});
    auto activation = modelBroadcastAdd2.addConstantOperand(&scalarType, kNoActivation);

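    // The second input has shape {4} and is broadcast against each row of the
    // {1, 1, 3, 4} first input.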
    OperandType matrixType(Type::TENSOR_FLOAT32, {1, 1, 3, 4});
    OperandType matrixType2(Type::TENSOR_FLOAT32, {4});

    auto a = modelBroadcastAdd2.addOperand(&matrixType);
    auto b = modelBroadcastAdd2.addOperand(&matrixType2);
    auto c = modelBroadcastAdd2.addOperand(&matrixType);
    modelBroadcastAdd2.addOperation(ANEURALNETWORKS_ADD, {a, b, activation}, {c});
    modelBroadcastAdd2.identifyInputsAndOutputs({a, b}, {c});
    ASSERT_TRUE(modelBroadcastAdd2.isValid());
    modelBroadcastAdd2.finish();

    // Test the one node model.
    Matrix3x4 actual;
    memset(&actual, 0, sizeof(actual));
    Compilation compilation(&modelBroadcastAdd2);
    compilation.finish();
    Execution execution(&compilation);
    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, matrix2b, sizeof(Matrix4)), Result::NO_ERROR);
    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
    ASSERT_EQ(CompareMatrices(expected2b, actual), 0);
}

TEST_F(TrivialTest, BroadcastMulTwo) {
    Model modelBroadcastMul2;
    OperandType scalarType(Type::INT32, {});
    auto activation = modelBroadcastMul2.addConstantOperand(&scalarType, kNoActivation);

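    // Same broadcast shapes as BroadcastAddTwo, but using element-wise multiplication.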
    OperandType matrixType(Type::TENSOR_FLOAT32, {1, 1, 3, 4});
    OperandType matrixType2(Type::TENSOR_FLOAT32, {4});

    auto a = modelBroadcastMul2.addOperand(&matrixType);
    auto b = modelBroadcastMul2.addOperand(&matrixType2);
    auto c = modelBroadcastMul2.addOperand(&matrixType);
    modelBroadcastMul2.addOperation(ANEURALNETWORKS_MUL, {a, b, activation}, {c});
    modelBroadcastMul2.identifyInputsAndOutputs({a, b}, {c});
    ASSERT_TRUE(modelBroadcastMul2.isValid());
    modelBroadcastMul2.finish();

    // Test the one node model.
    Matrix3x4 actual;
    memset(&actual, 0, sizeof(actual));
    Compilation compilation(&modelBroadcastMul2);
    compilation.finish();
    Execution execution(&compilation);
    ASSERT_EQ(execution.setInput(0, matrix1, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution.setInput(1, matrix2b, sizeof(Matrix4)), Result::NO_ERROR);
    ASSERT_EQ(execution.setOutput(0, actual, sizeof(Matrix3x4)), Result::NO_ERROR);
    ASSERT_EQ(execution.compute(), Result::NO_ERROR);
    ASSERT_EQ(CompareMatrices(expected2c, actual), 0);
}

}  // end namespace