/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Callbacks.h"
#include "TestHarness.h"
#include "Utils.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
#include <iostream>

namespace android {
namespace hardware {
namespace neuralnetworks {

namespace generated_tests {
using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::test_helper::filter;
using ::test_helper::for_all;
using ::test_helper::for_each;
using ::test_helper::resize_accordingly;
using ::test_helper::MixedTyped;
using ::test_helper::MixedTypedExampleType;
using ::test_helper::Float32Operands;
using ::test_helper::Int32Operands;
using ::test_helper::Quant8Operands;
using ::test_helper::compare;

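// Copies every output operand of element type T from the mapped output pool (src) back into
// *dst, using each RequestArgument's location (offset and length) to find its slice.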
template <typename T>
void copy_back_(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
    MixedTyped& test = *dst;
    for_each<T>(test, [&ra, src](int index, std::vector<T>& m) {
        ASSERT_EQ(m.size(), ra[index].location.length / sizeof(T));
        char* begin = src + ra[index].location.offset;
        memcpy(m.data(), begin, ra[index].location.length);
    });
}

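// Copies all supported operand types (float32, int32, quant8) out of the output pool.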
void copy_back(MixedTyped* dst, const std::vector<RequestArgument>& ra, char* src) {
    copy_back_<float>(dst, ra, src);
    copy_back_<int32_t>(dst, ra, src);
    copy_back_<uint8_t>(dst, ra, src);
}

// Top-level test driver for the models and examples generated by test_generator.py
// from the specifications in ml/nn/runtime/test/spec.
void EvaluatePreparedModel(sp<IPreparedModel>& preparedModel, std::function<bool(int)> is_ignored,
                           const std::vector<MixedTypedExampleType>& examples,
                           float fpRange = 1e-5f) {
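    // Indices of the input and output shared memory pools in Request::pools.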
    const uint32_t INPUT = 0;
    const uint32_t OUTPUT = 1;

    int example_no = 1;
    for (auto& example : examples) {
        SCOPED_TRACE(example_no++);

        const MixedTyped& inputs = example.first;
        const MixedTyped& golden = example.second;

        std::vector<RequestArgument> inputs_info, outputs_info;
        uint32_t inputSize = 0, outputSize = 0;

        // This block only fills in the metadata (a vector of RequestArguments) for each input;
        // the actual values are copied into shared memory below.
        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            RequestArgument arg_empty = {
                .hasNoValue = true,
            };
            inputs_info[index] = s ? arg : arg_empty;
            inputSize += s;
        });
        // Compute the offset of each input within the input pool; input 0 starts at offset 0.
        {
            size_t offset = 0;
            for (auto& i : inputs_info) {
                if (!i.hasNoValue) i.location.offset = offset;
                offset += i.location.length;
            }
        }

        MixedTyped test;  // holding test results

        // Go through all outputs, initialize RequestArgument descriptors
        resize_accordingly(golden, test);
        for_all(golden, [&outputs_info, &outputSize](int index, auto, auto s) {
            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
            RequestArgument arg = {
                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
                .dimensions = {},
            };
            outputs_info[index] = arg;
            outputSize += s;
        });
        // Compute the offset of each output within the output pool; output 0 starts at offset 0.
        {
            size_t offset = 0;
            for (auto& i : outputs_info) {
                i.location.offset = offset;
                offset += i.location.length;
            }
        }
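        // Allocate one shared memory pool for all inputs and one for all outputs;
        // the offsets computed above index into these pools.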
        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
                                          nn::allocateSharedMemory(outputSize)};
        ASSERT_NE(0ull, pools[INPUT].size());
        ASSERT_NE(0ull, pools[OUTPUT].size());

        // load data
        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
        sp<IMemory> outputMemory = mapMemory(pools[OUTPUT]);
        ASSERT_NE(nullptr, inputMemory.get());
        ASSERT_NE(nullptr, outputMemory.get());
        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
        char* outputPtr = reinterpret_cast<char*>(static_cast<void*>(outputMemory->getPointer()));
        ASSERT_NE(nullptr, inputPtr);
        ASSERT_NE(nullptr, outputPtr);
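        // update()/commit() bracket direct CPU access to the mapped HIDL memory.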
        inputMemory->update();
        outputMemory->update();

        // Go through all inputs, copy the values
        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
            char* begin = (char*)p;
            char* end = begin + s;
            // TODO: handle more than one input
            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
        });

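        // End the CPU access started by update() so the written inputs are visible to the service.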
        inputMemory->commit();
        outputMemory->commit();

        // launch execution
        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
        ASSERT_NE(nullptr, executionCallback.get());
        Return<ErrorStatus> executionLaunchStatus = preparedModel->execute(
            {.inputs = inputs_info, .outputs = outputs_info, .pools = pools}, executionCallback);
        ASSERT_TRUE(executionLaunchStatus.isOk());
        EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));

        // retrieve execution status
        executionCallback->wait();
        ErrorStatus executionReturnStatus = executionCallback->getStatus();
        EXPECT_EQ(ErrorStatus::NONE, executionReturnStatus);

        // validate results
        outputMemory->read();
        copy_back(&test, outputs_info, outputPtr);
        outputMemory->commit();
        // Filter out don't cares
        MixedTyped filtered_golden = filter(golden, is_ignored);
        MixedTyped filtered_test = filter(test, is_ignored);

        // We want "close-enough" results for float
        compare(filtered_golden, filtered_test, fpRange);
    }
}
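// The Execute() overloads below are the entry points used by the generated tests. A generated
// test is expected to call them roughly as follows (the namespace and symbol names here are
// illustrative, not defined in this file):
//
//   TEST_F(NeuralnetworksHidlTest, some_model) {
//       generated_tests::Execute(device, some_model::createTestModel, some_model::is_ignored,
//                                some_model::examples);
//   }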

void Execute(const sp<V1_0::IDevice>& device, std::function<V1_0::Model(void)> create_model,
             std::function<bool(int)> is_ignored,
             const std::vector<MixedTypedExampleType>& examples) {
    V1_0::Model model = create_model();

    // see if service can handle model
    bool fullySupportsModel = false;
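    // fullySupportsModel is true only if the service reports every operation in the model as
    // supported; if not, a failure to prepare the model below is treated as an acceptable early
    // exit rather than a test failure.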
    Return<void> supportedCall = device->getSupportedOperations(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedCall.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel(model, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel.get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel.get());

    EvaluatePreparedModel(preparedModel, is_ignored, examples);
}

void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> create_model,
             std::function<bool(int)> is_ignored,
             const std::vector<MixedTypedExampleType>& examples) {
    V1_1::Model model = create_model();

    // see if service can handle model
    bool fullySupportsModel = false;
    Return<void> supportedCall = device->getSupportedOperations_1_1(
        model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
            ASSERT_EQ(ErrorStatus::NONE, status);
            ASSERT_NE(0ul, supported.size());
            fullySupportsModel =
                std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; });
        });
    ASSERT_TRUE(supportedCall.isOk());

    // launch prepare model
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
    ASSERT_NE(nullptr, preparedModelCallback.get());
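    // prepareModel_1_1 additionally takes an ExecutionPreference; FAST_SINGLE_ANSWER is used
    // here since each example is executed once.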
    Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_1(
        model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchStatus.isOk());
    ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));

    // retrieve prepared model
    preparedModelCallback->wait();
    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
    sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();

    // early termination if vendor service cannot fully prepare model
    if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
        ASSERT_EQ(nullptr, preparedModel.get());
        LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
                     "prepare model that it does not support.";
        std::cout << "[          ]   Early termination of test because vendor service cannot "
                     "prepare model that it does not support."
                  << std::endl;
        return;
    }
    EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus);
    ASSERT_NE(nullptr, preparedModel.get());

    // If in relaxed mode, set the error range to be 5 ULP of FP16.
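    // One ULP of an FP16 value in [1, 2) is 2^-10 = 0.0009765625.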
    float fpRange = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f;
    EvaluatePreparedModel(preparedModel, is_ignored, examples, fpRange);
}

}  // namespace generated_tests

}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android