/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #define LOG_TAG "neuralnetworks_hidl_hal_test"
18 
19 #include "VtsHalNeuralnetworks.h"
20 
21 namespace android::hardware::neuralnetworks::V1_2::vts::functional {
22 
23 using implementation::PreparedModelCallback;
24 using V1_0::DeviceStatus;
25 using V1_0::ErrorStatus;
26 using V1_0::OperandLifeTime;
27 using V1_0::PerformanceInfo;
28 using V1_1::ExecutionPreference;
29 using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
30 
31 // create device test
TEST_P(NeuralnetworksHidlTest,CreateDevice)32 TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
33 
34 // status test
TEST_P(NeuralnetworksHidlTest,StatusTest)35 TEST_P(NeuralnetworksHidlTest, StatusTest) {
36     Return<DeviceStatus> status = kDevice->getStatus();
37     ASSERT_TRUE(status.isOk());
38     EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
39 }
40 
41 // initialization
TEST_P(NeuralnetworksHidlTest,GetCapabilitiesTest)42 TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
43     using OperandPerformance = Capabilities::OperandPerformance;
44     Return<void> ret = kDevice->getCapabilities_1_2([](ErrorStatus status,
45                                                        const Capabilities& capabilities) {
46         EXPECT_EQ(ErrorStatus::NONE, status);
47 
48         auto isPositive = [](const PerformanceInfo& perf) {
49             return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
50         };
51 
52         EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
53         EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
54         const auto& opPerf = capabilities.operandPerformance;
55         EXPECT_TRUE(std::all_of(
56                 opPerf.begin(), opPerf.end(),
57                 [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
58         EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
59                                    [](const OperandPerformance& a, const OperandPerformance& b) {
60                                        return a.type < b.type;
61                                    }));
62     });
63     EXPECT_TRUE(ret.isOk());
64 }
65 
66 // device version test
TEST_P(NeuralnetworksHidlTest,GetDeviceVersionStringTest)67 TEST_P(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
68     Return<void> ret =
69             kDevice->getVersionString([](ErrorStatus status, const hidl_string& version) {
70                 EXPECT_EQ(ErrorStatus::NONE, status);
71                 EXPECT_LT(0, version.size());
72             });
73     EXPECT_TRUE(ret.isOk());
74 }
75 
76 // device type test
TEST_P(NeuralnetworksHidlTest,GetDeviceTypeTest)77 TEST_P(NeuralnetworksHidlTest, GetDeviceTypeTest) {
78     Return<void> ret = kDevice->getType([](ErrorStatus status, DeviceType type) {
79         EXPECT_EQ(ErrorStatus::NONE, status);
80         EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU ||
81                     type == DeviceType::GPU || type == DeviceType::ACCELERATOR);
82     });
83     EXPECT_TRUE(ret.isOk());
84 }
85 
86 // device name test
TEST_P(NeuralnetworksHidlTest,GetDeviceNameTest)87 TEST_P(NeuralnetworksHidlTest, GetDeviceNameTest) {
88     const std::string deviceName = getName(GetParam());
89     auto pos = deviceName.find('-');
90     EXPECT_NE(pos, std::string::npos);
91     // The separator should not be the first or last character.
92     EXPECT_NE(pos, 0);
93     EXPECT_NE(pos, deviceName.length() - 1);
94     // There should only be 1 separator.
95     EXPECT_EQ(std::string::npos, deviceName.find('-', pos + 1));
96 }
97 
98 // device supported extensions test
TEST_P(NeuralnetworksHidlTest,GetDeviceSupportedExtensionsTest)99 TEST_P(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
100     Return<void> ret = kDevice->getSupportedExtensions(
101             [](ErrorStatus status, const hidl_vec<Extension>& extensions) {
102                 EXPECT_EQ(ErrorStatus::NONE, status);
103                 for (auto& extension : extensions) {
104                     std::string extensionName = extension.name;
105                     EXPECT_FALSE(extensionName.empty());
106                     for (char c : extensionName) {
107                         EXPECT_TRUE(('a' <= c && c <= 'z') || ('0' <= c && c <= '9') || c == '_' ||
108                                     c == '.')
109                                 << "Extension name contains an illegal character: " << c;
110                     }
111                     EXPECT_NE(extensionName.find('.'), std::string::npos)
112                             << "Extension name must start with the reverse domain name of the "
113                                "vendor";
114                 }
115             });
116     EXPECT_TRUE(ret.isOk());
117 }
118 
119 // getNumberOfCacheFilesNeeded test
TEST_P(NeuralnetworksHidlTest,getNumberOfCacheFilesNeeded)120 TEST_P(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
121     Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
122             [](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
123                 EXPECT_EQ(ErrorStatus::NONE, status);
124                 EXPECT_LE(numModelCache,
125                           static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
126                 EXPECT_LE(numDataCache, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES));
127             });
128     EXPECT_TRUE(ret.isOk());
129 }
130 
131 // detect cycle
TEST_P(NeuralnetworksHidlTest,CycleTest)132 TEST_P(NeuralnetworksHidlTest, CycleTest) {
133     // opnd0 = TENSOR_FLOAT32            // model input
134     // opnd1 = TENSOR_FLOAT32            // model input
135     // opnd2 = INT32                     // model input
136     // opnd3 = ADD(opnd0, opnd4, opnd2)
137     // opnd4 = ADD(opnd1, opnd3, opnd2)
138     // opnd5 = ADD(opnd4, opnd0, opnd2)  // model output
139     //
140     //            +-----+
141     //            |     |
142     //            v     |
143     // 3 = ADD(0, 4, 2) |
144     // |                |
145     // +----------+     |
146     //            |     |
147     //            v     |
148     // 4 = ADD(1, 3, 2) |
149     // |                |
150     // +----------------+
151     // |
152     // |
153     // +-------+
154     //         |
155     //         v
156     // 5 = ADD(4, 0, 2)
157 
158     const std::vector<Operand> operands = {
159             {
160                     // operands[0]
161                     .type = OperandType::TENSOR_FLOAT32,
162                     .dimensions = {1},
163                     .numberOfConsumers = 2,
164                     .scale = 0.0f,
165                     .zeroPoint = 0,
166                     .lifetime = OperandLifeTime::MODEL_INPUT,
167                     .location = {.poolIndex = 0, .offset = 0, .length = 0},
168             },
169             {
170                     // operands[1]
171                     .type = OperandType::TENSOR_FLOAT32,
172                     .dimensions = {1},
173                     .numberOfConsumers = 1,
174                     .scale = 0.0f,
175                     .zeroPoint = 0,
176                     .lifetime = OperandLifeTime::MODEL_INPUT,
177                     .location = {.poolIndex = 0, .offset = 0, .length = 0},
178             },
179             {
180                     // operands[2]
181                     .type = OperandType::INT32,
182                     .dimensions = {},
183                     .numberOfConsumers = 3,
184                     .scale = 0.0f,
185                     .zeroPoint = 0,
186                     .lifetime = OperandLifeTime::MODEL_INPUT,
187                     .location = {.poolIndex = 0, .offset = 0, .length = 0},
188             },
189             {
190                     // operands[3]
191                     .type = OperandType::TENSOR_FLOAT32,
192                     .dimensions = {1},
193                     .numberOfConsumers = 1,
194                     .scale = 0.0f,
195                     .zeroPoint = 0,
196                     .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
197                     .location = {.poolIndex = 0, .offset = 0, .length = 0},
198             },
199             {
200                     // operands[4]
201                     .type = OperandType::TENSOR_FLOAT32,
202                     .dimensions = {1},
203                     .numberOfConsumers = 2,
204                     .scale = 0.0f,
205                     .zeroPoint = 0,
206                     .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
207                     .location = {.poolIndex = 0, .offset = 0, .length = 0},
208             },
209             {
210                     // operands[5]
211                     .type = OperandType::TENSOR_FLOAT32,
212                     .dimensions = {1},
213                     .numberOfConsumers = 0,
214                     .scale = 0.0f,
215                     .zeroPoint = 0,
216                     .lifetime = OperandLifeTime::MODEL_OUTPUT,
217                     .location = {.poolIndex = 0, .offset = 0, .length = 0},
218             },
219     };
220 
221     const std::vector<Operation> operations = {
222             {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
223             {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
224             {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
225     };
226 
227     const Model model = {
228             .operands = operands,
229             .operations = operations,
230             .inputIndexes = {0, 1, 2},
231             .outputIndexes = {5},
232             .operandValues = {},
233             .pools = {},
234     };
235 
236     // ensure that getSupportedOperations_1_2() checks model validity
237     ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
238     Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_2(
239             model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
240                                                       const hidl_vec<bool>& supported) {
241                 supportedOpsErrorStatus = status;
242                 if (status == ErrorStatus::NONE) {
243                     ASSERT_EQ(supported.size(), model.operations.size());
244                 }
245             });
246     ASSERT_TRUE(supportedOpsReturn.isOk());
247     ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);
248 
249     // ensure that prepareModel_1_2() checks model validity
250     sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
251     Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_2(
252             model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
253             hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
254     ASSERT_TRUE(prepareLaunchReturn.isOk());
255     //     Note that preparation can fail for reasons other than an
256     //     invalid model (invalid model should result in
257     //     INVALID_ARGUMENT) -- for example, perhaps not all
258     //     operations are supported, or perhaps the device hit some
259     //     kind of capacity limit.
260     EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
261     EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
262     EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
263 }
264 
265 }  // namespace android::hardware::neuralnetworks::V1_2::vts::functional
266