/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_aidl_hal_test"

#include <aidl/android/hardware/neuralnetworks/Capabilities.h>
#include <aidl/android/hardware/neuralnetworks/IDevice.h>
#include <aidl/android/hardware/neuralnetworks/Operand.h>
#include <aidl/android/hardware/neuralnetworks/OperandType.h>
#include <aidl/android/hardware/neuralnetworks/Priority.h>
#include <android/binder_interface_utils.h>

#include "Utils.h"
#include "VtsHalNeuralnetworks.h"

namespace aidl::android::hardware::neuralnetworks::vts::functional {

using implementation::PreparedModelCallback;

// create device test
TEST_P(NeuralNetworksAidlTest, CreateDevice) {}

// initialization
TEST_P(NeuralNetworksAidlTest, GetCapabilitiesTest) {
    Capabilities capabilities;
    const auto retStatus = kDevice->getCapabilities(&capabilities);
    ASSERT_TRUE(retStatus.isOk());

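    // Every reported PerformanceInfo entry must have strictly positive execTime and powerUsage.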
    auto isPositive = [](const PerformanceInfo& perf) {
        return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
    };

    EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
    EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
    const auto& opPerf = capabilities.operandPerformance;
    EXPECT_TRUE(
            std::all_of(opPerf.begin(), opPerf.end(),
                        [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
    EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
                               [](const OperandPerformance& a, const OperandPerformance& b) {
                                   return a.type < b.type;
                               }));
    EXPECT_TRUE(std::all_of(opPerf.begin(), opPerf.end(), [](const OperandPerformance& a) {
        return a.type != OperandType::SUBGRAPH;
    }));
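    // Control flow performance is reported through the dedicated ifPerformance and
    // whilePerformance fields rather than through operandPerformance.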
    EXPECT_TRUE(isPositive(capabilities.ifPerformance));
    EXPECT_TRUE(isPositive(capabilities.whilePerformance));
}

// detect cycle
TEST_P(NeuralNetworksAidlTest, CycleTest) {
    // opnd0 = TENSOR_FLOAT32            // model input
    // opnd1 = TENSOR_FLOAT32            // model input
    // opnd2 = INT32                     // model input
    // opnd3 = ADD(opnd0, opnd4, opnd2)
    // opnd4 = ADD(opnd1, opnd3, opnd2)
    // opnd5 = ADD(opnd4, opnd0, opnd2)  // model output
    //
    //            +-----+
    //            |     |
    //            v     |
    // 3 = ADD(0, 4, 2) |
    // |                |
    // +----------+     |
    //            |     |
    //            v     |
    // 4 = ADD(1, 3, 2) |
    // |                |
    // +----------------+
    // |
    // |
    // +-------+
    //         |
    //         v
    // 5 = ADD(4, 0, 2)

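    // Operands 3 and 4 are each computed from the other's output, so the graph contains a
    // cycle and has no valid execution order; the device must reject this model below.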
    const std::vector<Operand> operands = {
            {
                    // operands[0]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[1]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[2]
                    .type = OperandType::INT32,
                    .dimensions = {},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[3]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[4]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[5]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::SUBGRAPH_OUTPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
    };

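    // The first two ADD operations consume each other's outputs (operands 3 and 4); operand 2
    // is the shared INT32 input used by all three ADDs.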
    const std::vector<Operation> operations = {
            {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
            {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
            {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
    };

    Subgraph subgraph = {
            .operands = operands,
            .operations = operations,
            .inputIndexes = {0, 1, 2},
            .outputIndexes = {5},
    };
    const Model model = {
            .main = std::move(subgraph),
            .referenced = {},
            .operandValues = {},
            .pools = {},
    };

    // ensure that getSupportedOperations() checks model validity
    std::vector<bool> supportedOps;
    const auto supportedOpsStatus = kDevice->getSupportedOperations(model, &supportedOps);
    ASSERT_FALSE(supportedOpsStatus.isOk());
    ASSERT_EQ(supportedOpsStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
    ASSERT_EQ(static_cast<ErrorStatus>(supportedOpsStatus.getServiceSpecificError()),
              ErrorStatus::INVALID_ARGUMENT);

    // ensure that prepareModel() checks model validity
    auto preparedModelCallback = ndk::SharedRefBase::make<PreparedModelCallback>();
    auto prepareLaunchStatus =
            kDevice->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
                                  kNoDeadline, {}, {}, kEmptyCacheToken, preparedModelCallback);
    // Note that preparation can fail for reasons other than an invalid model (invalid model
    // should result in INVALID_ARGUMENT) -- for example, perhaps not all operations are
    // supported, or perhaps the device hit some kind of capacity limit.
    ASSERT_FALSE(prepareLaunchStatus.isOk());
    EXPECT_EQ(prepareLaunchStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
    EXPECT_NE(static_cast<ErrorStatus>(prepareLaunchStatus.getServiceSpecificError()),
              ErrorStatus::NONE);

    EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
    EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
}

}  // namespace aidl::android::hardware::neuralnetworks::vts::functional