/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Conversions.h"

#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/OperationTypes.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/CommonUtils.h>

#include <algorithm>
#include <functional>
#include <iterator>
#include <type_traits>
#include <utility>

#include "Utils.h"

namespace android::nn {
namespace {

using hardware::hidl_vec;

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

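// Converts each element of a hidl_vec to its canonical counterpart, propagating the first
// element-level conversion failure.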
template <typename Type>
GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const hidl_vec<Type>& arguments) {
    std::vector<UnvalidatedConvertOutput<Type>> canonical;
    canonical.reserve(arguments.size());
    for (const auto& argument : arguments) {
        canonical.push_back(NN_TRY(nn::unvalidatedConvert(argument)));
    }
    return canonical;
}

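// Converts a HAL object to canonical form, then checks that the result only uses features
// available in HAL version 1.1.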
template <typename Type>
GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& halObject) {
    auto canonical = NN_TRY(nn::unvalidatedConvert(halObject));
    NN_TRY(hal::V1_1::utils::compliantVersion(canonical));
    return canonical;
}

}  // anonymous namespace

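// The canonical OperationType enum uses the same numeric values as the HAL 1.1 enum, so the
// conversion is a plain static_cast.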
GeneralResult<OperationType> unvalidatedConvert(const hal::V1_1::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

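// HAL 1.1 Capabilities only carries float32 and quantized8 performance; expand those two values
// into the per-operand-type performance table used by the canonical Capabilities, consistent with
// how Android P interpreted them.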
GeneralResult<Capabilities> unvalidatedConvert(const hal::V1_1::Capabilities& capabilities) {
    const auto quantized8Performance =
            NN_TRY(unvalidatedConvert(capabilities.quantized8Performance));
    const auto float32Performance = NN_TRY(unvalidatedConvert(capabilities.float32Performance));
    const auto relaxedFloat32toFloat16Performance =
            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16Performance));

    auto table = hal::utils::makeQuantized8PerformanceConsistentWithP(float32Performance,
                                                                      quantized8Performance);

    return Capabilities{
            .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16Performance,
            .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16Performance,
            .operandPerformance = std::move(table),
    };
}

GeneralResult<Operation> unvalidatedConvert(const hal::V1_1::Operation& operation) {
    const auto type = NN_TRY(unvalidatedConvert(operation.type));
    return Operation{
            .type = type,
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

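// The HAL model caches a numberOfConsumers value per operand; recompute the counts from the
// converted operations and reject the model if the cached values disagree.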
GeneralResult<Model> unvalidatedConvert(const hal::V1_1::Model& model) {
    auto operations = NN_TRY(unvalidatedConvert(model.operations));

    // Verify number of consumers.
    const auto numberOfConsumers =
            NN_TRY(countNumberOfConsumers(model.operands.size(), operations));
    CHECK(model.operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < model.operands.size(); ++i) {
        if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                   << "Invalid numberOfConsumers for operand " << i << ", expected "
                   << numberOfConsumers[i] << " but found " << model.operands[i].numberOfConsumers;
        }
    }

    auto operands = NN_TRY(unvalidatedConvert(model.operands));
    auto main = Model::Subgraph{
            .operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
    };

    auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
    auto pools = NN_TRY(unvalidatedConvert(model.pools));
    return Model{
            .main = std::move(main),
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
    };
}

GeneralResult<ExecutionPreference> unvalidatedConvert(
        const hal::V1_1::ExecutionPreference& executionPreference) {
    return static_cast<ExecutionPreference>(executionPreference);
}

GeneralResult<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

GeneralResult<Model> convert(const hal::V1_1::Model& model) {
    return validatedConvert(model);
}

GeneralResult<ExecutionPreference> convert(
        const hal::V1_1::ExecutionPreference& executionPreference) {
    return validatedConvert(executionPreference);
}

}  // namespace android::nn

namespace android::hardware::neuralnetworks::V1_1::utils {
namespace {

using utils::unvalidatedConvert;

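// These types are unchanged between HAL 1.0 and 1.1, so conversion is delegated to the 1.0
// utilities.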
nn::GeneralResult<V1_0::PerformanceInfo> unvalidatedConvert(
        const nn::Capabilities::PerformanceInfo& performanceInfo) {
    return V1_0::utils::unvalidatedConvert(performanceInfo);
}

nn::GeneralResult<V1_0::Operand> unvalidatedConvert(const nn::Operand& operand) {
    return V1_0::utils::unvalidatedConvert(operand);
}

nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
        const nn::Model::OperandValues& operandValues) {
    return V1_0::utils::unvalidatedConvert(operandValues);
}

nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
    return V1_0::utils::unvalidatedConvert(memory);
}

template <typename Input>
using UnvalidatedConvertOutput =
        std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;

template <typename Type>
nn::GeneralResult<hidl_vec<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
        const std::vector<Type>& arguments) {
    hidl_vec<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
    for (size_t i = 0; i < arguments.size(); ++i) {
        halObject[i] = NN_TRY(unvalidatedConvert(arguments[i]));
    }
    return halObject;
}

template <typename Type>
nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
    NN_TRY(compliantVersion(canonical));
    return unvalidatedConvert(canonical);
}

}  // anonymous namespace

nn::GeneralResult<OperationType> unvalidatedConvert(const nn::OperationType& operationType) {
    return static_cast<OperationType>(operationType);
}

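// HAL 1.1 Capabilities can only express float32, quantized8, and relaxed-float32 performance, so
// the canonical per-operand-type performance table is collapsed back down to those three fields.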
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
    const auto float32Performance = NN_TRY(unvalidatedConvert(
            capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_FLOAT32)));
    const auto quantized8Performance = NN_TRY(unvalidatedConvert(
            capabilities.operandPerformance.lookup(nn::OperandType::TENSOR_QUANT8_ASYMM)));
    const auto relaxedFloat32toFloat16Performance =
            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
    return Capabilities{
            .float32Performance = float32Performance,
            .quantized8Performance = quantized8Performance,
            .relaxedFloat32toFloat16Performance = relaxedFloat32toFloat16Performance,
    };
}

nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
    const auto type = NN_TRY(unvalidatedConvert(operation.type));
    return Operation{
            .type = type,
            .inputs = operation.inputs,
            .outputs = operation.outputs,
    };
}

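// Pointer-based operand data cannot be expressed over HIDL, so such models are rejected up front.
// numberOfConsumers is recomputed here because the canonical types do not cache it.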
nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
    if (!hal::utils::hasNoPointerData(model)) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
               << "Model cannot be unvalidatedConverted because it contains pointer-based memory";
    }

    auto operands = NN_TRY(unvalidatedConvert(model.main.operands));

    // Update number of consumers.
    const auto numberOfConsumers =
            NN_TRY(countNumberOfConsumers(operands.size(), model.main.operations));
    CHECK(operands.size() == numberOfConsumers.size());
    for (size_t i = 0; i < operands.size(); ++i) {
        operands[i].numberOfConsumers = numberOfConsumers[i];
    }

    auto operations = NN_TRY(unvalidatedConvert(model.main.operations));
    auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
    auto pools = NN_TRY(unvalidatedConvert(model.pools));
    return Model{
            .operands = std::move(operands),
            .operations = std::move(operations),
            .inputIndexes = model.main.inputIndexes,
            .outputIndexes = model.main.outputIndexes,
            .operandValues = std::move(operandValues),
            .pools = std::move(pools),
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
    };
}

nn::GeneralResult<ExecutionPreference> unvalidatedConvert(
        const nn::ExecutionPreference& executionPreference) {
    return static_cast<ExecutionPreference>(executionPreference);
}

nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
    return validatedConvert(capabilities);
}

nn::GeneralResult<Model> convert(const nn::Model& model) {
    return validatedConvert(model);
}

nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference) {
    return validatedConvert(executionPreference);
}

nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {
    return V1_0::utils::convert(deviceStatus);
}

nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {
    return V1_0::utils::convert(request);
}

nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status) {
    return V1_0::utils::convert(status);
}

}  // namespace android::hardware::neuralnetworks::V1_1::utils