/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H

#include <android-base/logging.h>

#include <chrono>
#include <functional>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "ValidateHal.h"
namespace android {
namespace nn {

// The number of data types (OperandCode) defined in NeuralNetworks.h.
const int kNumberOfDataTypes = 16;

// The number of operation types (OperationCode) defined in NeuralNetworks.h.
const int kNumberOfOperationTypes = 102;

// The number of execution preferences defined in NeuralNetworks.h.
const int kNumberOfPreferences = 3;

// The number of data types (OperandCode) defined in NeuralNetworksOEM.h.
const int kNumberOfDataTypesOEM = 2;

// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h.
const int kNumberOfOperationTypesOEM = 1;

// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h.
const int kOEMCodeBase = 10000;

/* IMPORTANT: if you change the following list, don't
 * forget to update the corresponding 'tags' table in
 * the initVLogMask() function implemented in Utils.cpp.
 */
enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY };

#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0)

#define VLOG(TAG)                 \
    if (LIKELY(!VLOG_IS_ON(TAG))) \
        ;                         \
    else                          \
        LOG(INFO)

extern int vLogMask;
void initVLogMask();
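
// Example usage of the VLOG machinery above (a hypothetical sketch; the
// surrounding code is illustrative, not part of this header):
//
//   initVLogMask();  // typically called once during library initialization
//   VLOG(MODEL) << "model has " << operandCount << " operands";
//   if (VLOG_IS_ON(EXECUTION)) {
//       // Only pay for expensive diagnostics when the tag is enabled.
//       VLOG(EXECUTION) << expensiveSummary();
//   }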

#ifdef NN_DEBUGGABLE
#define SHOW_IF_DEBUG(msg) msg
#else
#define SHOW_IF_DEBUG(msg) ""
#endif

// DEPRECATED(b/118737105). Use CHECK.
#define nnAssert(v) CHECK(v)

#define NN_RETURN_IF_ERROR(expr)                      \
    do {                                              \
        int _errorCode = (expr);                      \
        if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \
            return _errorCode;                        \
        }                                             \
    } while (0)
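
// Example usage of NN_RETURN_IF_ERROR (a hypothetical sketch; validateFoo and
// validateBar stand in for any functions returning an ANEURALNETWORKS_* code):
//
//   int validateBoth() {
//       NN_RETURN_IF_ERROR(validateFoo());
//       NN_RETURN_IF_ERROR(validateBar());
//       return ANEURALNETWORKS_NO_ERROR;
//   }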

// The NN_RET_CHECK family of macros defined below is similar to the CHECK family defined in
// system/core/base/include/android-base/logging.h.
//
// The difference is that NN_RET_CHECK macros use LOG(ERROR) instead of LOG(FATAL)
// and return false instead of aborting.

// Logs an error and returns false. Append context using << after. For example:
//
//   NN_RET_CHECK_FAIL() << "Something went wrong";
//
// The containing function must return a bool.
#define NN_RET_CHECK_FAIL()                   \
    return ::android::nn::FalseyErrorStream() \
           << "NN_RET_CHECK failed (" << __FILE__ << ":" << __LINE__ << "): "

// Logs an error and returns false if condition is false. Extra logging can be appended using <<
// after. For example:
//
//   NN_RET_CHECK(false) << "Something went wrong";
//
// The containing function must return a bool.
#define NN_RET_CHECK(condition) \
    while (UNLIKELY(!(condition))) NN_RET_CHECK_FAIL() << #condition << " "

// Helper for the NN_RET_CHECK_xx(x, y) macros defined below.
#define NN_RET_CHECK_OP(LHS, RHS, OP)                                                 \
    for (auto _values = ::android::base::MakeEagerEvaluator(LHS, RHS);                \
         UNLIKELY(!(_values.lhs OP _values.rhs));                                     \
         /* empty */)                                                                 \
    NN_RET_CHECK_FAIL() << #LHS << " " << #OP << " " << #RHS << " (" << #LHS << " = " \
                        << _values.lhs << ", " << #RHS << " = " << _values.rhs << ") "

// Logs an error and returns false if a condition between x and y does not hold. Extra logging can
// be appended using << after. For example:
//
//   NN_RET_CHECK_EQ(a, b) << "Something went wrong";
//
// The values must implement the appropriate comparison operator as well as
// `operator<<(std::ostream&, ...)`.
// The containing function must return a bool.
#define NN_RET_CHECK_EQ(x, y) NN_RET_CHECK_OP(x, y, ==)
#define NN_RET_CHECK_NE(x, y) NN_RET_CHECK_OP(x, y, !=)
#define NN_RET_CHECK_LE(x, y) NN_RET_CHECK_OP(x, y, <=)
#define NN_RET_CHECK_LT(x, y) NN_RET_CHECK_OP(x, y, <)
#define NN_RET_CHECK_GE(x, y) NN_RET_CHECK_OP(x, y, >=)
#define NN_RET_CHECK_GT(x, y) NN_RET_CHECK_OP(x, y, >)
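
// Putting the pieces together, a validation helper might look like this
// (a hypothetical sketch; checkRanks is not part of this header):
//
//   bool checkRanks(const std::vector<uint32_t>& lhs, const std::vector<uint32_t>& rhs) {
//       NN_RET_CHECK_EQ(lhs.size(), rhs.size()) << "rank mismatch";
//       NN_RET_CHECK(!lhs.empty()) << "scalar operands not supported";
//       return true;
//   }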

// Type to represent a deadline time point across processes.
using Deadline = std::chrono::steady_clock::time_point;

// Make a Deadline from a duration. If the sum of the current time and the
// duration exceeds the max time, return a time point holding the maximum
// expressible time.
Deadline makeDeadline(uint64_t duration);

// Convenience function. If the duration is provided, this function creates a
// Deadline using makeDeadline. If the duration is not provided, this function
// returns std::nullopt.
std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration);

// Make an optional Deadline from an OptionalTimePoint. If
// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a
// time point holding the maximum Deadline. If the OptionalTimePoint is none,
// this function returns std::nullopt.
std::optional<Deadline> makeDeadline(const hal::OptionalTimePoint& timePoint);

// Returns true if the deadline has passed. Returns false if either the deadline
// has not been exceeded or if the deadline is not present.
bool hasDeadlinePassed(const std::optional<Deadline>& deadline);

// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not
// provided, this function returns none for OptionalTimePoint.
hal::OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
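
// Example of the deadline helpers above (a hypothetical sketch; the unit of
// the duration argument is assumed to be nanoseconds, which this header does
// not itself specify):
//
//   const Deadline deadline = makeDeadline(1'000'000u);
//   while (!hasDeadlinePassed(deadline)) {
//       // ... do a bounded amount of work ...
//   }
//   const hal::OptionalTimePoint timePoint = makeTimePoint(deadline);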

// Ensure that every user of FalseyErrorStream is linked to the
// correct instance, using the correct LOG_TAG
namespace {

// A wrapper around LOG(ERROR) that can be implicitly converted to bool (always evaluates to false).
// Used to implement stream logging in NN_RET_CHECK.
class FalseyErrorStream {
    DISALLOW_COPY_AND_ASSIGN(FalseyErrorStream);

   public:
    FalseyErrorStream() {}

    template <typename T>
    FalseyErrorStream& operator<<(const T& value) {
        mBuffer << value;
        return *this;
    }

    ~FalseyErrorStream() { LOG(ERROR) << mBuffer.str(); }

    operator bool() const { return false; }

   private:
    std::ostringstream mBuffer;
};

template <HalVersion version>
struct VersionedType {};

template <>
struct VersionedType<HalVersion::V1_2> {
    using OperandPerformance = hal::V1_2::Capabilities::OperandPerformance;
    using OperandType = hal::V1_2::OperandType;
};

template <>
struct VersionedType<HalVersion::V1_3> {
    using OperandPerformance = hal::V1_3::Capabilities::OperandPerformance;
    using OperandType = hal::V1_3::OperandType;
};

template <HalVersion version>
using VersionedOperandPerformance = typename VersionedType<version>::OperandPerformance;
template <HalVersion version>
using VersionedOperandType = typename VersionedType<version>::OperandType;

}  // namespace

// Return a vector with one entry for each non-extension OperandType except
// SUBGRAPH, set to the specified PerformanceInfo value.  The vector will be
// sorted by OperandType.
//
// Control flow (OperandType::SUBGRAPH) operation performance is specified
// separately using Capabilities::ifPerformance and
// Capabilities::whilePerformance.
template <HalVersion version>
hal::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
        hal::PerformanceInfo perf);

// Update the vector entry corresponding to the specified OperandType with the
// specified PerformanceInfo value.  The vector must already have an entry for
// that OperandType, and must be sorted by OperandType.
void update(hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>* operandPerformance,
            hal::V1_2::OperandType type, hal::PerformanceInfo perf);
void update(hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>* operandPerformance,
            hal::V1_3::OperandType type, hal::PerformanceInfo perf);

// Look for a vector entry corresponding to the specified OperandType.  If
// found, return the associated PerformanceInfo.  If not, return a pessimistic
// PerformanceInfo (FLT_MAX).  The vector must be sorted by OperandType.
hal::PerformanceInfo lookup(
        const hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>& operandPerformance,
        hal::V1_2::OperandType type);
hal::PerformanceInfo lookup(
        const hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>& operandPerformance,
        hal::V1_3::OperandType type);

// Returns true if an operand type is an extension type.
bool isExtensionOperandType(hal::OperandType type);

// Returns true if an operation type is an extension type.
bool isExtensionOperationType(hal::OperationType type);

// Returns the amount of space needed to store a value of the specified
// dimensions and type. For a tensor with unspecified rank or at least one
// unspecified dimension, returns zero.
//
// Aborts if the specified type is an extension type.
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
uint32_t nonExtensionOperandSizeOfData(hal::OperandType type,
                                       const std::vector<uint32_t>& dimensions);
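
// For example, a fully specified TENSOR_FLOAT32 of shape {2, 3} occupies
// 2 * 3 * sizeof(float) = 24 bytes (a hypothetical sketch of the expected
// behavior, not a test in this header):
//
//   const uint32_t size = nonExtensionOperandSizeOfData(
//           hal::OperandType::TENSOR_FLOAT32, {2, 3});  // expected: 24
//   const uint32_t unknown = nonExtensionOperandSizeOfData(
//           hal::OperandType::TENSOR_FLOAT32, {2, 0});  // unspecified dim -> 0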

// Returns the amount of space needed to store a value of the dimensions and
// type of this operand. For a tensor with unspecified rank or at least one
// unspecified dimension, returns zero.
//
// Aborts if the specified type is an extension type.
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(const Operand&).
inline uint32_t nonExtensionOperandSizeOfData(const hal::Operand& operand) {
    return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
}

// Returns the amount of space needed to store a value of the specified
// dimensions and element size. For a tensor with unspecified rank or at least
// one unspecified dimension, returns zero.
//
// Aborts if the size would overflow the return type.
//
// See also TypeManager::getSizeOfData(const Operand&).
uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions);

// Returns true if the amount of space needed to store a value of the specified
// dimensions and type overflows the uint32_t type.
//
// Aborts if the specified type is an extension type.
//
// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,
                                                  const std::vector<uint32_t>& dimensions);

// Returns true if the amount of space needed to store a value of the specified
// dimensions and element size overflows the uint32_t type.
//
// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions);

// Returns true if a non-extension operand type is a scalar type.
//
// Aborts if the specified type is an extension type.
//
// See also TypeManager::isTensorType(OperandType).
bool nonExtensionOperandTypeIsScalar(int type);

// Returns the name of the operation type in ASCII.
std::string getOperationName(hal::OperationType opCode);

// Returns the name of the operand type in ASCII.
std::string getOperandTypeName(hal::OperandType type);

// Whether an operand of tensor type has unspecified dimensions.
//
// Undefined behavior if the operand type is a scalar type.
bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
bool tensorHasUnspecifiedDimensions(hal::OperandType type, const std::vector<uint32_t>& dimensions);
bool tensorHasUnspecifiedDimensions(const hal::Operand& operand);
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);

// Returns the number of padding bytes needed to align data of the
// specified length.  It aligns objects of length 2 or 3 on a 2-byte
// boundary, and objects of length 4 or more on a 4-byte boundary.
// We may want to have different alignments for tensors.
// TODO: This is arbitrary, more a proof of concept.  We need
// to determine what this should be.
uint32_t alignBytesNeeded(uint32_t index, size_t length);
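
// For instance, assuming the alignment rules above (a hypothetical sketch;
// the exact values depend on the implementation in Utils.cpp):
//
//   alignBytesNeeded(/* index */ 5, /* length */ 2);  // expected: 1, to reach
//                                                     // a 2-byte boundary
//   alignBytesNeeded(/* index */ 6, /* length */ 8);  // expected: 2, to reach
//                                                     // a 4-byte boundary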

// Does a detailed LOG(INFO) of the model
void logModelToInfo(const hal::V1_0::Model& model);
void logModelToInfo(const hal::V1_1::Model& model);
void logModelToInfo(const hal::V1_2::Model& model);
void logModelToInfo(const hal::V1_3::Model& model);

inline std::string toString(uint32_t obj) {
    return std::to_string(obj);
}

template <typename Type>
std::string toString(const std::vector<Type>& range) {
    std::string os = "[";
    for (size_t i = 0; i < range.size(); ++i) {
        os += (i == 0 ? "" : ", ") + toString(range[i]);
    }
    return os += "]";
}

template <typename A, typename B>
std::string toString(const std::pair<A, B>& pair) {
    std::ostringstream oss;
    oss << "(" << toString(pair.first) << ", " << toString(pair.second) << ")";
    return oss.str();
}
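
// These overloads compose, so nested containers print recursively. For
// example (a hypothetical sketch of the expected output):
//
//   toString(std::vector<uint32_t>{1, 2, 3});                 // "[1, 2, 3]"
//   toString(std::make_pair(4u, std::vector<uint32_t>{5}));   // "(4, [5])"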

inline std::string toString(HalVersion halVersion) {
    switch (halVersion) {
        case HalVersion::UNKNOWN:
            return "UNKNOWN HAL version";
        case HalVersion::V1_0:
            return "HAL version 1.0";
        case HalVersion::V1_1:
            return "HAL version 1.1";
        case HalVersion::V1_2:
            return "HAL version 1.2";
        case HalVersion::V1_3:
            return "HAL version 1.3";
    }
}

inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) {
    return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM);
}

bool validateOperandSymmPerChannelQuantParams(
        const hal::Operand& halOperand,
        const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);

// Validates an operand type.
//
// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
//
// If allowPartial is true, the dimensions may be underspecified.
int validateOperandType(
        const ANeuralNetworksOperandType& type,
        const hal::Extension::OperandTypeInformation* const extensionOperandTypeInfo,
        const char* tag, bool allowPartial);
int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                        const char* tag);

// A set of functions to help validate models containing IF or WHILE operations.
struct SubgraphValidationHelper {
    // Checks if a given operand is a SUBGRAPH operand with a valid offset.
    std::function<bool(const hal::Operand&)> isValidSubgraphReference;
    // Gets the input count of a subgraph referenced by a given operand.
    std::function<uint32_t(const hal::Operand&)> getSubgraphInputCount;
    // Gets the output count of a subgraph referenced by a given operand.
    std::function<uint32_t(const hal::Operand&)> getSubgraphOutputCount;
    // Gets the specified input operand of a subgraph referenced by a given operand.
    std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphInputOperand;
    // Gets the specified output operand of a subgraph referenced by a given operand.
    std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphOutputOperand;
    // Whether control flow operations with inner or outer input or output
    // operands of unknown size are allowed.
    bool allowControlFlowOperationWithOperandOfUnknownSize;
};

// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the
// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
// The last argument is only used for validating IF and WHILE operations.
int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
                      const uint32_t* inputIndexes, uint32_t outputCount,
                      const uint32_t* outputIndexes, const std::vector<hal::Operand>& operands,
                      HalVersion halVersion, const SubgraphValidationHelper& helper);

inline size_t getSizeFromInts(int lower, int higher) {
    return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32);
}
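
// getSizeFromInts reassembles a 64-bit size from its low and high 32-bit
// halves. For example (a hypothetical sketch of the expected result):
//
//   getSizeFromInts(/* lower */ 0x1, /* higher */ 0x2);  // 0x0000000200000001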

// Convert ANEURALNETWORKS_* result code to ErrorStatus.
// Not guaranteed to be a 1-to-1 mapping.
hal::ErrorStatus convertResultCodeToErrorStatus(int resultCode);

// Convert ErrorStatus to ANEURALNETWORKS_* result code.
// Not guaranteed to be a 1-to-1 mapping.
int convertErrorStatusToResultCode(hal::ErrorStatus status);

// Convert execution results to runtime format. Additionally checks that the
// returned results abide by the HAL specification, and logs an error if the
// result violates the specification.
std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> getExecutionResult(
        hal::ErrorStatus status, std::vector<hal::OutputShape> outputShapes, hal::Timing timing);

// Combine two tensor dimensions; either may have unspecified dimensions or rank.
std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
                                                       const std::vector<uint32_t>& rhs);
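
// Assuming the usual NNAPI convention that 0 marks an unspecified dimension
// and an empty vector marks unspecified rank, combining fills in whatever
// each side knows (a hypothetical sketch of the expected behavior):
//
//   combineDimensions({2, 0, 4}, {0, 3, 4});  // expected: {{2, 3, 4}}
//   combineDimensions({2, 3}, {4, 3});        // expected: std::nullopt (conflict)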

// Versioning

bool compliantWithV1_0(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_0(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_0(const hal::V1_2::Capabilities& capabilities);
bool compliantWithV1_0(const hal::V1_3::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_2::Capabilities& capabilities);
bool compliantWithV1_1(const hal::V1_3::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_2::Capabilities& capabilities);
bool compliantWithV1_2(const hal::V1_3::Capabilities& capabilities);
bool compliantWithV1_3(const hal::V1_0::Capabilities& capabilities);
bool compliantWithV1_3(const hal::V1_1::Capabilities& capabilities);
bool compliantWithV1_3(const hal::V1_2::Capabilities& capabilities);
bool compliantWithV1_3(const hal::V1_3::Capabilities& capabilities);

// If noncompliantOperations != nullptr, then
//     precondition: noncompliantOperations->empty()
//     postcondition: *noncompliantOperations consists of the indices of the noncompliant
//                    operations; if the compliance check fails for some reason
//                    other than a noncompliant operation,
//                    *noncompliantOperations consists of the indices of all operations
bool compliantWithV1_0(const hal::V1_0::Model& model);
bool compliantWithV1_0(const hal::V1_1::Model& model);
bool compliantWithV1_0(const hal::V1_2::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_0(const hal::V1_3::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_1(const hal::V1_0::Model& model);
bool compliantWithV1_1(const hal::V1_1::Model& model);
bool compliantWithV1_1(const hal::V1_2::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_1(const hal::V1_3::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_2(const hal::V1_0::Model& model);
bool compliantWithV1_2(const hal::V1_1::Model& model);
bool compliantWithV1_2(const hal::V1_2::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);
bool compliantWithV1_2(const hal::V1_3::Model& model,
                       std::set<uint32_t>* noncompliantOperations = nullptr);

hal::V1_0::ErrorStatus convertToV1_0(hal::V1_0::ErrorStatus status);
hal::V1_0::ErrorStatus convertToV1_0(hal::V1_3::ErrorStatus status);
hal::V1_3::ErrorStatus convertToV1_3(hal::V1_0::ErrorStatus status);
hal::V1_3::ErrorStatus convertToV1_3(hal::V1_3::ErrorStatus status);

hal::V1_0::Capabilities convertToV1_0(const hal::V1_0::Capabilities& capabilities);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_1::Capabilities& capabilities);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_2::Capabilities& capabilities);
hal::V1_0::Capabilities convertToV1_0(const hal::V1_3::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_0::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_1::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_2::Capabilities& capabilities);
hal::V1_1::Capabilities convertToV1_1(const hal::V1_3::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_0::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_1::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_2::Capabilities& capabilities);
hal::V1_2::Capabilities convertToV1_2(const hal::V1_3::Capabilities& capabilities);
hal::V1_3::Capabilities convertToV1_3(const hal::V1_0::Capabilities& capabilities);
hal::V1_3::Capabilities convertToV1_3(const hal::V1_1::Capabilities& capabilities);
hal::V1_3::Capabilities convertToV1_3(const hal::V1_2::Capabilities& capabilities);
hal::V1_3::Capabilities convertToV1_3(const hal::V1_3::Capabilities& capabilities);

hal::V1_0::Model convertToV1_0(const hal::V1_0::Model& model);
hal::V1_0::Model convertToV1_0(const hal::V1_1::Model& model);
hal::V1_0::Model convertToV1_0(const hal::V1_2::Model& model);
hal::V1_0::Model convertToV1_0(const hal::V1_3::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_0::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_1::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_2::Model& model);
hal::V1_1::Model convertToV1_1(const hal::V1_3::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_0::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_1::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_2::Model& model);
hal::V1_2::Model convertToV1_2(const hal::V1_3::Model& model);
hal::V1_3::Model convertToV1_3(const hal::V1_0::Model& model);
hal::V1_3::Model convertToV1_3(const hal::V1_1::Model& model);
hal::V1_3::Model convertToV1_3(const hal::V1_2::Model& model);
hal::V1_3::Model convertToV1_3(const hal::V1_3::Model& model);

hal::V1_0::OperationType uncheckedConvertToV1_0(hal::V1_3::OperationType type);
hal::V1_1::OperationType uncheckedConvertToV1_1(hal::V1_3::OperationType type);
hal::V1_2::OperationType uncheckedConvertToV1_2(hal::V1_3::OperationType type);

hal::V1_0::Operand convertToV1_0(const hal::V1_2::Operand& operand);
hal::V1_0::Operand convertToV1_0(const hal::V1_3::Operand& operand);
hal::V1_2::Operand convertToV1_2(const hal::V1_0::Operand& operand);
hal::V1_2::Operand convertToV1_2(const hal::V1_3::Operand& operand);
hal::V1_3::Operand convertToV1_3(const hal::V1_0::Operand& operand);
hal::V1_3::Operand convertToV1_3(const hal::V1_2::Operand& operand);
hal::V1_3::Operand convertToV1_3(const hal::V1_3::Operand& operand);

hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_0::Operand>& operands);
hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_2::Operand>& operands);
hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_3::Operand>& operands);
hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_0::Operand>& operands);
hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_2::Operand>& operands);
hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_3::Operand>& operands);
hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_0::Operand>& operands);
hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_2::Operand>& operands);
hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_3::Operand>& operands);

bool compliantWithV1_0(const hal::V1_0::Request& request);
bool compliantWithV1_0(const hal::V1_3::Request& request);
bool compliantWithV1_2(const hal::V1_3::Request& request);

hal::V1_0::Request convertToV1_0(const hal::V1_0::Request& request);
hal::V1_0::Request convertToV1_0(const hal::V1_3::Request& request);
hal::V1_0::Request convertToV1_2(const hal::V1_3::Request& request);
hal::V1_3::Request convertToV1_3(const hal::V1_0::Request& request);
hal::V1_3::Request convertToV1_3(const hal::V1_3::Request& request);

bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime);
bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime);
bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime);
bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime);

hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_0::OperandLifeTime lifetime);
hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_3::OperandLifeTime lifetime);
hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_0::OperandLifeTime lifetime);
hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_3::OperandLifeTime lifetime);

constexpr hal::Priority convertToHalPriority(int32_t priority) {
    switch (priority) {
        case ANEURALNETWORKS_PRIORITY_LOW:
            return hal::Priority::LOW;
        case ANEURALNETWORKS_PRIORITY_MEDIUM:
            return hal::Priority::MEDIUM;
        case ANEURALNETWORKS_PRIORITY_HIGH:
            return hal::Priority::HIGH;
    }
    LOG(FATAL) << "unrecognized priority: " << priority;
    return {};
}

// The function syncWait() has the same semantics as the system function
// ::sync_wait(), except that the syncWait() return value is semantically
// richer.  The timeout parameter is in msecs.
enum class FenceState {
    ACTIVE,    // fence has not been signaled
    SIGNALED,  // fence has been signaled
    ERROR,     // fence has been placed in the error state
    UNKNOWN,   // either bad argument passed to syncWait(), or internal error
};
FenceState syncWait(int fd, int timeout);
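
// Example usage of syncWait (a hypothetical sketch; fenceFd stands in for a
// sync fence file descriptor obtained elsewhere):
//
//   switch (syncWait(fenceFd, /* timeout in msecs */ 1000)) {
//       case FenceState::SIGNALED: /* results are ready */ break;
//       case FenceState::ACTIVE:   /* timed out; fence still pending */ break;
//       default:                   /* error or unknown state */ break;
//   }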

#ifdef NN_DEBUGGABLE
uint32_t getProp(const char* str, uint32_t defaultValue = 0);
#endif  // NN_DEBUGGABLE

}  // namespace nn
}  // namespace android

#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H