1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
// This file contains pre-canonical-types utility code and does not include HAL
// utilities. LegacyHalUtils.h is a superset of these utilities that includes
// HAL utilities.
19 
20 #ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_LEGACY_UTILS_H
21 #define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_LEGACY_UTILS_H
22 
23 #include <android-base/logging.h>
24 #include <nnapi/TypeUtils.h>
25 #include <nnapi/Types.h>
26 
27 #include <functional>
28 #include <tuple>
29 #include <utility>
30 #include <vector>
31 
32 #include "NeuralNetworks.h"
33 #include "OperationResolver.h"
34 #include "nnapi/TypeUtils.h"
35 #include "nnapi/Types.h"
36 
37 namespace android {
38 namespace nn {
39 
40 // The number of data types (OperandCode) defined in NeuralNetworksTypes.h.
41 const int kNumberOfDataTypes = 16;
42 
43 // The number of operation types (OperationCode) defined in NeuralNetworksTypes.h.
44 const int kNumberOfOperationTypes = 106;
45 
46 #ifdef NN_EXPERIMENTAL_FEATURE
47 const int kNumberOfExperimentalOperationTypes = 1;
48 #endif  // NN_EXPERIMENTAL_FEATURE
49 
50 static_assert(kNumberOfOperationTypes == BuiltinOperationResolver::kNumberOfOperationTypes);
51 
52 // The number of execution preferences defined in NeuralNetworks.h.
53 const int kNumberOfPreferences = 3;
54 
55 // The number of data types (OperandCode) defined in NeuralNetworksOEM.h.
56 const int kNumberOfDataTypesOEM = 2;
57 
58 // The number of operation types (OperationCode) defined in NeuralNetworksOEM.h.
59 const int kNumberOfOperationTypesOEM = 1;
60 
61 // The lowest number assigned to any OEM Code in NeuralNetworksOEM.h.
62 const int kOEMCodeBase = 10000;
63 
#ifdef NN_DEBUGGABLE
// In debuggable builds, SHOW_IF_DEBUG(msg) expands to the message itself.
#define SHOW_IF_DEBUG(msg) msg
#else
// In non-debuggable builds, the message is compiled out (empty string instead).
#define SHOW_IF_DEBUG(msg) ""
#endif
69 
// Evaluates `expr` exactly once; if the result is anything other than
// ANEURALNETWORKS_NO_ERROR, returns that error code from the enclosing
// function. The do/while(0) wrapper makes the macro behave as a single
// statement (safe in unbraced if/else bodies).
#define NN_RETURN_IF_ERROR(expr)                      \
    do {                                              \
        int _errorCode = (expr);                      \
        if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \
            return _errorCode;                        \
        }                                             \
    } while (0)
77 
// Identifies the NN HAL version that a driver or validation path targets.
// Enumerators are listed from oldest to newest, so relational comparisons
// between HalVersion values order them chronologically.
enum class HalVersion : int32_t {
    UNKNOWN,
    V1_0,
    V1_1,
    V1_2,
    V1_3,
    AIDL_V1,
    AIDL_V2,
    AIDL_UNSTABLE,
    // TODO(b/207721221): Add AIDL support to TestPartitioning so that LATEST can be set to AIDL
    //  version.
    LATEST = V1_3,
};
91 
92 std::ostream& operator<<(std::ostream& os, const HalVersion& halVersion);
93 
94 // Make a Duration from a duration in nanoseconds. If the value exceeds the max duration, return the
95 // maximum expressible duration.
96 Duration makeTimeoutDuration(uint64_t nanoseconds);
97 
98 // Make a Duration from a duration in nanoseconds. If the value exceeds the max duration, return the
99 // maximum expressible duration. If nanoseconds == -1, the duration is omitted. Precondition:
100 // nanoseconds >= -1
101 OptionalDuration makeTimeoutDuration(int64_t nanoseconds);
102 
103 // Make a deadline from a duration. If the sum of the current time and the
104 // duration exceeds the max time, return a time point holding the maximum
105 // expressible time.
106 TimePoint makeDeadline(Duration duration);
107 
makeDeadline(uint64_t duration)108 inline TimePoint makeDeadline(uint64_t duration) {
109     return makeDeadline(makeTimeoutDuration(duration));
110 }
111 
112 // Convenience function. If the duration is provided, this function creates a
113 // deadline using makeDeadline. If the duration is not provided, this function
114 // returns std::nullopt.
makeDeadline(OptionalDuration duration)115 inline OptionalTimePoint makeDeadline(OptionalDuration duration) {
116     return duration.has_value() ? std::make_optional(makeDeadline(*duration)) : OptionalTimePoint{};
117 }
makeDeadline(std::optional<uint64_t> duration)118 inline OptionalTimePoint makeDeadline(std::optional<uint64_t> duration) {
119     return duration.has_value() ? std::make_optional(makeDeadline(*duration)) : OptionalTimePoint{};
120 }
makeDeadline(int64_t duration)121 inline OptionalTimePoint makeDeadline(int64_t duration) {
122     return makeDeadline(makeTimeoutDuration(duration));
123 }
124 
125 // Returns true if the deadline has passed. Returns false if either the deadline
126 // has not been exceeded or if the deadline is not present.
127 bool hasDeadlinePassed(const OptionalTimePoint& deadline);
128 
129 // Returns true if an operand type is an extension type.
130 bool isExtensionOperandType(OperandType type);
131 
132 // Returns true if an operation type is an extension type.
133 bool isExtensionOperationType(OperationType type);
134 
135 // Returns the amount of space needed to store a value of the specified
136 // dimensions and type. For a tensor with unspecified rank or at least one
137 // unspecified dimension, returns zero.
138 //
139 // Aborts if the specified type is an extension type.
140 // Aborts if the size would overflow the return type.
141 //
142 // See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
143 uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
144 
145 // Returns the amount of space needed to store a value of the dimensions and
146 // type of this operand. For a tensor with unspecified rank or at least one
147 // unspecified dimension, returns zero.
148 //
149 // Aborts if the specified type is an extension type.
150 // Aborts if the size would overflow the return type.
151 //
152 // See also TypeManager::getSizeOfData(const Operand&).
nonExtensionOperandSizeOfData(const Operand & operand)153 inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
154     return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
155 }
156 
157 // Returns the amount of space needed to store a value of the specified
158 // dimensions and element size. For a tensor with unspecified rank or at least
159 // one unspecified dimension, returns zero.
160 //
161 // Aborts if the size would overflow the return type.
162 //
163 // See also TypeManager::getSizeOfData(const Operand&).
164 uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions);
165 
166 // Returns true if the amount of space needed to store a value of the specified
167 // dimensions and element size overflows the uint32_t type.
168 //
169 // Aborts if the specified type is an extension type.
170 //
171 // See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
172 bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
173                                                   const std::vector<uint32_t>& dimensions);
174 
175 // Returns true if the amount of space needed to store a value of the specified
176 // dimensions and element size overflows the uint32_t type.
177 //
178 // See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
179 bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions);
180 
181 // Returns true if a non-extension operand type is a scalar type.
182 //
183 // Aborts if the specified type is an extension type.
184 //
185 // See also TypeManager::isTensorType(OperandType).
186 bool nonExtensionOperandTypeIsScalar(int type);
187 
188 // Whether an operand of tensor type has unspecified dimensions.
189 //
190 // Undefined behavior if the operand type is a scalar type.
191 bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
192 bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions);
193 bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
194 
195 // Returns the number of padding bytes needed to align data starting at `index` with `length` number
196 // of bytes such that `index` + returned number of padding bytes is aligned. Refer to
197 // `getAlignmentForLength` for more information on alignment (such as what the current alignments
198 // are for different data lengths).
199 uint32_t alignBytesNeeded(uint32_t index, size_t length);
200 
201 // Does a detailed LOG(INFO) of the model
202 void logModelToInfo(const Model& model);
203 
validCode(uint32_t codeCount,uint32_t codeCountOEM,uint32_t code)204 inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) {
205     return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM);
206 }
207 
208 // Validates an operand type.
209 //
210 // extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
211 //
212 // If allowPartial is true, the dimensions may be underspecified.
213 int validateOperandType(const ANeuralNetworksOperandType& type,
214                         const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
215                         const char* tag, bool allowPartial);
216 int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
217                         const char* tag);
218 
// A set of functions to help validate models containing IF or WHILE operations.
// Each member is a caller-supplied std::function callback; the validator
// invokes them to inspect subgraphs referenced by SUBGRAPH operands.
struct SubgraphValidationHelper {
    // Checks if a given operand is a SUBGRAPH operand with a valid offset.
    std::function<bool(const Operand&)> isValidSubgraphReference;
    // Gets the input count of a subgraph referenced by a given operand.
    std::function<uint32_t(const Operand&)> getSubgraphInputCount;
    // Gets the output count of a subgraph referenced by a given operand.
    std::function<uint32_t(const Operand&)> getSubgraphOutputCount;
    // Gets the specified input operand of a subgraph referenced by a given operand.
    std::function<const Operand*(const Operand&, uint32_t)> getSubgraphInputOperand;
    // Gets the specified output operand of a subgraph referenced by a given operand.
    std::function<const Operand*(const Operand&, uint32_t)> getSubgraphOutputOperand;
    // Whether control flow operations with inner or outer input or output
    // operands of unknown size are allowed.
    bool allowControlFlowOperationWithOperandOfUnknownSize;
};
235 
236 // Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the
237 // provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
238 // The last argument is only used for validating IF and WHILE operations.
239 int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
240                       const uint32_t* inputIndexes, uint32_t outputCount,
241                       const uint32_t* outputIndexes, const std::vector<Operand>& operands,
242                       HalVersion halVersion, const SubgraphValidationHelper& helper);
243 
// Reassembles a 64-bit size that was split into two 32-bit halves: `lower`
// supplies the low 32 bits and `higher` the high 32 bits. Negative inputs are
// reinterpreted as their unsigned 32-bit bit patterns.
//
// Note: on a platform where size_t is 32 bits wide, the high word is
// truncated away by the final conversion.
inline size_t getSizeFromInts(int lower, int higher) {
    // Named casts instead of C-style casts make the reinterpretations explicit.
    const uint64_t low = static_cast<uint32_t>(lower);
    const uint64_t high = static_cast<uint64_t>(static_cast<uint32_t>(higher)) << 32;
    return static_cast<size_t>(low | high);
}
247 
248 // Convert ANEURALNETWORKS_* result code to ErrorStatus.
249 // Not guaranteed to be a 1-to-1 mapping.
250 ErrorStatus convertResultCodeToErrorStatus(int resultCode);
251 
252 // Convert ErrorStatus to ANEURALNETWORKS_* result code.
253 // Not guaranteed to be a 1-to-1 mapping.
254 int convertErrorStatusToResultCode(ErrorStatus status);
255 
256 // Convert execution results to runtime format. Additionally checks that the
257 // returned results abide by the HAL specification, and logs an error if the
258 // result violates the specification.
259 std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
260         ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing);
261 
convertToCanonicalPriority(int32_t priority)262 constexpr Priority convertToCanonicalPriority(int32_t priority) {
263     switch (priority) {
264         case ANEURALNETWORKS_PRIORITY_LOW:
265             return Priority::LOW;
266         case ANEURALNETWORKS_PRIORITY_MEDIUM:
267             return Priority::MEDIUM;
268         case ANEURALNETWORKS_PRIORITY_HIGH:
269             return Priority::HIGH;
270     }
271     LOG(FATAL) << "unrecognized priority: " << priority;
272     return {};
273 }
274 
// The function syncWait() has the same semantics as the system function
// ::sync_wait(), except that the syncWait() return value is semantically
// richer.  The timeout parameter is in msecs.
enum class FenceState {
    ACTIVE,    // fence has not been signaled
    SIGNALED,  // fence has been signaled
    ERROR,     // fence has been placed in the error state
    UNKNOWN,   // either bad argument passed to syncWait(), or internal error
};
// Waits up to `timeout` milliseconds for the sync fence `fd` and reports the
// resulting FenceState. NOTE(review): a negative timeout presumably blocks
// indefinitely, mirroring ::sync_wait() -- confirm against the implementation.
FenceState syncWait(int fd, int timeout);
285 
286 #ifdef NN_DEBUGGABLE
287 uint32_t getProp(const char* str, uint32_t defaultValue = 0);
288 #endif  // NN_DEBUGGABLE
289 
// Pairs a canonical Version with the corresponding NNAPI feature-level
// constant exposed through the NDK API.
struct ApiVersion {
    Version canonical;    // canonical (nnapi/Types.h) version representation
    int64_t featureLevel; // matching ANEURALNETWORKS_FEATURE_LEVEL_* value
};

// Mapping from each HIDL HAL version (V1_0..V1_3) to its canonical version
// and NDK feature level.
constexpr auto kHalVersionV1_0ToApi = ApiVersion{.canonical = kVersionFeatureLevel1,
                                                 .featureLevel = ANEURALNETWORKS_FEATURE_LEVEL_1};
constexpr auto kHalVersionV1_1ToApi = ApiVersion{.canonical = kVersionFeatureLevel2,
                                                 .featureLevel = ANEURALNETWORKS_FEATURE_LEVEL_2};
constexpr auto kHalVersionV1_2ToApi = ApiVersion{.canonical = kVersionFeatureLevel3,
                                                 .featureLevel = ANEURALNETWORKS_FEATURE_LEVEL_3};
constexpr auto kHalVersionV1_3ToApi = ApiVersion{.canonical = kVersionFeatureLevel4,
                                                 .featureLevel = ANEURALNETWORKS_FEATURE_LEVEL_4};
303 
// Utility that measures time period, in nanoseconds, from creation
// to destruction and stores result in the supplied memory location
// on destruction
struct [[nodiscard]] TimeNanoMeasurer {
    TimePoint start;   // timestamp captured at construction
    uint64_t* saveAt;  // destination for the elapsed nanoseconds; must be
                       // non-null and outlive this object, since the
                       // destructor writes through it unconditionally

    explicit TimeNanoMeasurer(uint64_t* saveAt) : start(Clock::now()), saveAt(saveAt) {}
    ~TimeNanoMeasurer() { *saveAt = currentDuration(start); }
    DISALLOW_COPY_AND_ASSIGN(TimeNanoMeasurer);

    // Returns the nanoseconds elapsed from `start` until now.
    static inline uint64_t currentDuration(const TimePoint& start) {
        auto end = Clock::now();
        return std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
    }
};
320 
321 }  // namespace nn
322 }  // namespace android
323 
324 #endif  // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_LEGACY_UTILS_H
325