/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Utils"

#include "LegacyUtils.h"

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <errno.h>
#include <nnapi/TypeUtils.h>
#include <poll.h>

#include <algorithm>
#include <functional>
#include <limits>
#include <numeric>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "ControlFlow.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"
#include "OperationResolver.h"

namespace android {
namespace nn {

std::ostream& operator<<(std::ostream& os, const HalVersion& halVersion) {
    switch (halVersion) {
        case HalVersion::UNKNOWN:
            return os << "UNKNOWN HAL version";
        case HalVersion::V1_0:
            return os << "HAL version 1.0";
        case HalVersion::V1_1:
            return os << "HAL version 1.1";
        case HalVersion::V1_2:
            return os << "HAL version 1.2";
        case HalVersion::V1_3:
            return os << "HAL version 1.3";
        case HalVersion::AIDL_V1:
            return os << "HAL version AIDL_V1";
        case HalVersion::AIDL_V2:
            return os << "HAL version AIDL_V2";
        case HalVersion::AIDL_UNSTABLE:
            return os << "HAL uses unstable AIDL";
    }
    return os << "HalVersion{" << static_cast<int32_t>(halVersion) << "}";
}

Duration makeTimeoutDuration(uint64_t nanoseconds) {
    constexpr auto kMaxCount = Duration::max().count();
    using CommonType = std::common_type_t<Duration::rep, uint64_t>;
    const auto count = std::min<CommonType>(kMaxCount, nanoseconds);
    return Duration{static_cast<Duration::rep>(count)};
}

OptionalDuration makeTimeoutDuration(int64_t nanoseconds) {
    CHECK_GE(nanoseconds, -1);
    if (nanoseconds == -1) {
        return OptionalDuration{};
    }
    return makeTimeoutDuration(static_cast<uint64_t>(nanoseconds));
}
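
// Illustrative behavior (not part of the original source; assumes OptionalDuration
// is std::optional<Duration>):
//   makeTimeoutDuration(int64_t{-1}).has_value() == false  // -1 means "no timeout"
//   makeTimeoutDuration(int64_t{500}) == OptionalDuration{Duration{500}}
//   makeTimeoutDuration(std::numeric_limits<uint64_t>::max()) == Duration::max()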

TimePoint makeDeadline(Duration duration) {
    constexpr auto kMaxTime = TimePoint::max();
    const auto currentTime = Clock::now();

    // If there would be an overflow, use the max value.
    if (duration > kMaxTime - currentTime) {
        return kMaxTime;
    }
    return currentTime + duration;
}

bool hasDeadlinePassed(const OptionalTimePoint& deadline) {
    if (!deadline.has_value()) {
        return false;
    }
    return Clock::now() >= *deadline;
}

static bool isExtensionOperandType(int32_t type) {
    return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}

static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
    return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
}

bool isExtensionOperandType(OperandType type) {
    return isExtensionOperandType(static_cast<int32_t>(type));
}

bool isExtensionOperationType(OperationType type) {
    return isExtensionOperationType(static_cast<int32_t>(type));
}
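
// Note on the encoding assumed by the checks above: the low kExtensionTypeBits
// bits of a type code hold the type within the extension, and the remaining high
// bits hold the extension prefix. A nonzero high part therefore marks an
// extension type; all non-extension codes fit entirely within the low bits.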

namespace {

template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
EntryType tableLookup(const EntryType (&table)[entryCount],
                      const EntryType (&tableOEM)[entryCountOEM], uint32_t code) {
    if (code < entryCount) {
        return table[code];
    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
        return tableOEM[code - kOEMCodeBase];
    } else {
        LOG(FATAL) << "tableLookup: bad code";
        return EntryType();
    }
}
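
// Illustrative lookup (hypothetical sizes): with entryCount == 16 and
// entryCountOEM == 2, code 3 reads table[3], code kOEMCodeBase + 1 reads
// tableOEM[1], and any other code aborts with LOG(FATAL).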

static Version convert(HalVersion halVersion) {
    switch (halVersion) {
        case HalVersion::UNKNOWN:
            break;
        case HalVersion::V1_0:
            return kVersionFeatureLevel1;
        case HalVersion::V1_1:
            return kVersionFeatureLevel2;
        case HalVersion::V1_2:
            return kVersionFeatureLevel3;
        case HalVersion::V1_3:
            return kVersionFeatureLevel4;
        case HalVersion::AIDL_V1:
            return kVersionFeatureLevel5;
        case HalVersion::AIDL_V2:
            return kVersionFeatureLevel6;
        case HalVersion::AIDL_UNSTABLE:
            return kVersionFeatureLevel7;
    }
    LOG(FATAL) << "Cannot convert " << halVersion;
    return {};
}

class OperationValidationContext : public IOperationValidationContext {
    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);

   public:
    OperationValidationContext(const char* operationName, uint32_t inputCount,
                               const uint32_t* inputIndexes, uint32_t outputCount,
                               const uint32_t* outputIndexes, const Operand* operands)
        : operationName(operationName),
          inputCount(inputCount),
          inputIndexes(inputIndexes),
          outputCount(outputCount),
          outputIndexes(outputIndexes),
          operands(operands) {}

    const char* getOperationName() const override;

    uint32_t getNumInputs() const override;
    OperandType getInputType(uint32_t index) const override;
    Shape getInputShape(uint32_t index) const override;
    const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;

    uint32_t getNumOutputs() const override;
    OperandType getOutputType(uint32_t index) const override;
    Shape getOutputShape(uint32_t index) const override;

   private:
    const Operand* getInputOperand(uint32_t index) const;
    const Operand* getOutputOperand(uint32_t index) const;

    const char* operationName;
    uint32_t inputCount;
    const uint32_t* inputIndexes;
    uint32_t outputCount;
    const uint32_t* outputIndexes;
    const Operand* operands;
};

const char* OperationValidationContext::getOperationName() const {
    return operationName;
}

const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(inputCount));
    return &operands[inputIndexes[index]];
}

const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(outputCount));
    return &operands[outputIndexes[index]];
}

uint32_t OperationValidationContext::getNumInputs() const {
    return inputCount;
}

uint32_t OperationValidationContext::getNumOutputs() const {
    return outputCount;
}

OperandType OperationValidationContext::getInputType(uint32_t index) const {
    return getInputOperand(index)->type;
}

Shape OperationValidationContext::getInputShape(uint32_t index) const {
    const Operand* operand = getInputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const {
    return getInputOperand(index)->extraParams;
}

OperandType OperationValidationContext::getOutputType(uint32_t index) const {
    return getOutputOperand(index)->type;
}

Shape OperationValidationContext::getOutputShape(uint32_t index) const {
    const Operand* operand = getOutputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

}  // anonymous namespace

#define COUNT(X) (sizeof(X) / sizeof(X[0]))

const uint32_t kSizeOfDataType[]{
        4,  // ANEURALNETWORKS_FLOAT32
        4,  // ANEURALNETWORKS_INT32
        4,  // ANEURALNETWORKS_UINT32
        4,  // ANEURALNETWORKS_TENSOR_FLOAT32
        4,  // ANEURALNETWORKS_TENSOR_INT32
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        1,  // ANEURALNETWORKS_BOOL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        2,  // ANEURALNETWORKS_TENSOR_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_BOOL8
        2,  // ANEURALNETWORKS_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
        0,  // ANEURALNETWORKS_MODEL
};

static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");

const bool kScalarDataType[]{
        true,   // ANEURALNETWORKS_FLOAT32
        true,   // ANEURALNETWORKS_INT32
        true,   // ANEURALNETWORKS_UINT32
        false,  // ANEURALNETWORKS_TENSOR_FLOAT32
        false,  // ANEURALNETWORKS_TENSOR_INT32
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        true,   // ANEURALNETWORKS_BOOL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        false,  // ANEURALNETWORKS_TENSOR_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_BOOL8
        true,   // ANEURALNETWORKS_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
        true,   // ANEURALNETWORKS_MODEL
};

static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");

const uint32_t kSizeOfDataTypeOEM[]{
        0,  // ANEURALNETWORKS_OEM
        1,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
              "kSizeOfDataTypeOEM is incorrect");

const bool kScalarDataTypeOEM[]{
        true,   // ANEURALNETWORKS_OEM
        false,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
              "kScalarDataTypeOEM is incorrect");

bool nonExtensionOperandTypeIsScalar(int type) {
    CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported";
    return tableLookup(kScalarDataType, kScalarDataTypeOEM, type);
}

uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
    const size_t size = getNonExtensionSize(type, dimensions).value();
    CHECK_LE(size, std::numeric_limits<uint32_t>::max());
    return size;
}

// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t.
static std::pair<bool, uint32_t> sizeOfTensorDataHelper(uint32_t sizeOfElement,
                                                        const std::vector<uint32_t>& dimensions) {
    if (dimensions.empty()) {
        return {false, 0};
    }
    uint64_t size = static_cast<uint64_t>(sizeOfElement);
    constexpr uint64_t kMaxSize = static_cast<uint64_t>(std::numeric_limits<uint32_t>::max());
    for (uint32_t d : dimensions) {
        size *= d;
        if (size > kMaxSize) return {true, 0};
    }
    return {false, static_cast<uint32_t>(size)};
}
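
// Worked example (illustrative): sizeOfElement == 4 with dimensions {2, 3, 4}
// yields {false, 96}; dimensions whose byte product exceeds UINT32_MAX yield
// {true, 0}; an empty dimension list (a tensor of unknown rank) yields {false, 0}.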

uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions) {
    const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions);
    CHECK(!overflow);
    return size;
}

bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
                                                  const std::vector<uint32_t>& dimensions) {
    CHECK(!isExtension(type)) << "Size of extension operand data is unknown";
    int n = static_cast<int>(type);
    uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
    return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
                   ? false
                   : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions);
}

bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement,
                                     const std::vector<uint32_t>& dimensions) {
    return sizeOfTensorDataHelper(sizeOfElement, dimensions).first;
}

bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) {
    if (!isExtensionOperandType(type)) {
        CHECK(!nonExtensionOperandTypeIsScalar(type))
                << "A scalar type can never have unspecified dimensions";
    }
    return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
}
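
// For example (illustrative): dimensions {2, 0, 3} have an unspecified middle
// dimension, and dimCount == 0 denotes a tensor of unknown rank; both cases
// return true.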

bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
    return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
}

uint32_t alignBytesNeeded(uint32_t index, size_t length) {
    uint32_t alignment = getAlignmentForLength(length);
    uint32_t pattern = alignment - 1;
    uint32_t extra = (~(index - 1)) & pattern;
    return extra;
}
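
// Worked example (illustrative, assuming getAlignmentForLength(8) == 4): for
// index = 6 and length = 8 we get alignment = 4, pattern = 3, and
// extra = (~5) & 3 = 2, so the next write at index 6 + 2 = 8 is 4-byte aligned.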

void logModelToInfo(const Model& model) {
    LOG(INFO) << model;
}

static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type";
    NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";
    return true;
}

static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type,
                                            const char* tag) {
    NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}

static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero";
    return true;
}

static bool validateTensorDimensions(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    if (!allowPartial) {
        NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions";
    }
    uint64_t size =
            isExtensionOperandType(type.type)
                    ? extensionOperandTypeInfo->byteSize
                    : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, static_cast<int>(type.type));
    constexpr uint64_t kMaxSize = std::numeric_limits<uint32_t>::max();
    for (uint32_t i = 0; i < type.dimensionCount; i++) {
        if (!allowPartial) {
            NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions";
        }
        if (type.dimensions[i] != 0) {
            size *= type.dimensions[i];
            NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize;
        }
    }
    return true;
}

static bool validateOperandTypeHelper(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr);
    if (isExtensionOperandType(type.type)) {
        NN_RET_CHECK(extensionOperandTypeInfo != nullptr);
        if (extensionOperandTypeInfo->isTensor) {
            NN_RET_CHECK(
                    validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
        } else {
            NN_RET_CHECK(validateScalarDimensions(type, tag));
        }
        return validateNoQuantParams(type, tag);
    }

    NN_RET_CHECK(extensionOperandTypeInfo == nullptr);
    NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type))
            << tag << " invalid OperandType: " << type.type;

    bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type);
    if (isScalar) {
        NN_RET_CHECK(validateScalarDimensions(type, tag));
        if (type.type != ANEURALNETWORKS_OEM_SCALAR) {  // Historically, we have allowed OEM types
                                                        // to use quantization parameters.
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    } else {
        NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
        if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
            NN_RET_CHECK(validateQuant8AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
            NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) {
            NN_RET_CHECK(validateQuant8SymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) {
            NN_RET_CHECK(validateQuant16AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
            NN_RET_CHECK(validateQuantSymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) {
            // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters.
        } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) {
            // Historically, we have allowed OEM types to use quantization parameters.
        } else {
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    }

    return true;
}

int validateOperandType(const ANeuralNetworksOperandType& type,
                        const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
                        const char* tag, bool allowPartial) {
    return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial)
                   ? ANEURALNETWORKS_NO_ERROR
                   : ANEURALNETWORKS_BAD_DATA;
}
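
// Illustrative usage (hypothetical values, not part of the original source):
//   uint32_t dims[] = {2, 3};
//   ANeuralNetworksOperandType t = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
//                                   .dimensionCount = 2,
//                                   .dimensions = dims,
//                                   .scale = 0.0f,
//                                   .zeroPoint = 0};
//   validateOperandType(t, nullptr, "example", /*allowPartial=*/false);
//   // returns ANEURALNETWORKS_NO_ERROR for this fully specified float tensor.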

int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                        const char* tag) {
    for (uint32_t i = 0; i < count; i++) {
        if (list[i] >= operandCount) {
            LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
                       << ", operandCount " << operandCount;
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}

int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount,
                                  const uint32_t* inOperandIndexes,
                                  const std::vector<OperandType>& inExpectedTypes,
                                  uint32_t outOperandCount, const uint32_t* outOperandIndexes,
                                  const std::vector<OperandType>& outExpectedInTypes) {
    if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) ||
        outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) {
        LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and "
                   << outExpectedInTypes.size() << " outputs, got " << inOperandCount
                   << " inputs and " << outOperandCount << " outputs";
        return ANEURALNETWORKS_BAD_DATA;
    }
    for (uint32_t i = 0; i < inOperandCount; i++) {
        if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
            LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type
                       << " for input " << i << ", expected " << inExpectedTypes[i];
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    for (uint32_t i = 0; i < outOperandCount; i++) {
        if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
            LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type
                       << " for output " << i << ", expected " << outExpectedInTypes[i];
            return ANEURALNETWORKS_BAD_DATA;
        }
    }

    return ANEURALNETWORKS_NO_ERROR;
}

static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
                              HalVersion minSupportedHalVersion) {
    if (halVersion < minSupportedHalVersion) {
        LOG(ERROR) << "The given inputs and outputs for operation " << opType
                   << " are only supported in " << minSupportedHalVersion
                   << " and later (validating using " << halVersion << ")";
        return ANEURALNETWORKS_BAD_DATA;
    }
    return ANEURALNETWORKS_NO_ERROR;
}

// Checks if two operands have the same types, ranks (if specified), dimensions
// (if specified), scales, zeroPoints, and extraParams.
static bool compatible(const Operand& a, const Operand& b) {
    NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type;
    if (a.dimensions.size() != 0 && b.dimensions.size() != 0) {
        NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions";
        for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) {
            if (a.dimensions[i] != 0 && b.dimensions[i] != 0) {
                NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions";
            }
        }
    }
    NN_RET_CHECK_EQ(a.scale, b.scale);
    NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint);
    NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams;
    return true;
}
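
// For example (illustrative): two TENSOR_FLOAT32 operands with dimensions
// {2, 0} and {2, 3} are compatible (0 means "unspecified"), whereas {2, 4}
// and {2, 3} are not.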

static bool validateConditionOperand(const Operand& operand) {
    NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8)
            << "Unexpected condition operand type: " << operand.type;
    NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton";
    NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton";
    return true;
}

static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) {
    CHECK(helper.isValidSubgraphReference != nullptr);
    CHECK(helper.getSubgraphInputCount != nullptr);
    CHECK(helper.getSubgraphOutputCount != nullptr);
    CHECK(helper.getSubgraphInputOperand != nullptr);
    CHECK(helper.getSubgraphOutputOperand != nullptr);
}

static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
                                const uint32_t* outputs, const std::vector<Operand>& operands,
                                const SubgraphValidationHelper& helper) {
    namespace op = operation_if;
    checkSubgraphValidationHelper(helper);
    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs";
    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output";
    auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand);
        const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount);
        NN_RET_CHECK_EQ(outputCount, branchModelOutputCount);
        for (uint32_t i = 0; i < branchModelInputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        for (uint32_t i = 0; i < branchModelOutputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i);
            const Operand& outerOperand = operands[outputs[i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        return true;
    };
    NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]]))
            << "Validation failed for IF condition operand";
    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]]))
            << "Validation failed for IF then model";
    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]]))
            << "Validation failed for IF else model";
    return true;
}

static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper,
                                                  const Operand& operand) {
    if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) {
        NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u);
    }
    return true;
}

static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs,
                                   uint32_t outputCount, const uint32_t* outputs,
                                   const std::vector<Operand>& operands,
                                   const SubgraphValidationHelper& helper) {
    // Let the loop have
    // - m >= 1 input-output operands,
    // - k >= 0 state-only operands, and
    // - n >= 0 input-only operands.
    // Then
    // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs.
    // - the condition model has (m + k + n) inputs and 1 output.
    // - the body model has (m + k + n) inputs and (m + k) outputs.
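    // For example (illustrative), with m = 2, k = 1, n = 1: the WHILE operation
    // has 2 + 2 + 1 + 1 = 6 inputs and 2 outputs, the condition model has 4
    // inputs and 1 output, and the body model has 4 inputs and 3 outputs.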
    namespace op = operation_while;
    checkSubgraphValidationHelper(helper);
    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs";
    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output";
    auto validateCondOperand = [&](const Operand& condModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand);
        const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount);
        NN_RET_CHECK_EQ(condModelOutputCount, 1u);
        for (uint32_t i = 0; i < condModelInputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
        }
        NN_RET_CHECK(
                validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0)));
        return true;
    };
    auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand);
        const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount);
        NN_RET_CHECK_GE(bodyModelOutputCount, outputCount);
        NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount);
        const uint32_t inputOutputCount = outputCount;
        const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount;
        const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount;
        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
        }
        for (uint32_t i = 0; i < inputOutputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
            const Operand& outerOperand = operands[outputs[i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
        }
        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) {
            const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
            const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
            NN_RET_CHECK(compatible(inputOperand, outputOperand));
            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand));
        }
        return true;
    };
    NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]]))
            << "Validation failed for WHILE condition model";
    NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]]))
            << "Validation failed for WHILE body model";
    return true;
}

int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
                      const uint32_t* inputIndexes, uint32_t outputCount,
                      const uint32_t* outputIndexes, const std::vector<Operand>& operands,
                      HalVersion halVersion, const SubgraphValidationHelper& helper) {
    NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes,
                                           static_cast<uint32_t>(operands.size()),
                                           "ANeuralNetworksModel_addOperation inputs"));
    NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes,
                                           static_cast<uint32_t>(operands.size()),
                                           "ANeuralNetworksModel_addOperation outputs"));

    if (isExtensionOperationType(opType)) {
        if (halVersion < HalVersion::V1_2) {
            LOG(ERROR)
                    << "Extension operations are supported since HAL version 1.2, validating using "
                    << halVersion;
            return ANEURALNETWORKS_BAD_DATA;
        }
        // There is no other validation we can do for an extension operation.
        return ANEURALNETWORKS_NO_ERROR;
    }

    auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
        LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
                   << ") or output operands (" << outputCount << ", expected " << expOut
                   << ") for operation " << opType;
    };

    switch (opType) {
        case ANEURALNETWORKS_OEM_OPERATION: {
            return ANEURALNETWORKS_NO_ERROR;
        }
        case ANEURALNETWORKS_RESHAPE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else if (inputType == OperandType::TENSOR_INT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::AIDL_V2));
                inExpectedTypes = {OperandType::TENSOR_INT32, OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_INT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
            if (inputRank > 4) {
                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEPTH_TO_SPACE: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_DEPTH: {
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
            std::vector<OperandType> outExpectedTypes = {inputType};
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else if (inputType == OperandType::TENSOR_INT32 ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
            if (inputCount != 3 || outputCount != 2) {
                logInvalidInOutNumber(3, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[2]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
                                                        OperandType::TENSOR_INT32, inputType};
            std::vector<OperandType> outExpectedTypes = {inputType,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LSH_PROJECTION: {
            if (inputCount != 4 || outputCount != 1) {
                logInvalidInOutNumber(4, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto hashType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (hashType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        inputType,
                        OperandType::TENSOR_FLOAT16,
                        OperandType::INT32,
                };
            } else if (hashType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        inputType,
                        OperandType::TENSOR_FLOAT32,
                        OperandType::INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported hash tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: {
            const uint32_t kNumOutputs = 2;
            const uint32_t kNumOutputsMerged = 1;
            const uint32_t kNumOutputsWithState = 6;
            const uint32_t kNumOutputsMergedWithState = 5;
            if (inputCount != 61 ||
                (outputCount != kNumOutputs && outputCount != kNumOutputsMerged &&
                 outputCount != kNumOutputsWithState &&
                 outputCount != kNumOutputsMergedWithState)) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 61) or output operands (" << outputCount
                           << ", expected 1, 2, 5 or 6) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }

            std::vector<OperandType> inExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }

            inExpectedTypes = {};
            for (int i = 0; i < 48; ++i) {
                inExpectedTypes.push_back(inputType);
            }
            inExpectedTypes.push_back(OperandType::INT32);
            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
                                              ? OperandType::FLOAT32
                                              : OperandType::FLOAT16);
            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
                                              ? OperandType::FLOAT32
                                              : OperandType::FLOAT16);
            inExpectedTypes.push_back(OperandType::BOOL);
            inExpectedTypes.push_back(OperandType::BOOL);
            for (int i = 0; i < 8; ++i) {
                inExpectedTypes.push_back(inputType);
            }

            HalVersion minSupportedHalVersion = HalVersion::V1_2;
            if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) {
                minSupportedHalVersion = HalVersion::V1_3;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion));
            std::vector<OperandType> outExpectedTypes(outputCount, inputType);
            auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                        inExpectedTypes, outputCount, outputIndexes,
                                                        outExpectedTypes);
            return status;
        }
        case ANEURALNETWORKS_LSTM: {
            if ((inputCount != 23 && inputCount != 27) || outputCount != 4) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 23 or 27) or output operands (" << outputCount
                           << ", expected 4) for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }

            inExpectedTypes = {inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               OperandType::INT32};
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes.push_back(OperandType::FLOAT32);
                inExpectedTypes.push_back(OperandType::FLOAT32);
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes.push_back(OperandType::FLOAT16);
                inExpectedTypes.push_back(OperandType::FLOAT16);
            }

            outExpectedTypes = {inputType, inputType, inputType, inputType};
            if (inputCount == 23) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                for (int i = 0; i < 4; ++i) {
                    inExpectedTypes.push_back(inputType);
                }
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: {
            if (inputCount != 15 || outputCount != 2) {
                logInvalidInOutNumber(15, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            std::vector<OperandType> inExpectedTypes = {
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_QUANT16_SYMM,
                    OperandType::TENSOR_QUANT8_ASYMM};
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RANDOM_MULTINOMIAL: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        inputType,
                        OperandType::INT32,
                        OperandType::TENSOR_INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RNN: {
            if (inputCount != 6 || outputCount != 2) {
                logInvalidInOutNumber(6, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32,
                };
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SVDF: {
            if (inputCount != 7 || outputCount != 2) {
                logInvalidInOutNumber(7, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {
                    inputType, inputType,         inputType,         inputType,
                    inputType, OperandType::INT32, OperandType::INT32,
            };
            std::vector<OperandType> outExpectedTypes = {inputType, inputType};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
1146 case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
1147 if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
1148 LOG(ERROR) << "Invalid number of input operands (" << inputCount
1149 << ", expected 3 or 2) or output operands (" << outputCount
1150 << ", expected 1) for operation " << opType;
1151 return ANEURALNETWORKS_BAD_DATA;
1152 }
1153 auto inputType = operands[inputIndexes[0]].type;
1154 std::vector<OperandType> inExpectedTypes;
1155 std::vector<OperandType> outExpectedTypes;
1156 if (inputType == OperandType::TENSOR_FLOAT32) {
1157 inExpectedTypes = {
1158 OperandType::TENSOR_FLOAT32,
1159 OperandType::TENSOR_INT32,
1160 };
1161 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
1162 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1163 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1164 inExpectedTypes = {
1165 OperandType::TENSOR_FLOAT16,
1166 OperandType::TENSOR_INT32,
1167 };
1168 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1169 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1170 inExpectedTypes = {
1171 OperandType::TENSOR_QUANT8_ASYMM,
1172 OperandType::TENSOR_INT32,
1173 };
1174 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
1175 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1176 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1177 inExpectedTypes = {
1178 OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
1179 OperandType::TENSOR_INT32,
1180 };
1181 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
1182 } else {
1183 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1184 return ANEURALNETWORKS_BAD_DATA;
1185 }
1186 if (inputCount == 3) {
1187 inExpectedTypes.push_back(OperandType::BOOL);
1188 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1189 } else {
1190 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1191 }
1192 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1193 inExpectedTypes, outputCount, outputIndexes,
1194 outExpectedTypes);
1195 }
1196 case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
1197 if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
1198 LOG(ERROR) << "Invalid number of input operands (" << inputCount
1199 << ", expected 4 or 3) or output operands (" << outputCount
1200 << ", expected 1) for operation " << opType;
1201 return ANEURALNETWORKS_BAD_DATA;
1202 }
1203 auto inputType = operands[inputIndexes[0]].type;
1204 std::vector<OperandType> inExpectedTypes;
1205 std::vector<OperandType> outExpectedTypes;
1206 if (inputType == OperandType::TENSOR_FLOAT32) {
1207 inExpectedTypes = {
1208 OperandType::TENSOR_FLOAT32,
1209 OperandType::TENSOR_INT32,
1210 OperandType::TENSOR_INT32,
1211 };
1212 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
1213 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1214 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1215 inExpectedTypes = {
1216 OperandType::TENSOR_FLOAT16,
1217 OperandType::TENSOR_INT32,
1218 OperandType::TENSOR_INT32,
1219 };
1220 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1221 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1222 if (operands[inputIndexes[0]].zeroPoint != 0) {
1223 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1224 }
1225 inExpectedTypes = {
1226 OperandType::TENSOR_QUANT8_ASYMM,
1227 OperandType::TENSOR_INT32,
1228 OperandType::TENSOR_INT32,
1229 };
1230 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
1231 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1232 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1233 inExpectedTypes = {
1234 OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
1235 OperandType::TENSOR_INT32,
1236 OperandType::TENSOR_INT32,
1237 };
1238 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
1239 } else {
1240 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1241 return ANEURALNETWORKS_BAD_DATA;
1242 }
1243 if (inputCount == 4) {
1244 inExpectedTypes.push_back(OperandType::BOOL);
1245 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1246 } else {
1247 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1248 }
1249 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1250 inExpectedTypes, outputCount, outputIndexes,
1251 outExpectedTypes);
1252 }
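        // Note: HAL 1.1 only committed to SPACE_TO_BATCH_ND on quantized tensors
        // with a zero point of 0, which is why a nonzero zeroPoint above bumps
        // the minimum required version to V1_2.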
1253 case ANEURALNETWORKS_PAD: {
1254 if (inputCount != 2 || outputCount != 1) {
1255 logInvalidInOutNumber(2, 1);
1256 return ANEURALNETWORKS_BAD_DATA;
1257 }
1258 auto inputType = operands[inputIndexes[0]].type;
1259 std::vector<OperandType> inExpectedTypes;
1260 std::vector<OperandType> outExpectedTypes;
1261 if (inputType == OperandType::TENSOR_FLOAT32) {
1262 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1263 inExpectedTypes = {
1264 OperandType::TENSOR_FLOAT32,
1265 OperandType::TENSOR_INT32,
1266 };
1267 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
1268 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1269 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1270 inExpectedTypes = {
1271 OperandType::TENSOR_FLOAT16,
1272 OperandType::TENSOR_INT32,
1273 };
1274 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1275 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1276 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1277 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1278 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1279 } else {
1280 if (operands[inputIndexes[0]].zeroPoint == 0) {
1281 NN_RETURN_IF_ERROR(
1282 validateHalVersion(opType, halVersion, HalVersion::V1_1));
1283 } else {
1284 NN_RETURN_IF_ERROR(
1285 validateHalVersion(opType, halVersion, HalVersion::V1_2));
1286 }
1287 }
1288 inExpectedTypes = {
1289 inputType,
1290 OperandType::TENSOR_INT32,
1291 };
1292 outExpectedTypes = {inputType};
1293 } else {
1294 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1295 return ANEURALNETWORKS_BAD_DATA;
1296 }
1297 const auto inputRank = operands[inputIndexes[0]].dimensions.size();
1298 if (inputRank > 4) {
1299 LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
1300 return ANEURALNETWORKS_BAD_DATA;
1301 }
1302 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1303 inExpectedTypes, outputCount, outputIndexes,
1304 outExpectedTypes);
1305 }
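        // Note: PAD validation above also rejects tensors of rank > 4, and the
        // same nonzero-zeroPoint rule as SPACE_TO_BATCH_ND applies on the
        // quantized path (V1_1 only when zeroPoint == 0, V1_2 otherwise).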
1306 case ANEURALNETWORKS_PAD_V2: {
1307 if (inputCount != 3 || outputCount != 1) {
1308 logInvalidInOutNumber(3, 1);
1309 return ANEURALNETWORKS_BAD_DATA;
1310 }
1311 auto inputType = operands[inputIndexes[0]].type;
1312 std::vector<OperandType> inExpectedTypes;
1313 std::vector<OperandType> outExpectedTypes;
1314 if (inputType == OperandType::TENSOR_FLOAT32) {
1315 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1316 inExpectedTypes = {
1317 OperandType::TENSOR_FLOAT32,
1318 OperandType::TENSOR_INT32,
1319 OperandType::FLOAT32,
1320 };
1321 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
1322 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1323 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1324 inExpectedTypes = {
1325 OperandType::TENSOR_FLOAT16,
1326 OperandType::TENSOR_INT32,
1327 OperandType::FLOAT16,
1328 };
1329 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1330 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1331 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1332 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1333 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1334 } else {
1335 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1336 }
1337 inExpectedTypes = {
1338 inputType,
1339 OperandType::TENSOR_INT32,
1340 OperandType::INT32,
1341 }; // TODO(b/116699425): Make it UINT8.
1342 outExpectedTypes = {inputType};
1343 } else {
1344 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1345 return ANEURALNETWORKS_BAD_DATA;
1346 }
1347 const auto inputRank = operands[inputIndexes[0]].dimensions.size();
1348 if (inputRank > 4) {
1349 LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
1350 return ANEURALNETWORKS_BAD_DATA;
1351 }
1352 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1353 inExpectedTypes, outputCount, outputIndexes,
1354 outExpectedTypes);
1355 }
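        // Note: for PAD_V2 the third operand (the pad value) tracks the tensor
        // type: FLOAT32 tensors take a FLOAT32 scalar, FLOAT16 tensors a FLOAT16
        // scalar, and the quantized types currently take an INT32 scalar (see the
        // TODO above about eventually switching that to UINT8).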
1356 case ANEURALNETWORKS_CAST: {
1357 if (inputCount != 1 || outputCount != 1) {
1358 logInvalidInOutNumber(1, 1);
1359 return ANEURALNETWORKS_BAD_DATA;
1360 }
1361             const auto& inputOperand = operands[inputIndexes[0]];
1362             const auto& outputOperand = operands[outputIndexes[0]];
1363 auto inputType = inputOperand.type;
1364 auto outputType = outputOperand.type;
1365 std::vector<OperandType> inExpectedTypes;
1366 std::vector<OperandType> outExpectedTypes;
1367 if ((inputType == OperandType::TENSOR_FLOAT16 ||
1368 inputType == OperandType::TENSOR_FLOAT32 ||
1369 inputType == OperandType::TENSOR_INT32 ||
1370 inputType == OperandType::TENSOR_QUANT8_ASYMM) &&
1371 (outputType == OperandType::TENSOR_FLOAT16 ||
1372 outputType == OperandType::TENSOR_FLOAT32 ||
1373 outputType == OperandType::TENSOR_INT32 ||
1374 outputType == OperandType::TENSOR_QUANT8_ASYMM)) {
1375 inExpectedTypes = {inputType};
1376 outExpectedTypes = {outputType};
1377 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1378 } else if (inputType == OperandType::TENSOR_BOOL8 ||
1379 inputType == OperandType::TENSOR_QUANT16_ASYMM ||
1380 inputType == OperandType::TENSOR_QUANT16_SYMM ||
1381 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
1382 inputType == OperandType::TENSOR_QUANT8_SYMM) {
1383 inExpectedTypes = {inputType};
1384 outExpectedTypes = {inputType}; // Only identity CAST is supported.
1385 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1386 } else {
1387 LOG(ERROR) << "Unsupported data type for operation " << opType;
1388 return ANEURALNETWORKS_BAD_DATA;
1389 }
1390 // Validate that output shape is equal to input shape if dimensions
1391 // are already known.
1392 auto getNumberOfElements = [](const std::vector<uint32_t>& dims) {
1393 if (dims.size() == 0) {
1394 return 0;
1395 }
1396 return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>());
1397 };
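            // A dimension of size 0 denotes an unspecified dimension in NNAPI, so
            // the element count above evaluates to 0 and the shape-equality check
            // below is skipped whenever the output shape is not fully specified.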
1398 if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 &&
1399 getNumberOfElements(outputOperand.dimensions) != 0 &&
1400 inputOperand.dimensions != outputOperand.dimensions) {
1401 return ANEURALNETWORKS_BAD_DATA;
1402 }
1403 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1404 inExpectedTypes, outputCount, outputIndexes,
1405 outExpectedTypes);
1406 }
1407 case ANEURALNETWORKS_MEAN: {
1408 if (inputCount != 3 || outputCount != 1) {
1409 logInvalidInOutNumber(3, 1);
1410 return ANEURALNETWORKS_BAD_DATA;
1411 }
1412 const auto inputRank = operands[inputIndexes[0]].dimensions.size();
1413 if (inputRank > 4) {
1414 LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
1415 return ANEURALNETWORKS_BAD_DATA;
1416 }
1417 auto inputType = operands[inputIndexes[0]].type;
1418 if (inputType == OperandType::TENSOR_FLOAT32) {
1419 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1420 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1421 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1422 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1423 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
1424 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1425 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1426 } else {
1427 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1428 return ANEURALNETWORKS_BAD_DATA;
1429 }
1430 std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32,
1431 OperandType::INT32};
1432 std::vector<OperandType> outExpectedTypes = {inputType};
1433 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1434 inExpectedTypes, outputCount, outputIndexes,
1435 outExpectedTypes);
1436 }
1437 case ANEURALNETWORKS_ARGMAX:
1438 case ANEURALNETWORKS_ARGMIN: {
1439 if (inputCount != 2 || outputCount != 1) {
1440 logInvalidInOutNumber(2, 1);
1441 return ANEURALNETWORKS_BAD_DATA;
1442 }
1443 auto inputType = operands[inputIndexes[0]].type;
1444 std::vector<OperandType> inExpectedTypes;
1445 std::vector<OperandType> outExpectedTypes;
1446 if (inputType == OperandType::TENSOR_FLOAT16 ||
1447 inputType == OperandType::TENSOR_FLOAT32 ||
1448 inputType == OperandType::TENSOR_INT32 ||
1449 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1450 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1451 inExpectedTypes = {inputType, OperandType::INT32};
1452 outExpectedTypes = {OperandType::TENSOR_INT32};
1453 } else {
1454 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1455 return ANEURALNETWORKS_BAD_DATA;
1456 }
1457 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1458 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1459 inExpectedTypes, outputCount, outputIndexes,
1460 outExpectedTypes);
1461 }
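        // Note: ARGMAX/ARGMIN produce indices, so the output type above is always
        // TENSOR_INT32 regardless of the input tensor type.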
1462 case ANEURALNETWORKS_EXPAND_DIMS: {
1463 if (inputCount != 2 || outputCount != 1) {
1464 logInvalidInOutNumber(2, 1);
1465 return ANEURALNETWORKS_BAD_DATA;
1466 }
1467 auto inputType = operands[inputIndexes[0]].type;
1468 std::vector<OperandType> inExpectedTypes;
1469 std::vector<OperandType> outExpectedTypes;
1470 if (inputType == OperandType::TENSOR_FLOAT16 ||
1471 inputType == OperandType::TENSOR_FLOAT32 ||
1472 inputType == OperandType::TENSOR_INT32 ||
1473 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1474 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1475 inExpectedTypes = {inputType, OperandType::INT32};
1476 outExpectedTypes = {inputType};
1477 } else {
1478 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1479 return ANEURALNETWORKS_BAD_DATA;
1480 }
1481 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1482 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1483 } else {
1484 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1485 }
1486 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1487 inExpectedTypes, outputCount, outputIndexes,
1488 outExpectedTypes);
1489 }
1490 case ANEURALNETWORKS_SPLIT: {
1491 if (inputCount != 3) {
1492                 LOG(ERROR) << "Invalid number of input operands (" << inputCount
1493                            << ", expected 3) for operation " << opType;
1494 return ANEURALNETWORKS_BAD_DATA;
1495 }
1496 auto inputType = operands[inputIndexes[0]].type;
1497 if (inputType != OperandType::TENSOR_FLOAT16 &&
1498 inputType != OperandType::TENSOR_FLOAT32 &&
1499 inputType != OperandType::TENSOR_INT32 &&
1500 inputType != OperandType::TENSOR_QUANT8_ASYMM &&
1501 inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1502 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1503 return ANEURALNETWORKS_BAD_DATA;
1504 }
1505 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1506 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1507 } else {
1508 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1509 }
1510 std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32,
1511 OperandType::INT32};
1512 std::vector<OperandType> outExpectedTypes(outputCount, inputType);
1513 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1514 inExpectedTypes, outputCount, outputIndexes,
1515 outExpectedTypes);
1516 }
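        // Note: per the NNAPI spec, the two INT32 scalars are the split axis and
        // the number of splits; outExpectedTypes is sized to outputCount, so every
        // declared output is checked against the input tensor type.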
1517 case ANEURALNETWORKS_MAXIMUM:
1518 case ANEURALNETWORKS_MINIMUM: {
1519 if (inputCount != 2 || outputCount != 1) {
1520 logInvalidInOutNumber(2, 1);
1521 return ANEURALNETWORKS_BAD_DATA;
1522 }
1523 std::vector<OperandType> inExpectedTypes;
1524 std::vector<OperandType> outExpectedTypes;
1525 OperandType inputType = operands[inputIndexes[0]].type;
1526 if (inputType == OperandType::TENSOR_FLOAT16 ||
1527 inputType == OperandType::TENSOR_FLOAT32 ||
1528 inputType == OperandType::TENSOR_INT32 ||
1529 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1530 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1531 inExpectedTypes = {inputType, inputType};
1532 outExpectedTypes = {inputType};
1533 } else {
1534 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1535 return ANEURALNETWORKS_BAD_DATA;
1536 }
1537 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1538 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1539 } else {
1540 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1541 }
1542 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1543 inExpectedTypes, outputCount, outputIndexes,
1544 outExpectedTypes);
1545 }
1546 case ANEURALNETWORKS_GROUPED_CONV_2D: {
1547 if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
1548 LOG(ERROR) << "Invalid number of input operands (" << inputCount
1549 << ", expected 12 or 9) or output operands (" << outputCount
1550 << ", expected 1) for operation " << opType;
1551 return ANEURALNETWORKS_BAD_DATA;
1552 }
1553 auto inputType = operands[inputIndexes[0]].type;
1554 auto filterType = operands[inputIndexes[1]].type;
1555 std::vector<OperandType> inExpectedTypes;
1556 std::vector<OperandType> outExpectedTypes;
1557 if (inputType == OperandType::TENSOR_FLOAT32) {
1558 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
1559 OperandType::TENSOR_FLOAT32, OperandType::INT32,
1560 OperandType::INT32, OperandType::INT32,
1561 OperandType::INT32, OperandType::INT32};
1562 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
1563 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1564 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
1565 OperandType::TENSOR_FLOAT16, OperandType::INT32,
1566 OperandType::INT32, OperandType::INT32,
1567 OperandType::INT32, OperandType::INT32};
1568 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
1569 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1570 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1571 if (filterType != inputType &&
1572 filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
1573 LOG(ERROR) << "Unsupported filter tensor type for operation " << opType;
1574 return ANEURALNETWORKS_BAD_DATA;
1575 }
1576
1577 if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
1578 std::get<Operand::SymmPerChannelQuantParams>(
1579 operands[inputIndexes[1]].extraParams)
1580 .channelDim != 0) {
1581 LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
1582 << opType;
1583 return ANEURALNETWORKS_BAD_DATA;
1584 }
1585
1586 inExpectedTypes = {
1587 inputType, filterType, OperandType::TENSOR_INT32,
1588 OperandType::INT32, OperandType::INT32, OperandType::INT32,
1589 OperandType::INT32, OperandType::INT32};
1590 outExpectedTypes = {inputType};
1591 } else {
1592 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1593 return ANEURALNETWORKS_BAD_DATA;
1594 }
1595
1596 if (inputCount == 12) {
1597 std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
1598 inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
1599 explicitScalarTypes.end());
1600 }
1601 inExpectedTypes.push_back(OperandType::BOOL);
1602 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1603 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1604 } else {
1605 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1606 }
1607 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1608 inExpectedTypes, outputCount, outputIndexes,
1609 outExpectedTypes);
1610 }
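        // Note: for per-channel-quantized filters, GROUPED_CONV_2D only accepts
        // quantization along channel dimension 0 (checked above); the trailing
        // BOOL is the NCHW data-layout flag common to the HAL 1.2 convolutions.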
1611 case ANEURALNETWORKS_TILE: {
1612 if (inputCount != 2 || outputCount != 1) {
1613 logInvalidInOutNumber(2, 1);
1614 return ANEURALNETWORKS_BAD_DATA;
1615 }
1616 auto inputType = operands[inputIndexes[0]].type;
1617 std::vector<OperandType> inExpectedTypes;
1618 std::vector<OperandType> outExpectedTypes;
1619 if (inputType == OperandType::TENSOR_FLOAT16 ||
1620 inputType == OperandType::TENSOR_FLOAT32 ||
1621 inputType == OperandType::TENSOR_INT32 ||
1622 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1623 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1624 inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
1625 outExpectedTypes = {inputType};
1626 } else {
1627 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1628 return ANEURALNETWORKS_BAD_DATA;
1629 }
1630 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1631 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1632 } else {
1633 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1634 }
1635 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1636 inExpectedTypes, outputCount, outputIndexes,
1637 outExpectedTypes);
1638 }
1639 case ANEURALNETWORKS_POW: {
1640 if (inputCount != 2 || outputCount != 1) {
1641 logInvalidInOutNumber(2, 1);
1642 return ANEURALNETWORKS_BAD_DATA;
1643 }
1644 auto inputType = operands[inputIndexes[0]].type;
1645 std::vector<OperandType> inExpectedTypes;
1646 std::vector<OperandType> outExpectedTypes;
1647 if (inputType == OperandType::TENSOR_FLOAT16 ||
1648 inputType == OperandType::TENSOR_FLOAT32) {
1649 inExpectedTypes = {inputType, inputType};
1650 outExpectedTypes = {inputType};
1651 } else {
1652 LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
1653 return ANEURALNETWORKS_BAD_DATA;
1654 }
1655             // Only float inputs reach this point (see the type check above), so
1656             // the quantized branch of the usual per-type version dispatch would
1657             // be dead code here: POW is uniformly gated on HAL 1.2.
1658             NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1660 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1661 inExpectedTypes, outputCount, outputIndexes,
1662 outExpectedTypes);
1663 }
1664 case ANEURALNETWORKS_IF: {
1665 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1666 return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes,
1667 operands, helper)
1668 ? ANEURALNETWORKS_NO_ERROR
1669 : ANEURALNETWORKS_BAD_DATA;
1670 }
1671 case ANEURALNETWORKS_WHILE: {
1672 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1673 return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes,
1674 operands, helper)
1675 ? ANEURALNETWORKS_NO_ERROR
1676 : ANEURALNETWORKS_BAD_DATA;
1677 }
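        // Note: IF and WHILE are HAL 1.3 control-flow operations; validation of
        // their subgraph and operand wiring is delegated to validateIfOperation()
        // and validateWhileOperation() rather than handled in this switch.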
1678 default: {
1679 const OperationRegistration* operationRegistration =
1680 BuiltinOperationResolver::get()->findOperation(
1681 static_cast<OperationType>(opType));
1682 if (operationRegistration == nullptr) {
1683 if (0 <= opType && opType < kNumberOfOperationTypes) {
1684                     LOG(ERROR) << "Operation type " << opType << " not registered";
1685 } else {
1686 LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
1687 << kNumberOfOperationTypes << ")";
1688 }
1689 return ANEURALNETWORKS_UNEXPECTED_NULL;
1690 }
1691 if (operationRegistration->validate == nullptr) {
1692 LOG(ERROR) << "Incomplete operation registration: " << opType;
1693 return ANEURALNETWORKS_UNEXPECTED_NULL;
1694 }
1695 OperationValidationContext context(operationRegistration->name, inputCount,
1696 inputIndexes, outputCount, outputIndexes,
1697 operands.data());
1698 const auto maybeVersion = operationRegistration->validate(&context);
1699 if (!maybeVersion.has_value()) {
1700 LOG(ERROR) << "Validation failed for operation " << opType << ": "
1701 << maybeVersion.error();
1702 return ANEURALNETWORKS_BAD_DATA;
1703 }
1704 if (!validateVersion(&context, convert(halVersion), maybeVersion.value())) {
1705 LOG(ERROR) << "Validation failed for operation " << opType;
1706 return ANEURALNETWORKS_BAD_DATA;
1707 }
1708 return ANEURALNETWORKS_NO_ERROR;
1709 }
1710 }
1711 }
1712
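// Hedged sketch of the registry-based default path above (OperationType::ABS is
// just an illustrative choice of operation):
//
//   const OperationRegistration* reg =
//           BuiltinOperationResolver::get()->findOperation(OperationType::ABS);
//   // When registered, reg->validate(&context) returns the minimum Version the
//   // operation requires, which is then checked against the caller's HAL version.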
1713 ErrorStatus convertResultCodeToErrorStatus(int resultCode) {
1714 switch (resultCode) {
1715 case ANEURALNETWORKS_NO_ERROR:
1716 return ErrorStatus::NONE;
1717
1718 case ANEURALNETWORKS_BAD_DATA:
1719 case ANEURALNETWORKS_UNEXPECTED_NULL:
1720 return ErrorStatus::INVALID_ARGUMENT;
1721
1722 case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
1723 return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
1724
1725 case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
1726 return ErrorStatus::DEVICE_UNAVAILABLE;
1727
1728 case ANEURALNETWORKS_BAD_STATE:
1729 case ANEURALNETWORKS_INCOMPLETE:
1730 case ANEURALNETWORKS_OP_FAILED:
1731 case ANEURALNETWORKS_OUT_OF_MEMORY:
1732 case ANEURALNETWORKS_UNMAPPABLE:
1733 case ANEURALNETWORKS_DEAD_OBJECT:
1734 return ErrorStatus::GENERAL_FAILURE;
1735
1736 case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
1737 return ErrorStatus::MISSED_DEADLINE_TRANSIENT;
1738 case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
1739 return ErrorStatus::MISSED_DEADLINE_PERSISTENT;
1740 case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
1741 return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT;
1742 case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
1743 return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT;
1744 }
1745 LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE";
1746 return ErrorStatus::GENERAL_FAILURE;
1747 }
1748
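// Hedged round-trip example for the two converters above/below: the mapping is
// lossy, since several result codes collapse into ErrorStatus::GENERAL_FAILURE:
//
//   const ErrorStatus status = convertResultCodeToErrorStatus(ANEURALNETWORKS_BAD_STATE);
//   // status == ErrorStatus::GENERAL_FAILURE
//   const int code = convertErrorStatusToResultCode(status);
//   // code == ANEURALNETWORKS_OP_FAILED, not the original ANEURALNETWORKS_BAD_STATE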
1749 int convertErrorStatusToResultCode(ErrorStatus status) {
1750 switch (status) {
1751 case ErrorStatus::NONE:
1752 return ANEURALNETWORKS_NO_ERROR;
1753 case ErrorStatus::DEVICE_UNAVAILABLE:
1754 return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
1755 case ErrorStatus::GENERAL_FAILURE:
1756 return ANEURALNETWORKS_OP_FAILED;
1757 case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
1758 return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
1759 case ErrorStatus::INVALID_ARGUMENT:
1760 return ANEURALNETWORKS_BAD_DATA;
1761 case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
1762 return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
1763 case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
1764 return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
1765 case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
1766 return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
1767 case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
1768 return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
1769 case ErrorStatus::DEAD_OBJECT:
1770 return ANEURALNETWORKS_DEAD_OBJECT;
1771 }
1772 LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED";
1773 return ANEURALNETWORKS_OP_FAILED;
1774 }
1775
1776 std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
1777 ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
1778 constexpr Timing kNoTiming = {};
1779 const int n = convertErrorStatusToResultCode(status);
1780 if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
1781 !outputShapes.empty()) {
1782 LOG(ERROR) << "The driver returned OutputShapes when it shouldn't.";
1783 outputShapes.clear();
1784 }
1785 if (status != ErrorStatus::NONE && timing != kNoTiming) {
1786 LOG(ERROR) << "The driver returned Timing when it shouldn't.";
1787 timing = kNoTiming;
1788 }
1789 return {n, std::move(outputShapes), timing};
1790 }
1791
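// Hedged usage sketch for getExecutionResult() (someTiming is illustrative): a
// driver that fails yet still reports shapes or timing has that payload scrubbed:
//
//   auto [n, shapes, timing] = getExecutionResult(ErrorStatus::GENERAL_FAILURE,
//                                                 {OutputShape{}}, someTiming);
//   // n == ANEURALNETWORKS_OP_FAILED, shapes has been cleared, timing reset to {}.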
1792 FenceState syncWait(int fd, int timeout) {
1793     // This implementation is based directly on the ::sync_wait() implementation in libsync.
1794
1795 struct pollfd fds;
1796 int ret;
1797
1798 if (fd < 0) {
1799 errno = EINVAL;
1800 return FenceState::UNKNOWN;
1801 }
1802
1803 fds.fd = fd;
1804 fds.events = POLLIN;
1805
1806 do {
1807 ret = poll(&fds, 1, timeout);
1808 if (ret > 0) {
1809 if (fds.revents & POLLNVAL) {
1810 errno = EINVAL;
1811 return FenceState::UNKNOWN;
1812 }
1813 if (fds.revents & POLLERR) {
1814 errno = EINVAL;
1815 return FenceState::ERROR;
1816 }
1817 return FenceState::SIGNALED;
1818 } else if (ret == 0) {
1819 errno = ETIME;
1820 return FenceState::ACTIVE;
1821 }
1822 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1823
1824 return FenceState::UNKNOWN;
1825 }
1826
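// Hedged usage sketch for syncWait() (fenceFd and the timeout are illustrative):
//
//   switch (syncWait(fenceFd, /*timeout=*/1000)) {
//       case FenceState::SIGNALED: /* work completed */ break;
//       case FenceState::ACTIVE:   /* timed out; fence still pending */ break;
//       case FenceState::ERROR:    /* fence signaled an error */ break;
//       case FenceState::UNKNOWN:  /* bad fd or poll() failure; see errno */ break;
//   }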
1827 #ifdef NN_DEBUGGABLE
1828 uint32_t getProp(const char* str, uint32_t defaultValue) {
1829 const std::string propStr = android::base::GetProperty(str, "");
1830     if (!propStr.empty()) {
1831 return std::stoi(propStr);
1832 } else {
1833 return defaultValue;
1834 }
1835 }
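// Hedged usage sketch for getProp() (the property name is illustrative): debug
// builds can override tunables via system properties, e.g.
//
//   const uint32_t partitioning = getProp("debug.nn.partition", 1);
//
// Note that std::stoi above throws if the property is set to a non-numeric value.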
1836 #endif // NN_DEBUGGABLE
1837
1838 } // namespace nn
1839 } // namespace android
1840