/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Utils"

#include "Utils.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <android-base/strings.h>
#include <sys/system_properties.h>
#include <unordered_map>

using ::android::hidl::allocator::V1_0::IAllocator;

namespace android {
namespace nn {

const char kVLogPropKey[] = "debug.nn.vlog";
int vLogMask = ~0;
// Split the space-separated list of tags from the verbose log setting and
// build the logging mask from it. Note that '1' and 'all' are special cases
// that enable all verbose logging.
//
// The NN API verbose logging setting comes from the system property
// debug.nn.vlog. Examples:
//     setprop debug.nn.vlog 1 : enable all logging tags.
//     setprop debug.nn.vlog "model compilation" : only enable logging for the
//     MODEL and COMPILATION tags.
void initVLogMask() {
    vLogMask = 0;
    const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
    if (vLogSetting.empty()) {
        return;
    }

    std::unordered_map<std::string, int> vLogFlags = {
        {"1", -1},
        {"all", -1},
        {"model", MODEL},
        {"compilation", COMPILATION},
        {"execution", EXECUTION},
        {"cpuexe", CPUEXE},
        {"manager", MANAGER},
        {"driver", DRIVER}};

    std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:");
    for (const auto& elem : elements) {
        const auto& flag = vLogFlags.find(elem);
        if (flag == vLogFlags.end()) {
            LOG(ERROR) << "Unknown trace flag: " << elem;
            continue;
        }

        if (flag->second == -1) {
            // -1 is used for the special values "1" and "all" that enable all
            // tracing.
            vLogMask = ~0;
            return;
        } else {
            vLogMask |= 1 << flag->second;
        }
    }
}
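
// Illustrative usage (assuming the VLOG macros declared in Utils.h): after
// `adb shell setprop debug.nn.vlog "compilation"`, only statements guarded by
// the COMPILATION tag produce output, e.g.
//     VLOG(COMPILATION) << "Compilation::finish";
// while VLOG(MODEL) statements stay silent because the MODEL bit is not set
// in vLogMask.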

namespace {

template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
EntryType tableLookup(const EntryType (&table)[entryCount],
                      const EntryType (&tableOEM)[entryCountOEM],
                      uint32_t code) {
    if (code < entryCount) {
        return table[code];
    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
        return tableOEM[code - kOEMCodeBase];
    } else {
        nnAssert(!"tableLookup: bad code");
        return EntryType();
    }
}
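
// Illustrative example (assumes the tables defined below): a regular code
// indexes the first table directly, e.g.
//     tableLookup(kTypeNames, kTypeNamesOEM,
//                 static_cast<uint32_t>(OperandType::TENSOR_FLOAT32))
// returns "TENSOR_FLOAT32", while codes at or above kOEMCodeBase are looked
// up in the OEM table after subtracting the base.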

}  // anonymous namespace

#define COUNT(X) (sizeof(X) / sizeof(X[0]))

const char* kTypeNames[kNumberOfDataTypes] = {
        "FLOAT32", "INT32", "UINT32",
        "TENSOR_FLOAT32", "TENSOR_INT32", "TENSOR_QUANT8_ASYMM",
};

static_assert(COUNT(kTypeNames) == kNumberOfDataTypes, "kTypeNames is incorrect");

const char* kTypeNamesOEM[kNumberOfDataTypesOEM] = {
        "OEM", "TENSOR_OEM_BYTE",
};

static_assert(COUNT(kTypeNamesOEM) == kNumberOfDataTypesOEM, "kTypeNamesOEM is incorrect");

const char* getOperandTypeName(OperandType type) {
    uint32_t n = static_cast<uint32_t>(type);
    return tableLookup(kTypeNames, kTypeNamesOEM, n);
}

// TODO: Check if this is useful.
const char* kErrorNames[] = {
        "NO_ERROR", "OUT_OF_MEMORY", "INCOMPLETE", "NULL", "BAD_DATA",
};

const char* kOperationNames[kNumberOfOperationTypes] = {
        "ADD",
        "AVERAGE_POOL",
        "CONCATENATION",
        "CONV",
        "DEPTHWISE_CONV",
        "DEPTH_TO_SPACE",
        "DEQUANTIZE",
        "EMBEDDING_LOOKUP",
        "FLOOR",
        "FULLY_CONNECTED",
        "HASHTABLE_LOOKUP",
        "L2_NORMALIZATION",
        "L2_POOL",
        "LOCAL_RESPONSE_NORMALIZATION",
        "LOGISTIC",
        "LSH_PROJECTION",
        "LSTM",
        "MAX_POOL",
        "MUL",
        "RELU",
        "RELU1",
        "RELU6",
        "RESHAPE",
        "RESIZE_BILINEAR",
        "RNN",
        "SOFTMAX",
        "SPACE_TO_DEPTH",
        "SVDF",
        "TANH",
        "BATCH_TO_SPACE_ND",
        "DIV",
        "MEAN",
        "PAD",
        "SPACE_TO_BATCH_ND",
        "SQUEEZE",
        "STRIDED_SLICE",
        "SUB",
        "TRANSPOSE",
};

static_assert(COUNT(kOperationNames) == kNumberOfOperationTypes, "kOperationNames is incorrect");

const char* kOperationNamesOEM[kNumberOfOperationTypesOEM] = {
        "OEM_OPERATION",
};

static_assert(COUNT(kOperationNamesOEM) == kNumberOfOperationTypesOEM,
              "kOperationNamesOEM is incorrect");

const char* getOperationName(OperationType type) {
    uint32_t n = static_cast<uint32_t>(type);
    return tableLookup(kOperationNames, kOperationNamesOEM, n);
}

const uint32_t kSizeOfDataType[]{
        4, // ANEURALNETWORKS_FLOAT32
        4, // ANEURALNETWORKS_INT32
        4, // ANEURALNETWORKS_UINT32
        4, // ANEURALNETWORKS_TENSOR_FLOAT32
        4, // ANEURALNETWORKS_TENSOR_INT32
        1  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
};

static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");

const bool kScalarDataType[]{
        true,  // ANEURALNETWORKS_FLOAT32
        true,  // ANEURALNETWORKS_INT32
        true,  // ANEURALNETWORKS_UINT32
        false, // ANEURALNETWORKS_TENSOR_FLOAT32
        false, // ANEURALNETWORKS_TENSOR_INT32
        false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
};

static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");

const uint32_t kSizeOfDataTypeOEM[]{
        0, // ANEURALNETWORKS_OEM
        1, // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
              "kSizeOfDataTypeOEM is incorrect");

const bool kScalarDataTypeOEM[]{
        true,  // ANEURALNETWORKS_OEM
        false, // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
              "kScalarDataTypeOEM is incorrect");

uint32_t sizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
    int n = static_cast<int>(type);

    uint32_t size = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);

    if (tableLookup(kScalarDataType, kScalarDataTypeOEM, n) == true) {
        return size;
    }

    for (auto d : dimensions) {
        size *= d;
    }
    return size;
}
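
// Illustrative example (using the tables above):
//     sizeOfData(OperandType::TENSOR_FLOAT32, {2, 3}) returns 2 * 3 * 4 = 24
// bytes, while sizeOfData(OperandType::INT32, {}) returns 4 because scalar
// types ignore the dimensions vector.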

hidl_memory allocateSharedMemory(int64_t size) {
    static const std::string type = "ashmem";
    static sp<IAllocator> allocator = IAllocator::getService(type);

    hidl_memory memory;

    // TODO: should we align memory size to nearest page? doesn't seem necessary...
    allocator->allocate(size, [&](bool success, const hidl_memory& mem) {
        if (!success) {
            LOG(ERROR) << "unable to allocate " << size << " bytes of " << type;
        } else {
            memory = mem;
        }
    });

    return memory;
}

uint32_t alignBytesNeeded(uint32_t index, size_t length) {
    uint32_t pattern;
    if (length < 2) {
        pattern = 0; // No alignment necessary
    } else if (length < 4) {
        pattern = 1; // Align on 2-byte boundary
    } else {
        pattern = 3; // Align on 4-byte boundary
    }
    uint32_t extra = (~(index - 1)) & pattern;
    return extra;
}
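
// Illustrative example: alignBytesNeeded(3, 4) returns 1, padding the offset
// from 3 up to 4 so a 4-byte value starts on a 4-byte boundary, while
// alignBytesNeeded(8, 1) returns 0 because single bytes need no alignment.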

void logModelToInfo(const V1_0::Model& model) {
    LOG(INFO) << "V1_0::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size" << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}

void logModelToInfo(const V1_1::Model& model) {
    LOG(INFO) << "V1_1::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size" << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}

// Validates the operand type. When allowPartial is true, the dimensions may be
// underspecified (i.e., contain zeros).
int validateOperandType(const ANeuralNetworksOperandType& type, const char* tag,
                        bool allowPartial) {
    if (!allowPartial) {
        for (uint32_t i = 0; i < type.dimensionCount; i++) {
            if (type.dimensions[i] == 0) {
                LOG(ERROR) << tag << " OperandType invalid dimensions[" << i
                           << "] = " << type.dimensions[i];
                return ANEURALNETWORKS_BAD_DATA;
            }
        }
    }
    if (!validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type)) {
        LOG(ERROR) << tag << " OperandType invalid type " << type.type;
        return ANEURALNETWORKS_BAD_DATA;
    }
    if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
        if (type.zeroPoint < 0 || type.zeroPoint > 255) {
            LOG(ERROR) << tag << " OperandType invalid zeroPoint " << type.zeroPoint;
            return ANEURALNETWORKS_BAD_DATA;
        }
        if (type.scale <= 0.f) {
            LOG(ERROR) << tag << " OperandType invalid scale " << type.scale;
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    if (type.type == ANEURALNETWORKS_FLOAT32 ||
        type.type == ANEURALNETWORKS_INT32 ||
        type.type == ANEURALNETWORKS_UINT32 ||
        type.type == ANEURALNETWORKS_OEM_SCALAR) {
        if (type.dimensionCount != 0 || type.dimensions != nullptr) {
            LOG(ERROR) << tag << " Invalid dimensions for scalar type";
            return ANEURALNETWORKS_BAD_DATA;
        }
    }

    return ANEURALNETWORKS_NO_ERROR;
}
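
// Illustrative example: an operand description that passes the checks above
// could look like
//     uint32_t dims[] = {1, 224, 224, 3};
//     ANeuralNetworksOperandType t = {
//             .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
//             .dimensionCount = 4,
//             .dimensions = dims,
//             .scale = 1.0f / 255,
//             .zeroPoint = 0};
// Scalar types such as ANEURALNETWORKS_INT32 must instead pass
// dimensionCount == 0 and dimensions == nullptr.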

int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
                        const char* tag) {
    for (uint32_t i = 0; i < count; i++) {
        if (list[i] >= operandCount) {
            LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
                       << ", operandCount " << operandCount;
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}

int validateOperationOperandTypes(const std::vector<Operand>& operands,
                                  uint32_t inOperandCount, const uint32_t* inOperandIndexes,
                                  const std::vector<OperandType>& inExpectedTypes,
                                  uint32_t outOperandCount, const uint32_t* outOperandIndexes,
                                  const std::vector<OperandType>& outExpectedInTypes) {
    if (inOperandCount > static_cast<uint32_t>(inExpectedTypes.size()) ||
        outOperandCount > static_cast<uint32_t>(outExpectedInTypes.size())) {
        return ANEURALNETWORKS_BAD_DATA;
    }
    for (uint32_t i = 0; i < inOperandCount; i++) {
        if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
            LOG(ERROR) << "Invalid input tensor type "
                       << toString(operands[inOperandIndexes[i]].type)
                       << " for input " << i << ", expected " << toString(inExpectedTypes[i]);
            return ANEURALNETWORKS_BAD_DATA;
        }
    }
    for (uint32_t i = 0; i < outOperandCount; i++) {
        if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
            LOG(ERROR) << "Invalid output tensor type "
                       << toString(operands[outOperandIndexes[i]].type)
                       << " for output " << i << ", expected "
                       << toString(outExpectedInTypes[i]);
            return ANEURALNETWORKS_BAD_DATA;
        }
    }

    return ANEURALNETWORKS_NO_ERROR;
}

int validateOperation(ANeuralNetworksOperationType opType,
                      uint32_t inputCount, const uint32_t* inputIndexes,
                      uint32_t outputCount, const uint32_t* outputIndexes,
                      const std::vector<Operand>& operands) {
    int n = validateOperandList(inputCount, inputIndexes, static_cast<uint32_t>(operands.size()),
                                "ANeuralNetworksModel_addOperation inputs");
    if (n != ANEURALNETWORKS_NO_ERROR) {
        return n;
    }
    n = validateOperandList(outputCount, outputIndexes, static_cast<uint32_t>(operands.size()),
                            "ANeuralNetworksModel_addOperation outputs");
    if (n != ANEURALNETWORKS_NO_ERROR) {
        return n;
    }

    auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
        LOG(ERROR) << "Invalid number of input operands ("
                   << inputCount << ", expected " << expIn << ") or output operands ("
                   << outputCount << ", expected " << expOut << ") for operation "
                   << kOperationNames[opType];
    };

    switch (opType) {
        case ANEURALNETWORKS_OEM_OPERATION: {
            return ANEURALNETWORKS_NO_ERROR;
        }
        case ANEURALNETWORKS_ADD: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_MUL: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_FLOOR: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEQUANTIZE: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEPTHWISE_CONV_2D: {
            if ((inputCount != 11 && inputCount != 8) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands ("
                           << inputCount << ", expected 11 or 8) or output operands ("
                           << outputCount << ", expected 1) for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }

            if (inputCount == 11) {
                std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                inExpectedTypes.insert(inExpectedTypes.end(),
                                       explicitScalarTypes.begin(),
                                       explicitScalarTypes.end());
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_CONV_2D: {
            if ((inputCount != 10 && inputCount != 7) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands ("
                           << inputCount << ", expected 10 or 7) or output operands ("
                           << outputCount << ", expected 1) for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }

            if (inputCount == 10) {
                std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                inExpectedTypes.insert(inExpectedTypes.end(),
                                       explicitScalarTypes.begin(),
                                       explicitScalarTypes.end());
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_AVERAGE_POOL_2D: {
            if ((inputCount != 10 && inputCount != 7) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands ("
                           << inputCount << ", expected 10 or 7) or output operands ("
                           << outputCount << ", expected 1) for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }

            if (inputCount == 10) {
                std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                inExpectedTypes.insert(inExpectedTypes.end(),
                                       explicitScalarTypes.begin(),
                                       explicitScalarTypes.end());
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_L2_POOL_2D: {
            if ((inputCount != 10 && inputCount != 7) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands ("
                           << inputCount << ", expected 10 or 7) or output operands ("
                           << outputCount << ", expected 1) for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }

            if (inputCount == 10) {
                std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                inExpectedTypes.insert(inExpectedTypes.end(),
                                       explicitScalarTypes.begin(),
                                       explicitScalarTypes.end());
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_MAX_POOL_2D: {
            if ((inputCount != 10 && inputCount != 7) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands ("
                           << inputCount << ", expected 10 or 7) or output operands ("
                           << outputCount << ", expected 1) for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }

            if (inputCount == 10) {
                std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
                inExpectedTypes.insert(inExpectedTypes.end(),
                                       explicitScalarTypes.begin(),
                                       explicitScalarTypes.end());
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RELU: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RELU1: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RELU6: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_TANH: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LOGISTIC: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SOFTMAX: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_FULLY_CONNECTED: {
            if (inputCount != 4 || outputCount != 1) {
                logInvalidInOutNumber(4, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_CONCATENATION: {
            if (inputCount < 2 || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands ("
                           << inputCount << ", expected at least 2) or output operands ("
                           << outputCount << ", expected 1) for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes(inputCount, inputType);
            std::vector<OperandType> outExpectedTypes = {inputType};
            // The last one is the activation function.
            inExpectedTypes.back() = OperandType::INT32;
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_L2_NORMALIZATION: {
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION: {
            if (inputCount != 5 || outputCount != 1) {
                logInvalidInOutNumber(5, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32,
                                   OperandType::FLOAT32,
                                   OperandType::FLOAT32,
                                   OperandType::FLOAT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RESHAPE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RESIZE_BILINEAR: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DEPTH_TO_SPACE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_DEPTH: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
                                                        inputType};
            std::vector<OperandType> outExpectedTypes = {inputType};
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
            if (inputCount != 3 || outputCount != 2) {
                logInvalidInOutNumber(3, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[2]].type;
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
                                                        OperandType::TENSOR_INT32,
                                                        inputType};
            std::vector<OperandType> outExpectedTypes = {inputType,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LSH_PROJECTION: {
            if (inputCount != 4 || outputCount != 1) {
                logInvalidInOutNumber(4, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[1]].type;
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                                        inputType,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::INT32};
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_LSTM: {
            if (inputCount != 23 || outputCount != 4) {
                logInvalidInOutNumber(23, 4);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::INT32,
                                                        OperandType::FLOAT32,
                                                        OperandType::FLOAT32};
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                                         OperandType::TENSOR_FLOAT32,
                                                         OperandType::TENSOR_FLOAT32,
                                                         OperandType::TENSOR_FLOAT32};
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RNN: {
            if (inputCount != 6 || outputCount != 2) {
                logInvalidInOutNumber(6, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::INT32};
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                                         OperandType::TENSOR_FLOAT32};
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SVDF: {
            if (inputCount != 7 || outputCount != 2) {
                logInvalidInOutNumber(7, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::TENSOR_FLOAT32,
                                                        OperandType::INT32,
                                                        OperandType::INT32};
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                                         OperandType::TENSOR_FLOAT32};
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_INT32,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_PAD: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SQUEEZE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_TRANSPOSE: {
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_STRIDED_SLICE: {
            if (inputCount != 7 || outputCount != 1) {
                logInvalidInOutNumber(7, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_INT32,
                                   OperandType::TENSOR_INT32,
                                   OperandType::TENSOR_INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32,
                                   OperandType::TENSOR_INT32,
                                   OperandType::TENSOR_INT32,
                                   OperandType::INT32,
                                   OperandType::INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_DIV: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SUB: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_FLOAT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_MEAN: {
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {OperandType::TENSOR_FLOAT32,
                                   OperandType::TENSOR_INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM,
                                   OperandType::TENSOR_INT32,
                                   OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << kOperationNames[opType];
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands,
                                                 inputCount, inputIndexes,
                                                 inExpectedTypes,
                                                 outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        default:
            return ANEURALNETWORKS_BAD_DATA;
    }
}
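
// Note: every case above follows the same pattern: check the operand counts,
// derive the expected operand type vectors from the type of the first (or the
// spec-defined key) input, then delegate the per-operand comparison to
// validateOperationOperandTypes().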

ErrorStatus convertResultCodeToErrorStatus(int resultCode) {
    switch (resultCode) {
        case ANEURALNETWORKS_NO_ERROR:
            return ErrorStatus::NONE;

        case ANEURALNETWORKS_BAD_DATA:
        case ANEURALNETWORKS_UNEXPECTED_NULL:
            return ErrorStatus::INVALID_ARGUMENT;

        default:
            LOG(ERROR) << "Unknown result code " << resultCode
                       << " mapped to ErrorStatus::GENERAL_FAILURE";
        case ANEURALNETWORKS_BAD_STATE:
        case ANEURALNETWORKS_INCOMPLETE:
        case ANEURALNETWORKS_OP_FAILED:
        case ANEURALNETWORKS_OUT_OF_MEMORY:
        case ANEURALNETWORKS_UNMAPPABLE:
            return ErrorStatus::GENERAL_FAILURE;
    }
}

int convertErrorStatusToResultCode(ErrorStatus status) {
    switch (status) {
        case ErrorStatus::NONE:
            return ANEURALNETWORKS_NO_ERROR;

        case ErrorStatus::INVALID_ARGUMENT:
            return ANEURALNETWORKS_BAD_DATA;

        default:
            LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
                       << " mapped to ANEURALNETWORKS_OP_FAILED";
        case ErrorStatus::DEVICE_UNAVAILABLE:
        case ErrorStatus::GENERAL_FAILURE:
        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return ANEURALNETWORKS_OP_FAILED;
    }
}
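
// Note that the two conversions above are not exact inverses: for example,
// ErrorStatus::GENERAL_FAILURE converts to ANEURALNETWORKS_OP_FAILED, while
// ANEURALNETWORKS_OUT_OF_MEMORY converts to ErrorStatus::GENERAL_FAILURE.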

// Versioning

bool compliantWithV1_0(V1_0::OperationType) {
    return true;
}

bool compliantWithV1_0(V1_1::OperationType operation) {
    switch (static_cast<V1_0::OperationType>(operation)) {
        case V1_0::OperationType::ADD:
        case V1_0::OperationType::AVERAGE_POOL_2D:
        case V1_0::OperationType::CONCATENATION:
        case V1_0::OperationType::CONV_2D:
        case V1_0::OperationType::DEPTHWISE_CONV_2D:
        case V1_0::OperationType::DEPTH_TO_SPACE:
        case V1_0::OperationType::DEQUANTIZE:
        case V1_0::OperationType::EMBEDDING_LOOKUP:
        case V1_0::OperationType::FLOOR:
        case V1_0::OperationType::FULLY_CONNECTED:
        case V1_0::OperationType::HASHTABLE_LOOKUP:
        case V1_0::OperationType::L2_NORMALIZATION:
        case V1_0::OperationType::L2_POOL_2D:
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
        case V1_0::OperationType::LOGISTIC:
        case V1_0::OperationType::LSH_PROJECTION:
        case V1_0::OperationType::LSTM:
        case V1_0::OperationType::MAX_POOL_2D:
        case V1_0::OperationType::MUL:
        case V1_0::OperationType::RELU:
        case V1_0::OperationType::RELU1:
        case V1_0::OperationType::RELU6:
        case V1_0::OperationType::RESHAPE:
        case V1_0::OperationType::RESIZE_BILINEAR:
        case V1_0::OperationType::RNN:
        case V1_0::OperationType::SOFTMAX:
        case V1_0::OperationType::SPACE_TO_DEPTH:
        case V1_0::OperationType::SVDF:
        case V1_0::OperationType::TANH:
        case V1_0::OperationType::OEM_OPERATION:
            return true;
        default:
            return false;
    }
}

bool compliantWithV1_1(V1_0::OperationType) {
    return true;
}

bool compliantWithV1_1(V1_1::OperationType) {
    return true;
}
1544
compliantWithV1_0(V1_0::Capabilities)1545 bool compliantWithV1_0(V1_0::Capabilities) {
1546 return true;
1547 }
1548
compliantWithV1_0(const V1_1::Capabilities & capabilities)1549 bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
1550 return capabilities.relaxedFloat32toFloat16Performance.execTime ==
1551 capabilities.float32Performance.execTime
1552 &&
1553 capabilities.relaxedFloat32toFloat16Performance.powerUsage ==
1554 capabilities.float32Performance.powerUsage;
1555 }

bool compliantWithV1_1(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_1(const V1_1::Capabilities&) {
    return true;
}

bool compliantWithV1_0(const V1_0::Operation&) {
    return true;
}

bool compliantWithV1_0(const V1_1::Operation& operation) {
    return compliantWithV1_0(operation.type);
}

bool compliantWithV1_1(const V1_0::Operation&) {
    return true;
}

bool compliantWithV1_1(const V1_1::Operation&) {
    return true;
}

static bool compliantWithV1_0(const hidl_vec<V1_1::Operation>& operations) {
    return std::all_of(operations.begin(), operations.end(),
                       [](const V1_1::Operation& operation) {
                           return compliantWithV1_0(operation);
                       });
}

bool compliantWithV1_0(const V1_0::Model&) {
    return true;
}

bool compliantWithV1_0(const V1_1::Model& model) {
    // In addition to the new enumeration values introduced in V1_1::Model, a
    // new flag was introduced to indicate whether float32 data can be
    // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
    // flag is not relevant to whether a V1_1::Model is compliant with a
    // V1_0::Model, because all 1.0 drivers perform strict calculation by
    // default in the P NN runtime. Even if fp16 calculations are allowed, they
    // can still be computed by a strict fp32 driver.
    return compliantWithV1_0(model.operations);
}
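
// Illustrative compliance check (comment only; the V1_1::Model value is
// assumed to come from a caller, not from this file): a model whose
// operations all appear in the V1_0 switch above is compliant and can be
// downgraded with convertToV1_0(model) below; a single operation outside
// that list makes this function return false, and convertToV1_0() will log
// an error if the conversion is attempted anyway.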

bool compliantWithV1_1(const V1_0::Model&) {
    return true;
}

bool compliantWithV1_1(const V1_1::Model&) {
    return true;
}

V1_0::OperationType convertToV1_0(V1_0::OperationType type) {
    return type;
}

V1_0::OperationType convertToV1_0(V1_1::OperationType type) {
    if (!compliantWithV1_0(type)) {
        LOG(ERROR) << "Upcasting non-compliant type " << toString(type)
                   << " from V1_1::OperationType to V1_0::OperationType";
    }
    return static_cast<V1_0::OperationType>(type);
}

V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
    return static_cast<V1_1::OperationType>(type);
}

V1_1::OperationType convertToV1_1(V1_1::OperationType type) {
    return type;
}

V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) {
    return capabilities;
}

V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) {
    if (!compliantWithV1_0(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_1::Capabilities to V1_0::Capabilities";
    }
    return { .float32Performance = capabilities.float32Performance,
             .quantized8Performance = capabilities.quantized8Performance };
}

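// V1_0::Capabilities carries no separate relaxed-precision measurement, so the
// upcast below reuses float32Performance for relaxedFloat32toFloat16Performance;
// this keeps the result consistent with compliantWithV1_0() above, which treats
// equal relaxed and float32 performance as the 1.0-compatible case.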
V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
    return { .float32Performance = capabilities.float32Performance,
             .quantized8Performance = capabilities.quantized8Performance,
             .relaxedFloat32toFloat16Performance = capabilities.float32Performance };
}

V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) {
    return capabilities;
}

V1_0::Operation convertToV1_0(const V1_0::Operation& operation) {
    return operation;
}

V1_0::Operation convertToV1_0(const V1_1::Operation& operation) {
    if (!compliantWithV1_0(operation)) {
        LOG(ERROR) << "Upcasting non-compliant operation " << toString(operation)
                   << " from V1_1::Operation to V1_0::Operation";
    }
    return {.type = convertToV1_0(operation.type),
            .inputs = operation.inputs,
            .outputs = operation.outputs};
}

V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
    return {.type = convertToV1_1(operation.type),
            .inputs = operation.inputs,
            .outputs = operation.outputs};
}

V1_1::Operation convertToV1_1(const V1_1::Operation& operation) {
    return operation;
}

static hidl_vec<V1_0::Operation> convertToV1_0(const hidl_vec<V1_1::Operation>& operations) {
    hidl_vec<V1_0::Operation> result(operations.size());
    std::transform(operations.begin(), operations.end(), result.begin(),
                   [](const V1_1::Operation& operation) { return convertToV1_0(operation); });
    return result;
}

static hidl_vec<V1_1::Operation> convertToV1_1(const hidl_vec<V1_0::Operation>& operations) {
    hidl_vec<V1_1::Operation> result(operations.size());
    std::transform(operations.begin(), operations.end(), result.begin(),
                   [](const V1_0::Operation& operation) { return convertToV1_1(operation); });
    return result;
}

V1_0::Model convertToV1_0(const V1_0::Model& model) {
    return model;
}

V1_0::Model convertToV1_0(const V1_1::Model& model) {
    if (!compliantWithV1_0(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_1::Model to V1_0::Model";
    }
    return {.operands = model.operands,
            .operations = convertToV1_0(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools};
}
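
// Illustrative call sequence for handing a model to a 1.0 driver (comment
// only; `driver` and `callback` are hypothetical placeholders, not defined in
// this file):
//   if (compliantWithV1_0(model)) {
//       driver->prepareModel(convertToV1_0(model), callback);
//   }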

V1_1::Model convertToV1_1(const V1_0::Model& model) {
    return {.operands = model.operands,
            .operations = convertToV1_1(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools,
            .relaxComputationFloat32toFloat16 = false};
}

V1_1::Model convertToV1_1(const V1_1::Model& model) {
    return model;
}

#ifdef NN_DEBUGGABLE
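// Reads a numeric system property, returning defaultValue when the property
// is unset. Illustrative debug-only usage (comment only; the property name is
// a hypothetical example, not one this file defines):
//   uint32_t knob = getProp("debug.nn.some-knob", 0);
// Note that a non-numeric property value would make std::stoi throw, so this
// helper is intended only for trusted debug properties.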
uint32_t getProp(const char* str, uint32_t defaultValue) {
    const std::string propStr = android::base::GetProperty(str, "");
    if (!propStr.empty()) {
        return std::stoi(propStr);
    } else {
        return defaultValue;
    }
}
#endif // NN_DEBUGGABLE

} // namespace nn
} // namespace android