/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gmock/gmock.h>
#include <gtest/gtest-death-test.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"
#include "NeuralNetworksWrapper.h"

using namespace android::nn::wrapper;

namespace {

static const int32_t kAvailableOperandCodes[] = {ANEURALNETWORKS_FLOAT32,
                                                 ANEURALNETWORKS_INT32,
                                                 ANEURALNETWORKS_UINT32,
                                                 ANEURALNETWORKS_TENSOR_FLOAT32,
                                                 ANEURALNETWORKS_TENSOR_INT32,
                                                 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
                                                 ANEURALNETWORKS_BOOL,
                                                 ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
                                                 ANEURALNETWORKS_TENSOR_FLOAT16,
                                                 ANEURALNETWORKS_TENSOR_BOOL8,
                                                 ANEURALNETWORKS_FLOAT16,
                                                 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
                                                 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
                                                 ANEURALNETWORKS_TENSOR_OEM_BYTE};

ANeuralNetworksOperandType getOpType(int32_t opcode, uint32_t dimCount = 0,
                                     const uint32_t* dim = nullptr) {
    ANeuralNetworksOperandType opType = {.type = opcode,
                                         .dimensionCount = dimCount,
                                         .dimensions = dim,
                                         .scale = 0.0,
                                         .zeroPoint = 0};
    if (opcode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
        opcode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED ||
        opcode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM ||
        opcode == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM ||
        opcode == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
        opType.scale = 1. / 256.;
    }
    return opType;
}
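
// Illustrative note (not used directly by the tests below): getOpType gives
// quantized tensor types a non-zero default scale so the resulting operand
// passes validation. For example, a hypothetical caller could build a rank-2
// TENSOR_QUANT8_ASYMM operand like:
//
//     const uint32_t exampleDims[2] = {2, 3};
//     ANeuralNetworksOperandType quantOp =
//             getOpType(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 2, exampleDims);
//     // quantOp.scale == 1. / 256. and quantOp.zeroPoint == 0 here.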

struct OperandTypeWithExtraParams {
    OperandTypeWithExtraParams(const ANeuralNetworksOperandType& operandType)
        : operandType(operandType), channelQuant(std::nullopt), valueModel(std::nullopt) {}

    ANeuralNetworksOperandType operandType;
    std::optional<ANeuralNetworksSymmPerChannelQuantParams> channelQuant;
    std::optional<const ANeuralNetworksModel*> valueModel;

    bool operator==(const OperandTypeWithExtraParams& that) const {
        if (operandType.type != that.operandType.type ||
            operandType.scale != that.operandType.scale ||
            operandType.zeroPoint != that.operandType.zeroPoint ||
            operandType.dimensionCount != that.operandType.dimensionCount) {
            return false;
        }

        if (channelQuant.has_value() != that.channelQuant.has_value() ||
            (channelQuant.has_value() &&
             (channelQuant->channelDim != that.channelQuant->channelDim ||
              channelQuant->scaleCount != that.channelQuant->scaleCount))) {
            return false;
        }

        if (valueModel != that.valueModel) {
            return false;
        }

        if (operandType.dimensions) {
            if (!that.operandType.dimensions) {
                return false;
            }
            if (!std::equal(operandType.dimensions,
                            operandType.dimensions + operandType.dimensionCount,
                            that.operandType.dimensions)) {
                return false;
            }
        } else {
            if (that.operandType.dimensions) {
                return false;
            }
        }

        if (channelQuant.has_value()) {
            if (channelQuant->scales) {
                return that.channelQuant->scales &&
                       std::equal(channelQuant->scales,
                                  channelQuant->scales + channelQuant->scaleCount,
                                  that.channelQuant->scales);
            } else {
                return that.channelQuant->scales == nullptr;
            }
        }
        return true;
    }

    bool operator!=(const OperandTypeWithExtraParams& that) const { return !(*this == that); }

    bool operator<(const OperandTypeWithExtraParams& that) const {
        if (operandType.type < that.operandType.type) return true;
        if (operandType.dimensionCount < that.operandType.dimensionCount) return true;
        return false;
    }
};

// Generates valid and invalid mutations of given OperandTypeWithExtraParams
// instances. It is also responsible for freeing the memory allocated when
// creating mutations, so mutations must not outlive the generating
// TensorRankConstraint instance. See the illustrative sketch after the class
// definition.
class TensorRankConstraint {
   public:
    TensorRankConstraint(const TensorRankConstraint& copyFrom) {
        // Intentionally ignores the set of allocated dimension arrays.
        this->mRangeMax = copyFrom.mRangeMax;
        this->mRangeMin = copyFrom.mRangeMin;
    }

    TensorRankConstraint& operator=(const TensorRankConstraint& copyFrom) {
        // Intentionally ignores the set of allocated dimension arrays.
        this->mRangeMax = copyFrom.mRangeMax;
        this->mRangeMin = copyFrom.mRangeMin;
        return *this;
    }

    static TensorRankConstraint Exactly(uint32_t rank) {
        return TensorRankConstraint(std::optional(rank), std::optional(rank));
    }

    static TensorRankConstraint AtLeast(uint32_t min) {
        return TensorRankConstraint(std::optional(min), std::nullopt);
    }

    static TensorRankConstraint UpTo(uint32_t max) {
        return TensorRankConstraint(std::nullopt, std::optional(max));
    }

    static TensorRankConstraint Between(uint32_t min, uint32_t max) {
        if (min == 0) {
            return UpTo(max);
        }
        return TensorRankConstraint(std::optional(min), std::optional(max));
    }

    std::set<std::vector<OperandTypeWithExtraParams>> MutationsWithValidRank(
            const std::vector<OperandTypeWithExtraParams>& operandsTypeWithParams) {
        // mRangeMin and mRangeMax cannot both be nullopt.
        if (!mRangeMin) {
            return {ModifyForRank(operandsTypeWithParams, 1),
                    ModifyForRank(operandsTypeWithParams, *mRangeMax)};
        } else if (!mRangeMax) {
            return {ModifyForRank(operandsTypeWithParams, *mRangeMin),
                    ModifyForRank(operandsTypeWithParams, *mRangeMin + 1)};
        } else if (mRangeMax == mRangeMin) {
            std::for_each(operandsTypeWithParams.begin(), operandsTypeWithParams.end(),
                          [this](const OperandTypeWithExtraParams& op) {
                              assert(op.operandType.dimensionCount == *mRangeMin);
                          });
            return {operandsTypeWithParams};
        } else {
            return {ModifyForRank(operandsTypeWithParams, *mRangeMin),
                    ModifyForRank(operandsTypeWithParams, *mRangeMax)};
        }
    }

    std::set<std::vector<OperandTypeWithExtraParams>> MutationsWithInvalidRank(
            const std::vector<OperandTypeWithExtraParams>& operandsTypeWithParams) {
        std::set<std::vector<OperandTypeWithExtraParams>> result;
        if (mRangeMax) {
            result.insert(ModifyForRank(operandsTypeWithParams, *mRangeMax + 1));
        }
        if (mRangeMin.value_or(0) > 1) {
            result.insert(ModifyForRank(operandsTypeWithParams, *mRangeMin - 1));
        }
        return result;
    }

   private:
    std::vector<OperandTypeWithExtraParams> ModifyForRank(
            const std::vector<OperandTypeWithExtraParams>& operandsTypeWithParams,
            uint32_t newRank) {
        std::vector<OperandTypeWithExtraParams> result;
        std::transform(operandsTypeWithParams.cbegin(), operandsTypeWithParams.cend(),
                       std::back_inserter(result),
                       [this, newRank](const OperandTypeWithExtraParams& operandTypeWithParams) {
                           return ModifyForRank(operandTypeWithParams, newRank);
                       });
        return result;
    }

    OperandTypeWithExtraParams ModifyForRank(
            const OperandTypeWithExtraParams& operandTypeWithParams, uint32_t newRank) {
        if (operandTypeWithParams.operandType.dimensionCount == newRank) {
            return operandTypeWithParams;
        }

        uint32_t* resultDimensions = nullptr;
        if (newRank != 0) {
            std::unique_ptr<uint32_t[]> dimensions = std::make_unique<uint32_t[]>(newRank);
            resultDimensions = dimensions.get();
            mAllocatedDimensions.insert(std::move(dimensions));
            std::fill(resultDimensions, resultDimensions + newRank, 1);
            const auto originDims = operandTypeWithParams.operandType.dimensions;
            if (originDims != nullptr) {
                const int dimsToCopy =
                        std::min(operandTypeWithParams.operandType.dimensionCount, newRank);
                std::copy(originDims, originDims + dimsToCopy, resultDimensions);
            }
        }

        OperandTypeWithExtraParams result = operandTypeWithParams;
        result.operandType = {
                .type = operandTypeWithParams.operandType.type,
                .dimensionCount = newRank,
                .dimensions = resultDimensions,
                .scale = operandTypeWithParams.operandType.scale,
                .zeroPoint = operandTypeWithParams.operandType.zeroPoint,
        };

        return result;
    }

    TensorRankConstraint(const std::optional<uint32_t>& min, const std::optional<uint32_t>& max)
        : mRangeMin(min), mRangeMax(max) {
        if (mRangeMax.has_value()) {
            assert(*mRangeMax >= mRangeMin.value_or(0));
        }

        assert(mRangeMax.has_value() || mRangeMin.has_value());
    }

    std::optional<uint32_t> mRangeMin;
    std::optional<uint32_t> mRangeMax;
    std::set<std::unique_ptr<uint32_t[]>> mAllocatedDimensions;
};
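
// Illustrative sketch (assumption: not part of the original test flow): a
// TensorRankConstraint turns a set of valid operands into rank-mutated copies.
//
//     uint32_t dims[2] = {3, 4};  // hypothetical rank-2 operand
//     OperandTypeWithExtraParams op{getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, 2, dims)};
//     auto constraint = TensorRankConstraint::Between(2, 4);
//     // Valid mutations stay inside [2, 4]: here ranks 2 and 4.
//     auto valid = constraint.MutationsWithValidRank({op});
//     // Invalid mutations step just outside the range: here ranks 1 and 5.
//     auto invalid = constraint.MutationsWithInvalidRank({op});
//
// The dimension arrays backing the mutations are owned by the constraint
// object, which is why mutations must not outlive it.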

// Mutates a set of inputs, applying the same rank constraint to all of them.
// See the illustrative sketch after the class definition.
class TensorRankMutator {
   public:
    TensorRankMutator(const TensorRankConstraint& constraint,
                      const std::set<uint32_t>& applyToIndexes = {0})
        : mApplyToIndexes(applyToIndexes.begin(), applyToIndexes.end()), mConstraint(constraint) {}

    std::set<std::vector<OperandTypeWithExtraParams>> ValidInputsMutations(
            const std::vector<OperandTypeWithExtraParams>& validInputs) {
        return InputsMutations(
                validInputs, [this](const std::vector<OperandTypeWithExtraParams>& inputsToMutate) {
                    return mConstraint.MutationsWithValidRank(inputsToMutate);
                });
    }

    std::set<std::vector<OperandTypeWithExtraParams>> InvalidInputsMutations(
            const std::vector<OperandTypeWithExtraParams>& validInputs) {
        return InputsMutations(
                validInputs, [this](const std::vector<OperandTypeWithExtraParams>& inputsToMutate) {
                    return mConstraint.MutationsWithInvalidRank(inputsToMutate);
                });
    }

   private:
    std::set<std::vector<OperandTypeWithExtraParams>> InputsMutations(
            const std::vector<OperandTypeWithExtraParams>& validInputs,
            std::function<std::set<std::vector<OperandTypeWithExtraParams>>(
                    const std::vector<OperandTypeWithExtraParams>&)>
                    operandMutator) {
        std::for_each(mApplyToIndexes.begin(), mApplyToIndexes.end(),
                      [&validInputs](uint32_t index) { assert(index < validInputs.size()); });

        std::vector<OperandTypeWithExtraParams> toMutate;
        std::transform(mApplyToIndexes.begin(), mApplyToIndexes.end(), std::back_inserter(toMutate),
                       [&validInputs](int input_index) { return validInputs[input_index]; });

        // Get a series of mutations for the operands in toMutate.
        std::set<std::vector<OperandTypeWithExtraParams>> mutatedOps = operandMutator(toMutate);

        // Generate a set of mutations by replacing the mutated ops in validInputs
        // with all the mutations in mutatedOps.
        std::set<std::vector<OperandTypeWithExtraParams>> mutatedValidInputs;
        std::transform(
                mutatedOps.cbegin(), mutatedOps.cend(),
                std::inserter(mutatedValidInputs, mutatedValidInputs.begin()),
                [this, &validInputs](const std::vector<OperandTypeWithExtraParams>& opsMutation) {
                    std::vector<OperandTypeWithExtraParams> currInputMutation(validInputs.begin(),
                                                                              validInputs.end());
                    for (size_t i = 0; i < mApplyToIndexes.size(); i++) {
                        currInputMutation[mApplyToIndexes[i]] = opsMutation[i];
                    }

                    return currInputMutation;
                });

        return mutatedValidInputs;
    }

    std::vector<uint32_t> mApplyToIndexes;
    TensorRankConstraint mConstraint;
};
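
// Illustrative sketch (assumption: not part of the original test flow): a
// TensorRankMutator applies one constraint to selected input positions while
// leaving the other inputs untouched.
//
//     // Mutate only inputs 0 and 2 of a three-input operation.
//     TensorRankMutator mutator{TensorRankConstraint::AtLeast(2), {0, 2}};
//     auto validSets = mutator.ValidInputsMutations({input0, input1, input2});
//     auto invalidSets = mutator.InvalidInputsMutations({input0, input1, input2});
//
// Each element of the returned sets is a full input list in which the operands
// at indexes 0 and 2 have been replaced by rank-mutated copies.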

class OperationTestBase {
   public:
    OperationTestBase(ANeuralNetworksOperationType opCode,
                      const std::vector<ANeuralNetworksOperandType>& validInputs,
                      const std::vector<ANeuralNetworksOperandType>& validOutputs,
                      const std::vector<TensorRankMutator>& inputRankMutators = {})
        : mOpCode(opCode), mValidInputs(), mValidOutputs(), mInputRankMutators(inputRankMutators) {
        for (ANeuralNetworksOperandType input : validInputs) {
            mValidInputs.push_back(input);
        }
        for (ANeuralNetworksOperandType output : validOutputs) {
            mValidOutputs.push_back(output);
        }
    }

    void setInputSymmPerChannelQuantParams(
            int32_t index, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant) {
        mValidInputs[index].channelQuant = channelQuant;
    }

    void setOutputSymmPerChannelQuantParams(
            int32_t index, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant) {
        mValidOutputs[index].channelQuant = channelQuant;
    }

    void setInputOperandValueFromModel(int32_t index, const ANeuralNetworksModel* valueModel) {
        mValidInputs[index].valueModel = valueModel;
    }

    // Add each operand separately and add the operation using these operands.
    // This function does not cover the case where an operand is used multiple times.
    int32_t addOperation(const std::vector<OperandTypeWithExtraParams>& inputs,
                         const std::vector<OperandTypeWithExtraParams>& outputs) {
        ANeuralNetworksModel* model = nullptr;
        ANeuralNetworksModel_create(&model);

        uint32_t opIdx = 0;
        std::vector<uint32_t> inputIds;
        std::vector<uint32_t> outputIds;
        for (uint32_t i = 0; i < inputs.size(); i++) {
            ANeuralNetworksModel_addOperand(model, &inputs[i].operandType);
            if (inputs[i].channelQuant) {
                ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
                        model, opIdx, &inputs[i].channelQuant.value());
            }
            if (inputs[i].valueModel) {
                ANeuralNetworksModel_setOperandValueFromModel(model, opIdx,
                                                              inputs[i].valueModel.value());
            }
            inputIds.push_back(opIdx++);
        }
        for (uint32_t i = 0; i < outputs.size(); i++) {
            ANeuralNetworksModel_addOperand(model, &outputs[i].operandType);
            if (outputs[i].channelQuant) {
                ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
                        model, opIdx, &outputs[i].channelQuant.value());
            }
            outputIds.push_back(opIdx++);
        }

        int32_t result = ANeuralNetworksModel_addOperation(
                model, mOpCode, static_cast<uint32_t>(inputIds.size()), inputIds.data(),
                static_cast<uint32_t>(outputIds.size()), outputIds.data());
        ANeuralNetworksModel_free(model);
        return result;
    }

    void testOpsValidations() {
        EXPECT_TRUE(testSuccess());
        EXPECT_TRUE(testMutatingInputOperandCode());
        EXPECT_TRUE(testMutatingInputOperandCounts());
        EXPECT_TRUE(testMutatingOutputOperandCode());
        EXPECT_TRUE(testMutatingOutputOperandCounts());
        EXPECT_TRUE(testMutatingInputRanks());
    }

    void testFailure(int32_t expectedResult) {
        int32_t result = addOperation(mValidInputs, mValidOutputs);
        EXPECT_TRUE(expectedResult == result);
    }

    bool testSuccess() {
        int32_t result = addOperation(mValidInputs, mValidOutputs);
        return ANEURALNETWORKS_NO_ERROR == result;
    }

    bool testMutatingInputOperandCode() {
        for (uint32_t i = 0; i < mValidInputs.size(); i++) {
            // LSH_PROJECTION's second argument is allowed to have any type.
            // This is the only operation that currently has a type that can be
            // anything independent from any other type. Changing the operand
            // type to any other type will result in a valid model for
            // LSH_PROJECTION. If this is the case, skip the test.
            if (mOpCode == ANEURALNETWORKS_LSH_PROJECTION && i == 1) {
                continue;
            }
            // RANK can have an input of any type.
            if (mOpCode == ANEURALNETWORKS_RANK) {
                continue;
            }
            OperandTypeWithExtraParams newType = mValidInputs[i];
            int32_t originalOperandCode = mValidInputs[i].operandType.type;
            std::set<int32_t> operandTypesToSkip;
            // Transposed and depthwise conv can have either a fully quantized or a
            // per-channel quantized filter for the quantized version of the op.
            if ((mOpCode == ANEURALNETWORKS_TRANSPOSE_CONV_2D ||
                 mOpCode == ANEURALNETWORKS_DEPTHWISE_CONV_2D) &&
                i == 1) {
                if (originalOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
                    originalOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED ||
                    originalOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                    operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
                    operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
                    operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
                }
            }
            // CAST accepts any of the supported types for any of the output types.
            if (mOpCode == ANEURALNETWORKS_CAST && i == 0) {
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_FLOAT16);
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_FLOAT32);
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_INT32);
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
            }
            // RANDOM_MULTINOMIAL's first input can be of either float16 or
            // float32 type while everything else has the same types.
            if (mOpCode == ANEURALNETWORKS_RANDOM_MULTINOMIAL && i == 0) {
                if (originalOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16) {
                    operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_FLOAT32);
                } else if (originalOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32) {
                    operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_FLOAT16);
                }
            }
            // DEQUANTIZE supports any of the input types below for any of the
            // output types.
            if (mOpCode == ANEURALNETWORKS_DEQUANTIZE && i == 0) {
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_SYMM);
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
            }
            // AXIS_ALIGNED_BBOX_TRANSFORM's second input can be either QUANT8_ASYMM
            // or QUANT8_ASYMM_SIGNED.
            if (mOpCode == ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM && i == 1) {
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
                operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
            }

            for (int32_t newOperandCode : kAvailableOperandCodes) {
                if (newOperandCode == originalOperandCode ||
                    operandTypesToSkip.find(newOperandCode) != operandTypesToSkip.end()) {
                    continue;
                }
                // Switching input 7 between bool and int for 10-input CONV_2D
                // switches between the valid "implicit padding with layout param"
                // and the valid "explicit padding without layout param" signatures.
                if (mOpCode == ANEURALNETWORKS_CONV_2D && i == 7 && mValidInputs.size() == 10) {
                    if ((newOperandCode == ANEURALNETWORKS_INT32 &&
                         originalOperandCode == ANEURALNETWORKS_BOOL) ||
                        (newOperandCode == ANEURALNETWORKS_BOOL &&
                         originalOperandCode == ANEURALNETWORKS_INT32)) {
                        continue;
                    }
                }
                // QUANTIZE supports both types below and its output type does
                // not depend on the input type.
                if (mOpCode == ANEURALNETWORKS_QUANTIZE && i == 0 &&
                    (newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16 ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32)) {
                    continue;
                }

                // ARGMIN/MAX supports five input types and has a fixed output type.
                if ((mOpCode == ANEURALNETWORKS_ARGMIN || mOpCode == ANEURALNETWORKS_ARGMAX) &&
                    i == 0 &&
                    (newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16 ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32 ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_INT32 ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED)) {
                    continue;
                }

                // Switching input 8 between bool and int for 11-input DEPTHWISE_CONV_2D
                // switches between the valid "implicit padding with layout param"
                // and the valid "explicit padding without layout param" signatures.
                if (mOpCode == ANEURALNETWORKS_DEPTHWISE_CONV_2D && i == 8 &&
                    mValidInputs.size() == 11) {
                    if ((newOperandCode == ANEURALNETWORKS_INT32 &&
                         originalOperandCode == ANEURALNETWORKS_BOOL) ||
                        (newOperandCode == ANEURALNETWORKS_BOOL &&
                         originalOperandCode == ANEURALNETWORKS_INT32)) {
                        continue;
                    }
                }

                newType.operandType.type = newOperandCode;
                std::vector<OperandTypeWithExtraParams> inputs = mValidInputs;
                inputs[i] = newType;
                int32_t result = addOperation(inputs, mValidOutputs);
                if (ANEURALNETWORKS_NO_ERROR == result) {
                    return false;
                }
            }
        }
        return true;
    }

    bool testMutatingOutputOperandCode() {
        for (uint32_t i = 0; i < mValidOutputs.size(); i++) {
            // LSH_PROJECTION's second argument is allowed to have any type.
            // This is the only operation that currently has a type that can be
            // anything independent from any other type. Changing the operand
            // type to any other type will result in a valid model for
            // LSH_PROJECTION. If this is the case, skip the test.
            if (mOpCode == ANEURALNETWORKS_LSH_PROJECTION && i == 1) {
                continue;
            }
            OperandTypeWithExtraParams newType = mValidOutputs[i].operandType;
            int32_t originalOperandCode = mValidOutputs[i].operandType.type;
            for (int32_t newOperandCode : kAvailableOperandCodes) {
                if (newOperandCode == originalOperandCode) {
                    continue;
                }
                // DEQUANTIZE's output can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
                if (mOpCode == ANEURALNETWORKS_DEQUANTIZE &&
                    (newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16 ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32)) {
                    continue;
                }

                // QUANTIZE's output can be either TENSOR_QUANT8_ASYMM or
                // TENSOR_QUANT8_ASYMM_SIGNED.
                if (mOpCode == ANEURALNETWORKS_QUANTIZE && i == 0 &&
                    (newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED)) {
                    continue;
                }

                // CAST accepts any of the supported types for any of the input types.
                if (mOpCode == ANEURALNETWORKS_CAST && i == 0 &&
                    (newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16 ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32 ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_INT32 ||
                     newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM)) {
                    continue;
                }
                newType.operandType.type = newOperandCode;
                std::vector<OperandTypeWithExtraParams> outputs = mValidOutputs;
                outputs[i] = newType;
                int32_t result = addOperation(mValidInputs, outputs);
                if (ANEURALNETWORKS_NO_ERROR == result) {
                    return false;
                }
            }
        }
        return true;
    }

    bool testMutatingInputOperandCounts() {
        uint32_t numToAdd = 5;
        // LSTM since API 29 supports both 23 and 27 inputs.
        if (mOpCode == ANEURALNETWORKS_LSTM) {
            numToAdd = 3;
        }
        std::vector<OperandTypeWithExtraParams> inputs = mValidInputs;
        for (uint32_t i = 0; i < numToAdd; i++) {
            inputs.push_back(inputs[0]);
            if (ANEURALNETWORKS_NO_ERROR == addOperation(inputs, mValidOutputs)) {
                return false;
            }
        }
        return true;
    }

    bool testMutatingOutputOperandCounts() {
        // SPLIT's number of outputs depends on the value of one of its inputs
        // and is not checked during validation.
        if (mOpCode == ANEURALNETWORKS_SPLIT) {
            return true;
        }
        std::vector<OperandTypeWithExtraParams> outputs = mValidOutputs;
        for (int i = 0; i < 6; i++) {
            outputs.push_back(outputs[0]);
            if (ANEURALNETWORKS_NO_ERROR == addOperation(mValidInputs, outputs)) {
                if (mOpCode == ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN && i < 1) {
                    continue;
                }
                if (mOpCode == ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM && i < 3) {
                    continue;
                }
                if (mOpCode == ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN && i < 3) {
                    continue;
                }
                if (mOpCode == ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM && i < 5) {
                    continue;
                }
                return false;
            }
        }
        return true;
    }

    bool testMutatingInputRanks() {
        for (auto& rankMutator : mInputRankMutators) {
            for (const auto& validMutation : rankMutator.ValidInputsMutations(mValidInputs)) {
                int32_t result = addOperation(validMutation, mValidOutputs);
                if (ANEURALNETWORKS_NO_ERROR != result) {
                    return false;
                }
            }

            for (const auto& invalidMutation : rankMutator.InvalidInputsMutations(mValidInputs)) {
                int32_t result = addOperation(invalidMutation, mValidOutputs);
                if (ANEURALNETWORKS_NO_ERROR == result) {
                    return false;
                }
            }
        }

        return true;
    }

   private:
    ANeuralNetworksOperationType mOpCode;
    // The dimensions in the ANeuralNetworksOperandType must outlive the test object.
    std::vector<OperandTypeWithExtraParams> mValidInputs;
    std::vector<OperandTypeWithExtraParams> mValidOutputs;

    std::vector<TensorRankMutator> mInputRankMutators;
};
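
// Illustrative sketch (assumption: a minimal example, not one of the real test
// cases below): a validation test describes the known-good operand signature of
// an operation and lets testOpsValidations() mutate operand codes, counts, and,
// when rank mutators are given, input ranks.
//
//     uint32_t dims[2] = {2, 2};
//     ANeuralNetworksOperandType in = getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, 2, dims);
//     ANeuralNetworksOperandType out = in;
//     OperationTestBase exampleTest(ANEURALNETWORKS_FLOOR, {in}, {out},
//                                   {{TensorRankConstraint::UpTo(4)}});
//     exampleTest.testOpsValidations();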

std::ostream& operator<<(std::ostream& os, const OperandTypeWithExtraParams& operand) {
    const auto& operandType = operand.operandType;
    os << "{ operand_type: { type: " << operandType.type << ", "
       << "dimensionCount: " << operandType.dimensionCount << ", dimensions: [";
    std::for_each(operandType.dimensions, operandType.dimensions + operandType.dimensionCount,
                  [&os](uint32_t dimension) { os << dimension << ", "; });
    os << "], scale: " << operandType.scale << ", zeroPoint: " << operandType.zeroPoint << " }";

    const auto& channelQuant = operand.channelQuant;
    if (channelQuant.has_value()) {
        os << ", channelQuant { channelDim: " << channelQuant->channelDim
           << ", scaleCount: " << channelQuant->scaleCount << ", scales: [";
        std::for_each(channelQuant->scales, channelQuant->scales + channelQuant->scaleCount,
                      [&os](float scale) { os << scale << ", "; });
        os << "] }";
    } else {
        os << ", channelQuant: nullopt";
    }

    if (operand.valueModel.has_value()) {
        os << ", valueModel: " << operand.valueModel.value();
    } else {
        os << ", valueModel: nullopt";
    }
    os << "}";
    return os;
}

inline OperandTypeWithExtraParams MutationWithDimensions(
        const OperandTypeWithExtraParams& origin, const std::vector<uint32_t>& expectedDims) {
    OperandTypeWithExtraParams expected = origin;
    expected.operandType.dimensionCount = expectedDims.size();
    if (expectedDims.size() == 0) {
        expected.operandType.dimensions = nullptr;
    } else {
        expected.operandType.dimensions = expectedDims.data();
    }
    return expected;
}

std::string DescribeMutationWithDimensions(const OperandTypeWithExtraParams& origin,
                                           const std::vector<uint32_t>& expectedDims) {
    std::ostringstream osstream;
    osstream << MutationWithDimensions(origin, expectedDims);
    return osstream.str();
}

MATCHER_P2(IsMutationWithDimensions, origin, expectedDims,
           DescribeMutationWithDimensions(origin, expectedDims)) {
    return arg == MutationWithDimensions(origin, expectedDims);
}

TEST(TensorRankConstraint, ExactlyWillReturnSameInputAsValidMutation) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 3,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::Exactly(3);
    auto validMutationSet = constraint.MutationsWithValidRank({operand});
    ASSERT_EQ(validMutationSet.size(), 1u);
    auto validMutations = *validMutationSet.begin();
    ASSERT_EQ(validMutations.size(), 1u);
    EXPECT_THAT(validMutations[0],
                IsMutationWithDimensions(operand, std::vector<uint32_t>({2, 2, 2})));
};

TEST(TensorRankConstraint, ExactlyWillFailIfValidInputHasInvalidSize) {
    uint32_t opDimensions[2] = {2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 2,
            .dimensions = opDimensions,
    }};
    EXPECT_DEATH(TensorRankConstraint::Exactly(3).MutationsWithValidRank({operand}),
                 ".*assertion.+failed.*");
};

TEST(TensorRankConstraint, ExactlyWillReturnTwoInvalidMutationsWithLowerAndHigherRank) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 3,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::Exactly(3);
    auto invalidMutations = constraint.MutationsWithInvalidRank({operand});
    ASSERT_EQ(invalidMutations.size(), 2u);
    std::for_each(invalidMutations.begin(), invalidMutations.end(),
                  [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
                      EXPECT_EQ(mutations.size(), 1u);
                      if (mutations.size() == 1) {
                          EXPECT_THAT(
                                  mutations[0],
                                  ::testing::AnyOf(
                                          IsMutationWithDimensions(operand,
                                                                   std::vector<uint32_t>({2, 2})),
                                          IsMutationWithDimensions(
                                                  operand, std::vector<uint32_t>({2, 2, 2, 1}))));
                      }
                  });
};

TEST(TensorRankConstraint, AtLeastWillReturnTwoValidMutationsAboveThreshold) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 2,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::AtLeast(1);
    auto invalidMutations =
            constraint.MutationsWithValidRank({(OperandTypeWithExtraParams)operand});
    ASSERT_EQ(invalidMutations.size(), 2u);
    std::for_each(
            invalidMutations.begin(), invalidMutations.end(),
            [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
                EXPECT_EQ(mutations.size(), 1u);
                if (mutations.size() == 1) {
                    EXPECT_THAT(mutations[0],
                                ::testing::AnyOf(IsMutationWithDimensions(
                                                         operand, std::vector<uint32_t>({2})),
                                                 IsMutationWithDimensions(
                                                         operand, std::vector<uint32_t>({2, 2}))));
                }
            });
}

TEST(TensorRankConstraint, AtLeastWillReturnOneInvalidMutationsBelowThreshold) {
    uint32_t opDimensions[2] = {2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 2,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::AtLeast(2);
    auto invalidMutations =
            constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
    ASSERT_EQ(invalidMutations.size(), 1u);
    auto invalidMutationVector = *invalidMutations.begin();
    ASSERT_EQ(invalidMutationVector.size(), 1u);
    ASSERT_THAT(invalidMutationVector[0],
                IsMutationWithDimensions(operand, std::vector<uint32_t>({2})));
}

TEST(TensorRankConstraint, AtLeastWillReturnNoInvalidMutationsIfThresholdIs1) {
    uint32_t opDimensions[1] = {2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 1,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::AtLeast(1);
    auto invalidMutations =
            constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
    ASSERT_EQ(invalidMutations.size(), 0u);
}

TEST(TensorRankConstraint, UpToWillReturnUpToTwoValidMutationsBelowThreshold) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 2,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::UpTo(3);
    auto invalidMutations =
            constraint.MutationsWithValidRank({(OperandTypeWithExtraParams)operand});

    auto expected = std::vector<uint32_t>({7, 7});
    ASSERT_EQ(invalidMutations.size(), 2u);
    std::for_each(invalidMutations.begin(), invalidMutations.end(),
                  [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
                      EXPECT_EQ(mutations.size(), 1u);
                      if (mutations.size() == 1) {
                          EXPECT_THAT(mutations[0],
                                      ::testing::AnyOf(
                                              IsMutationWithDimensions(operand,
                                                                       std::vector<uint32_t>({2})),
                                              IsMutationWithDimensions(
                                                      operand, std::vector<uint32_t>({2, 2, 1}))));
                      }
                  });
}

TEST(TensorRankConstraint, UpToWillReturnOneInvalidMutationsAboveThreshold) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 3,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::UpTo(3);
    auto invalidMutations =
            constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
    ASSERT_EQ(invalidMutations.size(), 1u);
    auto invalidMutationVector = *invalidMutations.begin();
    ASSERT_EQ(invalidMutationVector.size(), 1u);
    ASSERT_THAT(invalidMutationVector[0],
                IsMutationWithDimensions(operand, std::vector<uint32_t>({2, 2, 2, 1})));
}

TEST(TensorRankConstraint, BetweenWillReturnTwoValidMutationsOnRangeBoundaries) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 3,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::Between(2, 4);
    auto validMutations = constraint.MutationsWithValidRank({(OperandTypeWithExtraParams)operand});
    ASSERT_EQ(validMutations.size(), 2u);
    std::for_each(validMutations.begin(), validMutations.end(),
                  [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
                      EXPECT_EQ(mutations.size(), 1u);
                      if (mutations.size() == 1) {
                          EXPECT_THAT(
                                  mutations[0],
                                  ::testing::AnyOf(
                                          IsMutationWithDimensions(operand,
                                                                   std::vector<uint32_t>({2, 2})),
                                          IsMutationWithDimensions(
                                                  operand, std::vector<uint32_t>({2, 2, 2, 1}))));
                      }
                  });
}

TEST(TensorRankConstraint, BetweenWillReturnTwoInvalidMutationsAdjacentToRangeBoundaries) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 3,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::Between(2, 4);
    auto validMutations =
            constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
    ASSERT_EQ(validMutations.size(), 2u);
    std::for_each(
            validMutations.begin(), validMutations.end(),
            [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
                EXPECT_EQ(mutations.size(), 1u);
                if (mutations.size() == 1) {
                    EXPECT_THAT(
                            mutations[0],
                            ::testing::AnyOf(
                                    IsMutationWithDimensions(operand, std::vector<uint32_t>({2})),
                                    IsMutationWithDimensions(
                                            operand, std::vector<uint32_t>({2, 2, 2, 1, 1}))));
                }
            });
}

TEST(TensorRankConstraint, BetweenWillReturnOneInvalidMutationsOnlyIfLowerBoundIs1) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 3,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::Between(1, 4);
    auto invalidMutations =
            constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
    ASSERT_EQ(invalidMutations.size(), 1u);
    auto invalidMutationVector = *invalidMutations.begin();
    ASSERT_EQ(invalidMutationVector.size(), 1u);
    ASSERT_THAT(invalidMutationVector[0],
                IsMutationWithDimensions(operand, std::vector<uint32_t>({2, 2, 2, 1, 1})));
}

TEST(TensorRankMutator, AppliesConstraintToInputsAtGivenInputsToGenerateValidMutations) {
    uint32_t opDimensions0[2] = {0, 0};
    OperandTypeWithExtraParams operand0{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 2,
            .dimensions = opDimensions0,
    }};
    uint32_t opDimensions1[1] = {1};
    OperandTypeWithExtraParams operand1{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 1,
            .dimensions = opDimensions1,
    }};
    uint32_t opDimensions2[2] = {2, 2};
    OperandTypeWithExtraParams operand2{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 2,
            .dimensions = opDimensions2,
    }};
    TensorRankMutator mutator{TensorRankConstraint::AtLeast(2), {0, 2}};

    const auto mutationSet = mutator.ValidInputsMutations({operand0, operand1, operand2});
    ASSERT_EQ(mutationSet.size(), 2u);
    std::for_each(mutationSet.begin(), mutationSet.end(),
                  [&](const std::vector<OperandTypeWithExtraParams>& mutatedInputs) {
                      EXPECT_EQ(mutatedInputs.size(), 3u);
                      if (mutatedInputs.size() == 3) {
                          EXPECT_EQ(mutatedInputs[0].operandType.dimensionCount,
                                    mutatedInputs[2].operandType.dimensionCount);
                          EXPECT_THAT(mutatedInputs[0],
                                      ::testing::AnyOf(
                                              IsMutationWithDimensions(
                                                      operand0, std::vector<uint32_t>({0, 0})),
                                              IsMutationWithDimensions(
                                                      operand0, std::vector<uint32_t>({0, 0, 1}))));

                          EXPECT_EQ(mutatedInputs[1], operand1);

                          EXPECT_THAT(mutatedInputs[2],
                                      ::testing::AnyOf(
                                              IsMutationWithDimensions(
                                                      operand2, std::vector<uint32_t>({2, 2})),
                                              IsMutationWithDimensions(
                                                      operand2, std::vector<uint32_t>({2, 2, 1}))));
                      }
                  });
}

TEST(TensorRankMutator, AppliesConstraintToInputsAtGivenInputsToGenerateInvalidMutations) {
    uint32_t opDimensions0[2] = {0, 0};
    OperandTypeWithExtraParams operand0{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 2,
            .dimensions = opDimensions0,
    }};
    uint32_t opDimensions1[1] = {1};
    OperandTypeWithExtraParams operand1{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 1,
            .dimensions = opDimensions1,
    }};
    uint32_t opDimensions2[2] = {2, 2};
    OperandTypeWithExtraParams operand2{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 2,
            .dimensions = opDimensions2,
    }};
    TensorRankMutator mutator{TensorRankConstraint::AtLeast(2), {0, 2}};

    const auto mutationSet = mutator.InvalidInputsMutations({operand0, operand1, operand2});
    ASSERT_EQ(mutationSet.size(), 1u);
    std::for_each(
            mutationSet.begin(), mutationSet.end(),
            [&](const std::vector<OperandTypeWithExtraParams>& mutatedInputs) {
                EXPECT_EQ(mutatedInputs.size(), 3u);
                if (mutatedInputs.size() == 3) {
                    EXPECT_THAT(mutatedInputs[0],
                                IsMutationWithDimensions(operand0, std::vector<uint32_t>({0})));

                    EXPECT_EQ(mutatedInputs[1], operand1);

                    EXPECT_THAT(mutatedInputs[2],
                                IsMutationWithDimensions(operand2, std::vector<uint32_t>({2})));
                }
            });
}

void argMinMaxTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
    SCOPED_TRACE(inputOperandType);
    uint32_t inputDimensions[4] = {2, 2, 2, 2};
    ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
    ANeuralNetworksOperandType axis = {
            .type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = nullptr,
    };
    uint32_t outputDimensions[3] = {2, 2, 2};
    ANeuralNetworksOperandType output = {
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 3,
            .dimensions = outputDimensions,
    };
    OperationTestBase test(operationCode, {input0, axis}, {output});
    test.testOpsValidations();
}

TEST(OperationValidationTest, ARGMIN) {
    argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_FLOAT16);
    argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_FLOAT32);
    argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_INT32);
    argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

TEST(OperationValidationTest, ARGMAX) {
    argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_FLOAT16);
    argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_FLOAT32);
    argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_INT32);
    argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

void dequantizeOpTest(int32_t inputOperandType, int32_t outputOperandType) {
    SCOPED_TRACE(testing::Message()
                 << "inputType: " << inputOperandType << ", outputType: " << outputOperandType);
    uint32_t inputDimensions[4] = {2, 2, 2, 2};
    ANeuralNetworksOperandType input = getOpType(inputOperandType, 4, inputDimensions);
    ANeuralNetworksOperandType output = getOpType(outputOperandType, 4, inputDimensions);
    OperationTestBase dequantizeTest(ANEURALNETWORKS_DEQUANTIZE, {input}, {output},
                                     {{TensorRankConstraint::UpTo(4)}});
    dequantizeTest.testOpsValidations();
}

TEST(OperationValidationTest, DEQUANTIZE) {
    dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT16);
    dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT32);
    dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_SYMM, ANEURALNETWORKS_TENSOR_FLOAT16);
    dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_SYMM, ANEURALNETWORKS_TENSOR_FLOAT32);
    dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
                     ANEURALNETWORKS_TENSOR_FLOAT16);
    dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
                     ANEURALNETWORKS_TENSOR_FLOAT32);
    dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_TENSOR_FLOAT16);
    dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_TENSOR_FLOAT32);
}

void expandDimsTest(int32_t inputOperandType) {
    SCOPED_TRACE(inputOperandType);
    uint32_t inputDimensions[4] = {2, 2, 2, 2};
    ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
    ANeuralNetworksOperandType axis = {
            .type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = nullptr,
    };
    uint32_t outputDimensions[5] = {2, 2, 2, 2, 2};
    ANeuralNetworksOperandType output = getOpType(inputOperandType, 5, outputDimensions);
    OperationTestBase test(ANEURALNETWORKS_EXPAND_DIMS, {input0, axis}, {output});
    test.testOpsValidations();
}

TEST(OperationValidationTest, EXPAND_DIMS) {
    expandDimsTest(ANEURALNETWORKS_TENSOR_FLOAT16);
    expandDimsTest(ANEURALNETWORKS_TENSOR_FLOAT32);
    expandDimsTest(ANEURALNETWORKS_TENSOR_INT32);
    expandDimsTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    expandDimsTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

void gatherTest(int32_t inputOperandType) {
    SCOPED_TRACE(inputOperandType);
    uint32_t inputDimensions[4] = {2, 2, 2, 2};
    ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
    ANeuralNetworksOperandType axis = {
            .type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = nullptr,
    };
    ANeuralNetworksOperandType input2 = {
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 4,
            .dimensions = inputDimensions,
    };
    uint32_t outputDimensions[7] = {2, 2, 2, 2, 2, 2, 2};
    ANeuralNetworksOperandType output = getOpType(inputOperandType, 7, outputDimensions);
    OperationTestBase test(ANEURALNETWORKS_GATHER, {input0, axis, input2}, {output});
    test.testOpsValidations();
}

TEST(OperationValidationTest, GATHER) {
    gatherTest(ANEURALNETWORKS_TENSOR_FLOAT16);
    gatherTest(ANEURALNETWORKS_TENSOR_FLOAT32);
    gatherTest(ANEURALNETWORKS_TENSOR_INT32);
    gatherTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    gatherTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

void quantizeOpTest(int32_t inputOperandCode, int32_t outputOperandCode) {
    uint32_t inputDimensions[4] = {2, 2, 2, 2};
    ANeuralNetworksOperandType input = {
            .type = inputOperandCode, .dimensionCount = 4, .dimensions = inputDimensions};
    ANeuralNetworksOperandType output = {.type = outputOperandCode,
                                         .dimensionCount = 4,
                                         .dimensions = inputDimensions,
                                         .scale = 1.0f,
                                         .zeroPoint = 0};
    OperationTestBase test(ANEURALNETWORKS_QUANTIZE, {input}, {output});
    test.testOpsValidations();
}

TEST(OperationValidationTest, QUANTIZE_float16) {
    quantizeOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    quantizeOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

TEST(OperationValidationTest, QUANTIZE_float32) {
    quantizeOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    quantizeOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

TEST(OperationValidationTest, QUANTIZED_16BIT_LSTM) {
    uint32_t oneDimensional[1] = {5};
    uint32_t twoDimensional[2] = {5, 5};

    ANeuralNetworksOperandType int32Tensor1D = {
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 1,
            .dimensions = oneDimensional,
            .scale = 0.0000318,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType quant8Tensor2D = {
            .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
            .dimensionCount = 2,
            .dimensions = twoDimensional,
            .scale = 0.00408021,
            .zeroPoint = 100,
    };
    ANeuralNetworksOperandType quant16Tensor2D = {
            .type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
            .dimensionCount = 2,
            .dimensions = twoDimensional,
            .scale = 1.0 / 2048,
            .zeroPoint = 0,
    };

    ANeuralNetworksOperandType input = quant8Tensor2D;
    ANeuralNetworksOperandType input_to_input_weights = quant8Tensor2D;
    ANeuralNetworksOperandType input_to_forget_weights = quant8Tensor2D;
    ANeuralNetworksOperandType input_to_cell_weights = quant8Tensor2D;
    ANeuralNetworksOperandType input_to_output_weights = quant8Tensor2D;
    ANeuralNetworksOperandType recurrent_to_input_weights = quant8Tensor2D;
    ANeuralNetworksOperandType recurrent_to_forget_weights = quant8Tensor2D;
    ANeuralNetworksOperandType recurrent_to_cell_weights = quant8Tensor2D;
    ANeuralNetworksOperandType recurrent_to_output_weights = quant8Tensor2D;
    ANeuralNetworksOperandType input_gate_bias = int32Tensor1D;
    ANeuralNetworksOperandType forget_gate_bias = int32Tensor1D;
    ANeuralNetworksOperandType cell_gate_bias = int32Tensor1D;
    ANeuralNetworksOperandType output_gate_bias = int32Tensor1D;
    ANeuralNetworksOperandType prev_cell_state = quant16Tensor2D;
    ANeuralNetworksOperandType prev_output = quant8Tensor2D;

    ANeuralNetworksOperandType cell_state_out = quant16Tensor2D;
    ANeuralNetworksOperandType output = quant8Tensor2D;

    OperationTestBase test(
            ANEURALNETWORKS_QUANTIZED_16BIT_LSTM,
            {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights,
             input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights,
             recurrent_to_cell_weights, recurrent_to_output_weights, input_gate_bias,
             forget_gate_bias, cell_gate_bias, output_gate_bias, prev_cell_state, prev_output},
            {cell_state_out, output});
    test.testOpsValidations();
}

void splitTest(int32_t inputOperandType) {
    SCOPED_TRACE(inputOperandType);
    uint32_t inputDimensions[4] = {2, 2, 2, 2};
    ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
    ANeuralNetworksOperandType axis = {
            .type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = nullptr,
    };
    ANeuralNetworksOperandType count = {
            .type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = nullptr,
    };
    uint32_t outputDimensions[2] = {2, 2};
    ANeuralNetworksOperandType output0 = getOpType(inputOperandType, 2, outputDimensions);
    ANeuralNetworksOperandType output1 = getOpType(inputOperandType, 2, outputDimensions);
    OperationTestBase test(ANEURALNETWORKS_SPLIT, {input0, axis, count}, {output0, output1});
    test.testOpsValidations();
}

TEST(OperationValidationTest, SPLIT) {
    splitTest(ANEURALNETWORKS_TENSOR_FLOAT16);
    splitTest(ANEURALNETWORKS_TENSOR_FLOAT32);
    splitTest(ANEURALNETWORKS_TENSOR_INT32);
    splitTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    splitTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

void tileTest(int32_t inputOperandType) {
    SCOPED_TRACE(inputOperandType);
    uint32_t inputDimensions[4] = {2, 2, 2, 2};
    ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
    uint32_t multiplesDimensions[1] = {4};
    ANeuralNetworksOperandType multiples = {
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 1,
            .dimensions = multiplesDimensions,
    };
    uint32_t outputDimensions[8] = {2, 2, 2, 2, 2, 2, 2, 2};
    ANeuralNetworksOperandType output0 = getOpType(inputOperandType, 8, outputDimensions);
    OperationTestBase test(ANEURALNETWORKS_TILE, {input0, multiples}, {output0});
    test.testOpsValidations();
}

TEST(OperationValidationTest, TILE) {
    tileTest(ANEURALNETWORKS_TENSOR_FLOAT16);
    tileTest(ANEURALNETWORKS_TENSOR_FLOAT32);
    tileTest(ANEURALNETWORKS_TENSOR_INT32);
    tileTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    tileTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

void topkV2Test(int32_t inputOperandType) {
    SCOPED_TRACE(inputOperandType);
    uint32_t inputDimensions[4] = {4, 5, 6, 7};
    ANeuralNetworksOperandType input = getOpType(inputOperandType, 4, inputDimensions);
    ANeuralNetworksOperandType k = getOpType(ANEURALNETWORKS_INT32);
    uint32_t outputDimensions[4] = {4, 5, 6, 3};
    ANeuralNetworksOperandType outputValues = getOpType(inputOperandType, 4, outputDimensions);
    ANeuralNetworksOperandType outputIndices =
            getOpType(ANEURALNETWORKS_TENSOR_INT32, 4, outputDimensions);
    OperationTestBase test(ANEURALNETWORKS_TOPK_V2, {input, k}, {outputValues, outputIndices});
    test.testOpsValidations();
}

TEST(OperationValidationTest, TOPK_V2) {
    topkV2Test(ANEURALNETWORKS_TENSOR_FLOAT16);
    topkV2Test(ANEURALNETWORKS_TENSOR_FLOAT32);
    topkV2Test(ANEURALNETWORKS_TENSOR_INT32);
    topkV2Test(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    topkV2Test(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}
1289
simpleMathOpTest(ANeuralNetworksOperationType operationCode,int32_t operandCode)1290 void simpleMathOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1291 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1292 ANeuralNetworksOperandType input1 = getOpType(operandCode, 4, inputDimensions);
1293
1294 ANeuralNetworksOperandType input2 = input1;
1295 ANeuralNetworksOperandType output = input1;
1296 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
1297 .dimensionCount = 0,
1298 .dimensions = nullptr,
1299 .scale = 0.0f,
1300 .zeroPoint = 0};
1301
1302 OperationTestBase simpleMathTest(
1303 operationCode, {input1, input2, activation}, {output},
1304 {{TensorRankConstraint::UpTo(4), {0}}, {TensorRankConstraint::UpTo(4), {1}}});
1305 simpleMathTest.testOpsValidations();
1306 }
1307
TEST(OperationValidationTest,ADD_float16)1308 TEST(OperationValidationTest, ADD_float16) {
1309 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_FLOAT16);
1310 }
1311
TEST(OperationValidationTest,ADD_float32)1312 TEST(OperationValidationTest, ADD_float32) {
1313 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_FLOAT32);
1314 }
1315
TEST(OperationValidationTest,ADD_quant8)1316 TEST(OperationValidationTest, ADD_quant8) {
1317 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1318 }
1319
1320 TEST(OperationValidationTest, ADD_quant8_signed) {
1321 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1322 }
1323
1324 TEST(OperationValidationTest, ADD_int32) {
1325 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_INT32);
1326 }
1327
1328 TEST(OperationValidationTest, MUL_float16) {
1329 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_FLOAT16);
1330 }
1331
1332 TEST(OperationValidationTest, MUL_float32) {
1333 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_FLOAT32);
1334 }
1335
1336 TEST(OperationValidationTest, MUL_quant8) {
1337 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1338 }
1339
1340 TEST(OperationValidationTest, MUL_quant8_signed) {
1341 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1342 }
1343
1344 TEST(OperationValidationTest, MUL_int32) {
1345 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_INT32);
1346 }
1347
1348 TEST(OperationValidationTest, SUB_float16) {
1349 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_FLOAT16);
1350 }
1351
1352 TEST(OperationValidationTest, SUB_float32) {
1353 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_FLOAT32);
1354 }
1355
1356 TEST(OperationValidationTest, SUB_quant8) {
1357 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1358 }
1359
1360 TEST(OperationValidationTest, SUB_quant8_signed) {
1361 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1362 }
1363
1364 TEST(OperationValidationTest, SUB_int32) {
1365 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_INT32);
1366 }
1367
1368 TEST(OperationValidationTest, DIV_float16) {
1369 simpleMathOpTest(ANEURALNETWORKS_DIV, ANEURALNETWORKS_TENSOR_FLOAT16);
1370 }
1371
1372 TEST(OperationValidationTest, DIV_float32) {
1373 simpleMathOpTest(ANEURALNETWORKS_DIV, ANEURALNETWORKS_TENSOR_FLOAT32);
1374 }
1375
1376 TEST(OperationValidationTest, DIV_int32) {
1377 simpleMathOpTest(ANEURALNETWORKS_DIV, ANEURALNETWORKS_TENSOR_INT32);
1378 }
1379
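// Negative case: for quantized MUL the output scale must exceed the product of the input scales,
// so scale-1.0 inputs with an output scale of 0.5 are expected to be rejected as BAD_DATA.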
1380 TEST(OperationValidationTest, MUL_quant8_bad_output_scale) {
1381 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1382 ANeuralNetworksOperandType input1 =
1383 getOpType(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 4, inputDimensions);
1384 ANeuralNetworksOperandType input2 = input1;
1385 ANeuralNetworksOperandType output = input1;
1386 input1.scale = 1.0f;
1387 input2.scale = 1.0f;
1388 output.scale = 0.5f;
1389 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
1390 .dimensionCount = 0,
1391 .dimensions = nullptr,
1392 .scale = 0.0f,
1393 .zeroPoint = 0};
1394
1395 OperationTestBase mulTest(ANEURALNETWORKS_MUL, {input1, input2, activation}, {output});
1396 mulTest.testFailure(ANEURALNETWORKS_BAD_DATA);
1397 }
1398
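// Shared validation for two-input element-wise ops (MAXIMUM/MINIMUM) that take no activation
// scalar; exercised here with rank-5 tensors.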
1399 void binaryOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1400 uint32_t inputDimensions[] = {2, 2, 2, 2, 2};
1401 ANeuralNetworksOperandType input1 = getOpType(operandCode, 5, inputDimensions);
1402
1403 ANeuralNetworksOperandType input2 = input1;
1404 ANeuralNetworksOperandType output = input1;
1405
1406 OperationTestBase test(operationCode, {input1, input2}, {output});
1407 test.testOpsValidations();
1408 }
1409
1410 TEST(OperationValidationTest, MAXIMUM_float16) {
1411 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_FLOAT16);
1412 }
1413
1414 TEST(OperationValidationTest, MAXIMUM_float32) {
1415 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_FLOAT32);
1416 }
1417
1418 TEST(OperationValidationTest, MAXIMUM_int32) {
1419 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_INT32);
1420 }
1421
1422 TEST(OperationValidationTest, MAXIMUM_quant8) {
1423 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1424 }
1425
1426 TEST(OperationValidationTest, MAXIMUM_quant8signed) {
1427 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1428 }
1429
1430 TEST(OperationValidationTest, MINIMUM_float16) {
1431 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_FLOAT16);
1432 }
1433
1434 TEST(OperationValidationTest, MINIMUM_float32) {
1435 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_FLOAT32);
1436 }
1437
1438 TEST(OperationValidationTest, MINIMUM_int32) {
1439 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_INT32);
1440 }
1441
1442 TEST(OperationValidationTest, MINIMUM_quant8) {
1443 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1444 }
1445
1446 TEST(OperationValidationTest, MINIMUM_quant8signed) {
1447 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1448 }
1449
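// Shared validation for single-input element-wise ops; ops whose input rank is capped at 4
// (FLOOR, LOGISTIC, RELU/RELU1/RELU6, TANH) also get a rank mutator.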
1450 void activationOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1451 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1452 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1453
1454 ANeuralNetworksOperandType output = input;
1455 std::vector<TensorRankMutator> inputRankMutators;
1456 if (operationCode == ANEURALNETWORKS_FLOOR || operationCode == ANEURALNETWORKS_LOGISTIC ||
1457 operationCode == ANEURALNETWORKS_RELU || operationCode == ANEURALNETWORKS_RELU1 ||
1458 operationCode == ANEURALNETWORKS_RELU6 || operationCode == ANEURALNETWORKS_TANH) {
1459 inputRankMutators.push_back({TensorRankConstraint::UpTo(4)});
1460 }
1461 OperationTestBase test(operationCode, {input}, {output}, inputRankMutators);
1462 test.testOpsValidations();
1463 }
1464
1465 TEST(OperationValidationTest, ABS_float16) {
1466 activationOpTest(ANEURALNETWORKS_ABS, ANEURALNETWORKS_TENSOR_FLOAT16);
1467 }
1468
1469 TEST(OperationValidationTest, ABS_float32) {
1470 activationOpTest(ANEURALNETWORKS_ABS, ANEURALNETWORKS_TENSOR_FLOAT32);
1471 }
1472
1473 TEST(OperationValidationTest, ABS_int32) {
1474 activationOpTest(ANEURALNETWORKS_ABS, ANEURALNETWORKS_TENSOR_INT32);
1475 }
1476
1477 TEST(OperationValidationTest, EXP_float16) {
1478 activationOpTest(ANEURALNETWORKS_EXP, ANEURALNETWORKS_TENSOR_FLOAT16);
1479 }
1480
1481 TEST(OperationValidationTest, EXP_float32) {
1482 activationOpTest(ANEURALNETWORKS_EXP, ANEURALNETWORKS_TENSOR_FLOAT32);
1483 }
1484
1485 TEST(OperationValidationTest, LOG_float16) {
1486 activationOpTest(ANEURALNETWORKS_LOG, ANEURALNETWORKS_TENSOR_FLOAT16);
1487 }
1488
1489 TEST(OperationValidationTest, LOG_float32) {
1490 activationOpTest(ANEURALNETWORKS_LOG, ANEURALNETWORKS_TENSOR_FLOAT32);
1491 }
1492
1493 TEST(OperationValidationTest, RSQRT_float16) {
1494 activationOpTest(ANEURALNETWORKS_RSQRT, ANEURALNETWORKS_TENSOR_FLOAT16);
1495 }
1496
1497 TEST(OperationValidationTest, RSQRT_float32) {
1498 activationOpTest(ANEURALNETWORKS_RSQRT, ANEURALNETWORKS_TENSOR_FLOAT32);
1499 }
1500
1501 TEST(OperationValidationTest, SIN_float16) {
1502 activationOpTest(ANEURALNETWORKS_SIN, ANEURALNETWORKS_TENSOR_FLOAT16);
1503 }
1504
1505 TEST(OperationValidationTest, SIN_float32) {
1506 activationOpTest(ANEURALNETWORKS_SIN, ANEURALNETWORKS_TENSOR_FLOAT32);
1507 }
1508
1509 TEST(OperationValidationTest, SQRT_float16) {
1510 activationOpTest(ANEURALNETWORKS_SQRT, ANEURALNETWORKS_TENSOR_FLOAT16);
1511 }
1512
1513 TEST(OperationValidationTest, SQRT_float32) {
1514 activationOpTest(ANEURALNETWORKS_SQRT, ANEURALNETWORKS_TENSOR_FLOAT32);
1515 }
1516
1517 TEST(OperationValidationTest, NEG_float16) {
1518 activationOpTest(ANEURALNETWORKS_NEG, ANEURALNETWORKS_TENSOR_FLOAT16);
1519 }
1520
1521 TEST(OperationValidationTest, NEG_float32) {
1522 activationOpTest(ANEURALNETWORKS_NEG, ANEURALNETWORKS_TENSOR_FLOAT32);
1523 }
1524
1525 TEST(OperationValidationTest, NEG_int32) {
1526 activationOpTest(ANEURALNETWORKS_NEG, ANEURALNETWORKS_TENSOR_INT32);
1527 }
1528
1529 TEST(OperationValidationTest, FLOOR_float16) {
1530 activationOpTest(ANEURALNETWORKS_FLOOR, ANEURALNETWORKS_TENSOR_FLOAT16);
1531 }
1532
1533 TEST(OperationValidationTest, FLOOR_float32) {
1534 activationOpTest(ANEURALNETWORKS_FLOOR, ANEURALNETWORKS_TENSOR_FLOAT32);
1535 }
1536
1537 TEST(OperationValidationTest, LOGICAL_NOT_bool) {
1538 activationOpTest(ANEURALNETWORKS_LOGICAL_NOT, ANEURALNETWORKS_TENSOR_BOOL8);
1539 }
1540
1541 TEST(OperationValidationTest, TANH_float16) {
1542 activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_FLOAT16);
1543 }
1544
1545 TEST(OperationValidationTest, TANH_float32) {
1546 activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_FLOAT32);
1547 }
1548
1549 TEST(OperationValidationTest, TANH_quant8) {
1550 activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1551 }
1552
1553 TEST(OperationValidationTest, TANH_quant8_signed) {
1554 activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1555 }
1556
1557 TEST(OperationValidationTest, RELU_float16) {
1558 activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_FLOAT16);
1559 }
1560
1561 TEST(OperationValidationTest, RELU1_float16) {
1562 activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_FLOAT16);
1563 }
1564
1565 TEST(OperationValidationTest, RELU6_float16) {
1566 activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_FLOAT16);
1567 }
1568
1569 TEST(OperationValidationTest, RELU_float32) {
1570 activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_FLOAT32);
1571 }
1572
1573 TEST(OperationValidationTest, RELU1_float32) {
1574 activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_FLOAT32);
1575 }
1576
1577 TEST(OperationValidationTest, RELU6_float32) {
1578 activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_FLOAT32);
1579 }
1580
1581 TEST(OperationValidationTest, RELU_quant8) {
1582 activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1583 }
1584
1585 TEST(OperationValidationTest, RELU1_quant8) {
1586 activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1587 }
1588
1589 TEST(OperationValidationTest, RELU6_quant8) {
1590 activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1591 }
1592
1593 TEST(OperationValidationTest, RELU_quant8_signed) {
1594 activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1595 }
1596
1597 TEST(OperationValidationTest, RELU1_quant8_signed) {
1598 activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1599 }
1600
1601 TEST(OperationValidationTest, RELU6_quant8_signed) {
1602 activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1603 }
1604
1605 TEST(OperationValidationTest, LOGISTIC_float16) {
1606 activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_FLOAT16);
1607 }
1608
1609 TEST(OperationValidationTest, LOGISTIC_float32) {
1610 activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_FLOAT32);
1611 }
1612
1613 TEST(OperationValidationTest, LOGISTIC_quant8) {
1614 activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1615 }
1616
1617 TEST(OperationValidationTest, LOGISTIC_quant8_signed) {
1618 activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1619 }
1620
1621 TEST(OperationValidationTest, HARD_SWISH_float16) {
1622 activationOpTest(ANEURALNETWORKS_HARD_SWISH, ANEURALNETWORKS_TENSOR_FLOAT16);
1623 }
1624
1625 TEST(OperationValidationTest, HARD_SWISH_float32) {
1626 activationOpTest(ANEURALNETWORKS_HARD_SWISH, ANEURALNETWORKS_TENSOR_FLOAT32);
1627 }
1628
1629 TEST(OperationValidationTest, HARD_SWISH_quant8) {
1630 activationOpTest(ANEURALNETWORKS_HARD_SWISH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1631 }
1632
1633 TEST(OperationValidationTest, HARD_SWISH_quant8_signed) {
1634 activationOpTest(ANEURALNETWORKS_HARD_SWISH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1635 }
1636
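// Validates ELU: input tensor plus a scalar alpha whose float type matches the input precision.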
1637 void eluOpTest(int32_t operandCode) {
1638 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1639 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1640 ANeuralNetworksOperandType alpha = (operandCode == ANEURALNETWORKS_TENSOR_FLOAT32)
1641 ? getOpType(ANEURALNETWORKS_FLOAT32)
1642 : getOpType(ANEURALNETWORKS_FLOAT16);
1643
1644 ANeuralNetworksOperandType output = input;
1645 OperationTestBase test(ANEURALNETWORKS_ELU, {input, alpha}, {output});
1646 test.testOpsValidations();
1647 }
1648
1649 TEST(OperationValidationTest, ELU_float16) {
1650 eluOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1651 }
1652
1653 TEST(OperationValidationTest, ELU_float32) {
1654 eluOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1655 }
1656
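// Validates RESHAPE: input tensor (rank up to 4) and a 1-D TENSOR_INT32 target-shape operand.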
1657 void reshapeOpTest(int32_t inputOperandCode) {
1658 SCOPED_TRACE(inputOperandCode);
1659 uint32_t inputDimensions[3] = {2, 3, 4};
1660 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 3, inputDimensions);
1661 uint32_t shapeDims[1] = {2};
1662 ANeuralNetworksOperandType shape = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, shapeDims);
1663 uint32_t outputDimensions[2] = {4, 6};
1664 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 2, outputDimensions);
1665 OperationTestBase test(ANEURALNETWORKS_RESHAPE, {input, shape}, {output},
1666 {{TensorRankConstraint::UpTo(4)}});
1667 test.testOpsValidations();
1668 }
1669
1670 TEST(OperationValidationTest, RESHAPE) {
1671 reshapeOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1672 reshapeOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1673 reshapeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1674 reshapeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1675 }
1676
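// Validates LOG_SOFTMAX: input tensor, a beta scalar matching the input's float precision, and an
// INT32 axis scalar.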
1677 void logSoftmaxOpTest(int32_t inputOperandCode) {
1678 uint32_t inputDimensions[3] = {2, 2, 2};
1679 ANeuralNetworksOperandType input = {.type = inputOperandCode,
1680 .dimensionCount = 3,
1681 .dimensions = inputDimensions,
1682 .scale = 0.0f,
1683 .zeroPoint = 0};
1684 ANeuralNetworksOperandType beta = {.type = (inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32)
1685 ? ANEURALNETWORKS_FLOAT32
1686 : ANEURALNETWORKS_FLOAT16,
1687 .dimensionCount = 0,
1688 .dimensions = nullptr,
1689 .scale = 0.0f,
1690 .zeroPoint = 0};
1691 ANeuralNetworksOperandType axis = {.type = ANEURALNETWORKS_INT32,
1692 .dimensionCount = 0,
1693 .dimensions = nullptr,
1694 .scale = 0.0f,
1695 .zeroPoint = 0};
1696
1697 ANeuralNetworksOperandType output = {.type = inputOperandCode,
1698 .dimensionCount = 3,
1699 .dimensions = inputDimensions,
1700 .scale = 0.0f,
1701 .zeroPoint = 0};
1702
1703 OperationTestBase test(ANEURALNETWORKS_LOG_SOFTMAX, {input, beta, axis}, {output});
1704 test.testOpsValidations();
1705 }
1706
1707 TEST(OperationValidationTest, LOG_SOFTMAX_float16) {
1708 logSoftmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1709 }
1710
1711 TEST(OperationValidationTest, LOG_SOFTMAX_float32) {
1712 logSoftmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1713 }
1714
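// Validates MEAN: input tensor (rank up to 4), TENSOR_INT32 axes, and an INT32 keep-dims scalar.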
1715 void meanOpTest(int32_t inputOperandCode) {
1716 uint32_t inputDimensions[3] = {2, 2, 2};
1717 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 3, inputDimensions);
1718 ANeuralNetworksOperandType dims = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, inputDimensions);
1719 ANeuralNetworksOperandType keepDims = getOpType(ANEURALNETWORKS_INT32);
1720 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 3, inputDimensions);
1721
1722 OperationTestBase test(ANEURALNETWORKS_MEAN, {input, dims, keepDims}, {output},
1723 {{TensorRankConstraint::UpTo(4)}});
1724 test.testOpsValidations();
1725 }
1726
1727 TEST(OperationValidationTest, MEAN_float16) {
1728 meanOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1729 }
1730
1731 TEST(OperationValidationTest, MEAN_float32) {
1732 meanOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1733 }
1734
1735 TEST(OperationValidationTest, MEAN_quant8) {
1736 meanOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1737 }
1738
1739 TEST(OperationValidationTest, MEAN_quant8_signed) {
1740 meanOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1741 }
1742
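// Validates PAD: input tensor (rank up to 4) and a TENSOR_INT32 paddings operand.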
1743 void padOpTest(int32_t inputOperandCode) {
1744 SCOPED_TRACE(inputOperandCode);
1745 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1746 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
1747 uint32_t padSizeDimensions[1] = {4};
1748 ANeuralNetworksOperandType padSize =
1749 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, padSizeDimensions);
1750 uint32_t outputDimensions[4] = {4, 3, 4, 3};
1751 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 4, outputDimensions);
1752 OperationTestBase test(ANEURALNETWORKS_PAD, {input, padSize}, {output},
1753 {{TensorRankConstraint::UpTo(4)}});
1754 test.testOpsValidations();
1755 }
1756
1757 TEST(OperationValidationTest, PAD) {
1758 padOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1759 padOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1760 padOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1761 padOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1762 }
1763
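// Validates PAD_V2: like PAD, with an extra pad-value scalar (FLOAT16/FLOAT32 for float inputs,
// INT32 for quantized inputs).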
1764 void padV2OpTest(int32_t inputOperandCode) {
1765 SCOPED_TRACE(inputOperandCode);
1766 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1767 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
1768 uint32_t padSizeDimensions[1] = {4};
1769 ANeuralNetworksOperandType padSize =
1770 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, padSizeDimensions);
1771 ANeuralNetworksOperandType padValue = getOpType(ANEURALNETWORKS_FLOAT32);
1772 if (inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16) {
1773 padValue = getOpType(ANEURALNETWORKS_FLOAT16);
1774 } else if (inputOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
1775 inputOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
1776 padValue = getOpType(ANEURALNETWORKS_INT32);
1777 }
1778 uint32_t outputDimensions[4] = {4, 3, 4, 3};
1779 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 4, outputDimensions);
1780 OperationTestBase test(ANEURALNETWORKS_PAD_V2, {input, padSize, padValue}, {output},
1781 {{TensorRankConstraint::UpTo(4)}});
1782 test.testOpsValidations();
1783 }
1784
1785 TEST(OperationValidationTest, PAD_V2) {
1786 padV2OpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1787 padV2OpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1788 padV2OpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1789 padV2OpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1790 }
1791
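// Validates SOFTMAX both with and without the optional axis operand; beta is FLOAT16 only for
// float16 inputs.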
1792 void softmaxOpTest(int32_t operandCode) {
1793 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1794 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1795
1796 ANeuralNetworksOperandType output = input;
1797 ANeuralNetworksOperandType beta = getOpType(ANEURALNETWORKS_FLOAT32);
1798 if (operandCode == ANEURALNETWORKS_TENSOR_FLOAT16) {
1799 beta = getOpType(ANEURALNETWORKS_FLOAT16);
1800 }
1801
1802 OperationTestBase softmaxTest(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output},
1803 {{TensorRankConstraint::UpTo(4)}});
1804 softmaxTest.testOpsValidations();
1805
1806 ANeuralNetworksOperandType axis = getOpType(ANEURALNETWORKS_INT32);
1807 OperationTestBase softmaxAxisTest(ANEURALNETWORKS_SOFTMAX, {input, beta, axis}, {output},
1808 {{TensorRankConstraint::UpTo(4)}});
1809 softmaxAxisTest.testOpsValidations();
1810 }
1811
1812 TEST(OperationValidationTest, SOFTMAX_float16) {
1813 softmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1814 }
1815
1816 TEST(OperationValidationTest, SOFTMAX_float32) {
1817 softmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1818 }
1819
1820 TEST(OperationValidationTest, SOFTMAX_quant8) {
1821 softmaxOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1822 }
1823
1824 TEST(OperationValidationTest, SOFTMAX_quant8_signed) {
1825 softmaxOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1826 }
1827
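// Shared validation for the 2-D pooling ops: explicit- and implicit-padding signatures, each with
// and without the optional layout (NCHW) flag.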
1828 void poolingOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1829 uint32_t inputDimensions[4] = {2, 4, 4, 2};
1830 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1831 ANeuralNetworksOperandType output = input;
1832
1833 ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
1834 .dimensionCount = 0,
1835 .dimensions = nullptr,
1836 .scale = 0.0f,
1837 .zeroPoint = 0};
1838 ANeuralNetworksOperandType padLeft = scalar;
1839 ANeuralNetworksOperandType padRight = scalar;
1840 ANeuralNetworksOperandType padTop = scalar;
1841 ANeuralNetworksOperandType padBottom = scalar;
1842 ANeuralNetworksOperandType strideWidth = scalar;
1843 ANeuralNetworksOperandType strideHeight = scalar;
1844 ANeuralNetworksOperandType filterWidth = scalar;
1845 ANeuralNetworksOperandType filterHeight = scalar;
1846 ANeuralNetworksOperandType activation = scalar;
1847
1848 OperationTestBase explicitPoolingTest(operationCode,
1849 {input, padLeft, padRight, padTop, padBottom, strideWidth,
1850 strideHeight, filterWidth, filterHeight, activation},
1851 {output});
1852 explicitPoolingTest.testOpsValidations();
1853
1854 ANeuralNetworksOperandType padImplicit = scalar;
1855 OperationTestBase implicitPoolingTest(
1856 operationCode,
1857 {input, padImplicit, strideWidth, strideHeight, filterWidth, filterHeight, activation},
1858 {output});
1859 implicitPoolingTest.testOpsValidations();
1860
1861 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
1862 .dimensionCount = 0,
1863 .dimensions = nullptr,
1864 .scale = 0.0f,
1865 .zeroPoint = 0};
1866
1867 OperationTestBase explicitNchwPoolingTest(
1868 operationCode,
1869 {input, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight, filterWidth,
1870 filterHeight, activation, layout},
1871 {output});
1872 explicitNchwPoolingTest.testOpsValidations();
1873
1874 OperationTestBase implicitNchwPoolingTest(operationCode,
1875 {input, padImplicit, strideWidth, strideHeight,
1876 filterWidth, filterHeight, activation, layout},
1877 {output});
1878 implicitNchwPoolingTest.testOpsValidations();
1879 }
1880
1881 TEST(OperationValidationTest, AVERAGE_POOL_2D_float16) {
1882 poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT16);
1883 }
1884
1885 TEST(OperationValidationTest, AVERAGE_POOL_2D_float32) {
1886 poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
1887 }
1888
1889 TEST(OperationValidationTest, AVERAGE_POOL_2D_quant8) {
1890 poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1891 }
1892
1893 TEST(OperationValidationTest, AVERAGE_POOL_2D_quant8_signed) {
1894 poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1895 }
1896
1897 TEST(OperationValidationTest, MAX_POOL_2D_float32) {
1898 poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
1899 }
1900
1901 TEST(OperationValidationTest, MAX_POOL_2D_float16) {
1902 poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT16);
1903 }
1904
1905 TEST(OperationValidationTest, MAX_POOL_2D_quant8) {
1906 poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1907 }
1908
1909 TEST(OperationValidationTest, MAX_POOL_2D_quant8_signed) {
1910 poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1911 }
1912
1913 TEST(OperationValidationTest, L2_POOL_2D_float16) {
1914 poolingOpTest(ANEURALNETWORKS_L2_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT16);
1915 }
1916
1917 TEST(OperationValidationTest, L2_POOL_2D_float32) {
1918 poolingOpTest(ANEURALNETWORKS_L2_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
1919 }
1920
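// Shared validation for SPACE_TO_DEPTH / DEPTH_TO_SPACE: input tensor plus a scalar block size,
// with and without the optional layout flag.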
1921 void spaceDepthOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1922 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1923 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1924
1925 ANeuralNetworksOperandType block_size = {.type = ANEURALNETWORKS_INT32,
1926 .dimensionCount = 0,
1927 .dimensions = nullptr,
1928 .scale = 0.0f,
1929 .zeroPoint = 0};
1930 ANeuralNetworksOperandType output = input;
1931
1932 OperationTestBase spaceDepthTest(operationCode, {input, block_size}, {output});
1933 spaceDepthTest.testOpsValidations();
1934
1935 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
1936 .dimensionCount = 0,
1937 .dimensions = nullptr,
1938 .scale = 0.0f,
1939 .zeroPoint = 0};
1940 OperationTestBase spaceDepthNchwTest(operationCode, {input, block_size, layout}, {output});
1941 spaceDepthNchwTest.testOpsValidations();
1942 }
1943
1944 TEST(OperationValidationTest, SPACE_TO_DEPTH_float16) {
1945 spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_FLOAT16);
1946 }
1947
1948 TEST(OperationValidationTest, DEPTH_TO_SPACE_float16) {
1949 spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_FLOAT16);
1950 }
1951
1952 TEST(OperationValidationTest, SPACE_TO_DEPTH_float32) {
1953 spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_FLOAT32);
1954 }
1955
1956 TEST(OperationValidationTest, DEPTH_TO_SPACE_float32) {
1957 spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_FLOAT32);
1958 }
1959
1960 TEST(OperationValidationTest, SPACE_TO_DEPTH_quant8) {
1961 spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1962 }
1963
1964 TEST(OperationValidationTest, DEPTH_TO_SPACE_quant8) {
1965 spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1966 }
1967
1968 TEST(OperationValidationTest, SPACE_TO_DEPTH_quant8signed) {
1969 spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1970 }
1971
1972 TEST(OperationValidationTest, DEPTH_TO_SPACE_quant8signed) {
1973 spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1974 }
1975
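// Shared validation for SPACE_TO_BATCH_ND (which also takes a paddings operand) and
// BATCH_TO_SPACE_ND, each with and without the layout flag.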
1976 void spaceBatchOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1977 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1978 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1979
1980 uint32_t blockDimensions[1] = {2};
1981 ANeuralNetworksOperandType blockShape = {.type = ANEURALNETWORKS_TENSOR_INT32,
1982 .dimensionCount = 1,
1983 .dimensions = blockDimensions,
1984 .scale = 0.0f,
1985 .zeroPoint = 0};
1986 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
1987 .dimensionCount = 0,
1988 .dimensions = nullptr,
1989 .scale = 0.0f,
1990 .zeroPoint = 0};
1991
1992 ANeuralNetworksOperandType padding = blockShape;
1993 ANeuralNetworksOperandType output = input;
1994 if (operationCode == ANEURALNETWORKS_SPACE_TO_BATCH_ND) {
1995 OperationTestBase spaceBatchTest(operationCode, {input, blockShape, padding}, {output});
1996 spaceBatchTest.testOpsValidations();
1997
1998 OperationTestBase spaceBatchNchwTest(operationCode, {input, blockShape, padding, layout},
1999 {output});
2000 spaceBatchNchwTest.testOpsValidations();
2001 } else {
2002 OperationTestBase spaceBatchTest(operationCode, {input, blockShape}, {output});
2003 spaceBatchTest.testOpsValidations();
2004
2005 OperationTestBase spaceBatchNchwTest(operationCode, {input, blockShape, layout}, {output});
2006 spaceBatchNchwTest.testOpsValidations();
2007 }
2008 }
2009
2010 TEST(OperationValidationTest, SPACE_TO_BATCH_ND_float16) {
2011 spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_FLOAT16);
2012 }
2013
2014 TEST(OperationValidationTest, BATCH_TO_SPACE_ND_float16) {
2015 spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_FLOAT16);
2016 }
2017
2018 TEST(OperationValidationTest, SPACE_TO_BATCH_ND_float32) {
2019 spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_FLOAT32);
2020 }
2021
2022 TEST(OperationValidationTest, BATCH_TO_SPACE_ND_float32) {
2023 spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_FLOAT32);
2024 }
2025
2026 TEST(OperationValidationTest, SPACE_TO_BATCH_ND_quant8) {
2027 spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2028 }
2029
2030 TEST(OperationValidationTest, BATCH_TO_SPACE_ND_quant8) {
2031 spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2032 }
2033
2034 TEST(OperationValidationTest, SPACE_TO_BATCH_ND_quant8signed) {
2035 spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2036 }
2037
2038 TEST(OperationValidationTest, BATCH_TO_SPACE_ND_quant8signed) {
2039 spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2040 }
2041
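// Shared validation for TRANSPOSE and SQUEEZE: input tensor (rank up to 4) plus a 1-D
// TENSOR_INT32 permutation/axes operand.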
2042 void transposeAndSqueezeOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
2043 uint32_t inputDimensions[4] = {2, 2, 2, 2};
2044 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
2045
2046 uint32_t blockDimensions[1] = {4};
2047 ANeuralNetworksOperandType dims = {.type = ANEURALNETWORKS_TENSOR_INT32,
2048 .dimensionCount = 1,
2049 .dimensions = blockDimensions,
2050 .scale = 0.0f,
2051 .zeroPoint = 0};
2052
2053 ANeuralNetworksOperandType output = input;
2054 OperationTestBase transposeAndSqueezeTest(operationCode, {input, dims}, {output},
2055 {{TensorRankConstraint::UpTo(4)}});
2056 transposeAndSqueezeTest.testOpsValidations();
2057 }
2058
2059 TEST(OperationValidationTest, TRANSPOSE_float16) {
2060 transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE, ANEURALNETWORKS_TENSOR_FLOAT16);
2061 }
2062
2063 TEST(OperationValidationTest, SQUEEZE_float16) {
2064 transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_FLOAT16);
2065 }
2066
2067 TEST(OperationValidationTest, TRANSPOSE_float32) {
2068 transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE, ANEURALNETWORKS_TENSOR_FLOAT32);
2069 }
2070
2071 TEST(OperationValidationTest, SQUEEZE_float32) {
2072 transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_FLOAT32);
2073 }
2074
2075 TEST(OperationValidationTest, TRANSPOSE_quant8) {
2076 transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2077 }
2078
2079 TEST(OperationValidationTest, TRANSPOSE_quant8signed) {
2080 transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE,
2081 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2082 }
2083
2084 TEST(OperationValidationTest, SQUEEZE_quant8) {
2085 transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2086 }
2087
2088 TEST(OperationValidationTest, SQUEEZE_quant8_signed) {
2089 transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2090 }
2091
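// Validates CONV_2D across explicit/implicit padding, optional layout, and dilation signatures;
// per-channel quantized filters additionally get symm-per-channel params on input 1.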
2092 void convOpTest(int32_t inputOperandCode, int32_t filterOperandCode) {
2093 uint32_t inputDimensions[4] = {2, 4, 4, 2};
2094 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
2095 ANeuralNetworksOperandType output = input;
2096
2097 float filterScales[2] = {0.5f, 1.0f};
2098 ANeuralNetworksOperandType filter = getOpType(filterOperandCode, 4, inputDimensions);
2099 ANeuralNetworksSymmPerChannelQuantParams filterChannelQuantParams = {
2100 .channelDim = 0,
2101 .scaleCount = 2,
2102 .scales = filterScales,
2103 };
2104
2105 uint32_t biasDimensions[1] = {2};
2106 ANeuralNetworksOperandType bias = {.type = inputOperandCode,
2107 .dimensionCount = 1,
2108 .dimensions = biasDimensions,
2109 .scale = 0.0f,
2110 .zeroPoint = 0};
2111 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
2112 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2113 bias.scale = 0.25f;
2114 }
2115 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
2116 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2117 bias.scale = 0.25f;
2118 }
2119 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2120 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2121 bias.scale = 0.0f;
2122 }
2123
2124 ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
2125 .dimensionCount = 0,
2126 .dimensions = nullptr,
2127 .scale = 0.0f,
2128 .zeroPoint = 0};
2129 ANeuralNetworksOperandType padLeft = scalar;
2130 ANeuralNetworksOperandType padRight = scalar;
2131 ANeuralNetworksOperandType padTop = scalar;
2132 ANeuralNetworksOperandType padBottom = scalar;
2133 ANeuralNetworksOperandType strideWidth = scalar;
2134 ANeuralNetworksOperandType strideHeight = scalar;
2135 ANeuralNetworksOperandType dilationHeightFactor = scalar;
2136 ANeuralNetworksOperandType dilationWidthFactor = scalar;
2137 ANeuralNetworksOperandType activation = scalar;
2138
2139 OperationTestBase explicitConvTest(ANEURALNETWORKS_CONV_2D,
2140 {input, filter, bias, padLeft, padRight, padTop, padBottom,
2141 strideWidth, strideHeight, activation},
2142 {output});
2143 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2144 explicitConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2145 }
2146 explicitConvTest.testOpsValidations();
2147
2148 ANeuralNetworksOperandType padImplicit = scalar;
2149 OperationTestBase implicitConvTest(
2150 ANEURALNETWORKS_CONV_2D,
2151 {input, filter, bias, padImplicit, strideWidth, strideHeight, activation}, {output},
2152 {{TensorRankConstraint::Exactly(4), {0, 1}}});
2153 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2154 implicitConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2155 }
2156 implicitConvTest.testOpsValidations();
2157
2158 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
2159 .dimensionCount = 0,
2160 .dimensions = nullptr,
2161 .scale = 0.0f,
2162 .zeroPoint = 0};
2163
2164 OperationTestBase explicitNchwConvTest(
2165 ANEURALNETWORKS_CONV_2D,
2166 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2167 activation, layout},
2168 {output});
2169 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2170 explicitNchwConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2171 }
2172 explicitNchwConvTest.testOpsValidations();
2173
2174 OperationTestBase implicitNchwConvTest(
2175 ANEURALNETWORKS_CONV_2D,
2176 {input, filter, bias, padImplicit, strideWidth, strideHeight, activation, layout},
2177 {output});
2178 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2179 implicitNchwConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2180 }
2181 implicitNchwConvTest.testOpsValidations();
2182
2183 OperationTestBase explicitDilateConvTest(
2184 ANEURALNETWORKS_CONV_2D,
2185 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2186 activation, layout, dilationWidthFactor, dilationHeightFactor},
2187 {output});
2188 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2189 explicitDilateConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2190 }
2191 explicitDilateConvTest.testOpsValidations();
2192
2193 OperationTestBase implicitDilateConvTest(
2194 ANEURALNETWORKS_CONV_2D,
2195 {input, filter, bias, padImplicit, strideWidth, strideHeight, activation, layout,
2196 dilationWidthFactor, dilationHeightFactor},
2197 {output});
2198 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2199 implicitDilateConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2200 }
2201 implicitDilateConvTest.testOpsValidations();
2202 }
2203
2204 TEST(OperationValidationTest, CONV_2D_float16) {
2205 convOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
2206 }
2207
2208 TEST(OperationValidationTest, CONV_2D_float32) {
2209 convOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
2210 }
2211
2212 TEST(OperationValidationTest, CONV_2D_quant8) {
2213 convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2214 }
2215
2216 TEST(OperationValidationTest, CONV_2D_quant8_per_channel) {
2217 convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
2218 }
2219
2220 TEST(OperationValidationTest, CONV_2D_quant8_signed) {
2221 convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
2222 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2223 }
2224
2225 TEST(OperationValidationTest, CONV_2D_quant8_signed_per_channel) {
2226 convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
2227 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
2228 }
2229
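// Validates DEPTHWISE_CONV_2D across the same explicit/implicit, layout, and dilation signatures,
// with the extra depth-multiplier scalar.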
2230 void depthwiseConvOpTest(int32_t inputOperandCode, int32_t filterOperandCode) {
2231 uint32_t inputDimensions[4] = {1, 2, 2, 2};
2232 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
2233 ANeuralNetworksOperandType output = input;
2234
2235 float filterScales[2] = {0.5f, 1.0f};
2236 ANeuralNetworksOperandType filter = getOpType(filterOperandCode, 4, inputDimensions);
2237 ANeuralNetworksSymmPerChannelQuantParams filterChannelQuantParams = {
2238 .channelDim = 3,
2239 .scaleCount = 2,
2240 .scales = filterScales,
2241 };
2242
2243 uint32_t biasDimensions[1] = {2};
2244 ANeuralNetworksOperandType bias = {.type = inputOperandCode,
2245 .dimensionCount = 1,
2246 .dimensions = biasDimensions,
2247 .scale = 0.0f,
2248 .zeroPoint = 0};
2249 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
2250 filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
2251 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2252 bias.scale = 0.25f;
2253 }
2254 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2255 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2256 bias.scale = 0.0f;
2257 }
2258
2259 ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
2260 .dimensionCount = 0,
2261 .dimensions = nullptr,
2262 .scale = 0.0f,
2263 .zeroPoint = 0};
2264 ANeuralNetworksOperandType padLeft = scalar;
2265 ANeuralNetworksOperandType padRight = scalar;
2266 ANeuralNetworksOperandType padTop = scalar;
2267 ANeuralNetworksOperandType padBottom = scalar;
2268 ANeuralNetworksOperandType strideWidth = scalar;
2269 ANeuralNetworksOperandType strideHeight = scalar;
2270 ANeuralNetworksOperandType multiplier = scalar;
2271 ANeuralNetworksOperandType activation = scalar;
2272
2273 OperationTestBase explicitDepthwiseConvTest(
2274 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2275 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2276 multiplier, activation},
2277 {output});
2278 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2279 explicitDepthwiseConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2280 }
2281 explicitDepthwiseConvTest.testOpsValidations();
2282
2283 ANeuralNetworksOperandType padImplicit = scalar;
2284 OperationTestBase implicitDepthwiseConvTest(
2285 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2286 {input, filter, bias, padImplicit, strideWidth, strideHeight, multiplier, activation},
2287 {output});
2288 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2289 implicitDepthwiseConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2290 }
2291 implicitDepthwiseConvTest.testOpsValidations();
2292
2293 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
2294 .dimensionCount = 0,
2295 .dimensions = nullptr,
2296 .scale = 0.0f,
2297 .zeroPoint = 0};
2298
2299 OperationTestBase explicitNchwDepthwiseConvTest(
2300 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2301 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2302 multiplier, activation, layout},
2303 {output});
2304 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2305 explicitNchwDepthwiseConvTest.setInputSymmPerChannelQuantParams(1,
2306 filterChannelQuantParams);
2307 }
2308 explicitNchwDepthwiseConvTest.testOpsValidations();
2309
2310 OperationTestBase implicitNchwDepthwiseConvTest(ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2311 {input, filter, bias, padImplicit, strideWidth,
2312 strideHeight, multiplier, activation, layout},
2313 {output});
2314 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2315 implicitNchwDepthwiseConvTest.setInputSymmPerChannelQuantParams(1,
2316 filterChannelQuantParams);
2317 }
2318 implicitNchwDepthwiseConvTest.testOpsValidations();
2319
2320 ANeuralNetworksOperandType dilationHeightFactor = scalar;
2321 ANeuralNetworksOperandType dilationWidthFactor = scalar;
2322
2323 OperationTestBase explicitDilationDepthwiseConvTest(
2324 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2325 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2326 multiplier, activation, layout, dilationWidthFactor, dilationHeightFactor},
2327 {output});
2328 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2329 explicitDilationDepthwiseConvTest.setInputSymmPerChannelQuantParams(
2330 1, filterChannelQuantParams);
2331 }
2332 explicitDilationDepthwiseConvTest.testOpsValidations();
2333
2334 OperationTestBase implicitDilationDepthwiseConvTest(
2335 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2336 {input, filter, bias, padImplicit, strideWidth, strideHeight, multiplier, activation,
2337 layout, dilationWidthFactor, dilationHeightFactor},
2338 {output});
2339 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2340 implicitDilationDepthwiseConvTest.setInputSymmPerChannelQuantParams(
2341 1, filterChannelQuantParams);
2342 }
2343 implicitDilationDepthwiseConvTest.testOpsValidations();
2344 }
2345
2346 TEST(OperationValidationTest, DEPTHWISE_CONV_2D_float32) {
2347 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
2348 }
2349
2350 TEST(OperationValidationTest, DEPTHWISE_CONV_2D_float16) {
2351 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
2352 }
2353
2354 TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8) {
2355 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2356 }
2357
2358 TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8_per_channel) {
2359 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
2360 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
2361 }
2362
2363 TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8_signed) {
2364 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
2365 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2366 }
2367
2368 TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8_signed_per_channel) {
2369 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
2370 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
2371 }
2372
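// Validates FULLY_CONNECTED: input rank 2-4, rank-2 weights, rank-1 bias (TENSOR_INT32 with a
// nonzero scale for quantized inputs), and an INT32 activation scalar.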
2373 void fullyConnectedOpTest(int32_t operandCode) {
2374 uint32_t inputDimensions[2] = {5, 5};
2375 ANeuralNetworksOperandType input = getOpType(operandCode, 2, inputDimensions);
2376
2377 ANeuralNetworksOperandType weights = input;
2378 ANeuralNetworksOperandType output = input;
2379
2380 uint32_t biasDimensions[1] = {5};
2381 ANeuralNetworksOperandType bias = {.type = operandCode,
2382 .dimensionCount = 1,
2383 .dimensions = biasDimensions,
2384 .scale = 0.0f,
2385 .zeroPoint = 0};
2386 if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
2387 operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
2388 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2389 bias.scale = 0.25f;
2390 }
2391
2392 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
2393 .dimensionCount = 0,
2394 .dimensions = nullptr,
2395 .scale = 0.0f,
2396 .zeroPoint = 0};
2397
2398 OperationTestBase fullyConnectedTest(ANEURALNETWORKS_FULLY_CONNECTED,
2399 {input, weights, bias, activation}, {output},
2400 {{TensorRankConstraint::Between(2, 4), {0}},
2401 {TensorRankConstraint::Exactly(2), {1}},
2402 {TensorRankConstraint::Exactly(1), {2}}});
2403 fullyConnectedTest.testOpsValidations();
2404 }
2405
2406 TEST(OperationValidationTest, FULLY_CONNECTED_float16) {
2407 fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
2408 }
2409
2410 TEST(OperationValidationTest, FULLY_CONNECTED_float32) {
2411 fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2412 }
2413
2414 TEST(OperationValidationTest, FULLY_CONNECTED_quant8) {
2415 fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2416 }
2417
2418 TEST(OperationValidationTest, FULLY_CONNECTED_quant8_signed) {
2419 fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2420 }
2421
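// Validates CONCATENATION with one and with two input tensors plus a trailing INT32 scalar;
// tensor inputs are limited to rank 4.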
2422 void concatenationTest(int32_t operandCode) {
2423 uint32_t inputDimensions[2] = {5, 5};
2424 ANeuralNetworksOperandType input1 = getOpType(operandCode, 2, inputDimensions);
2425 ANeuralNetworksOperandType input2 = input1;
2426 ANeuralNetworksOperandType output = input1;
2427
2428 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
2429 .dimensionCount = 0,
2430 .dimensions = nullptr,
2431 .scale = 0.0f,
2432 .zeroPoint = 0};
2433
2434 OperationTestBase concat2Test(ANEURALNETWORKS_CONCATENATION, {input1, input2, activation},
2435 {output}, {{TensorRankConstraint::UpTo(4), {0, 1}}});
2436 concat2Test.testOpsValidations();
2437
2438 OperationTestBase concat1Test(ANEURALNETWORKS_CONCATENATION, {input1, activation}, {output},
2439 {{TensorRankConstraint::UpTo(4)}});
2440 concat1Test.testOpsValidations();
2441 }
2442
2443 TEST(OperationValidationTest, CONCATENATION_float16) {
2444 concatenationTest(ANEURALNETWORKS_TENSOR_FLOAT16);
2445 }
2446
2447 TEST(OperationValidationTest, CONCATENATION_float32) {
2448 concatenationTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2449 }
2450
2451 TEST(OperationValidationTest, CONCATENATION_quant8) {
2452 concatenationTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2453 }
2454
2455 TEST(OperationValidationTest, CONCATENATION_quant8_signed) {
2456 concatenationTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2457 }
2458
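// Validates RESIZE_BILINEAR with the output height/width given either as INT32 sizes or as float
// scale factors, with and without the layout flag.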
2459 void resizeBilinearOpTest(int32_t inputOperandCode, int32_t scalarOperandCode) {
2460 SCOPED_TRACE(inputOperandCode);
2461 uint32_t inputDimensions[4] = {2, 2, 2, 2};
2462 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
2463 ANeuralNetworksOperandType height = getOpType(scalarOperandCode);
2464 ANeuralNetworksOperandType width = height;
2465 ANeuralNetworksOperandType output = input;
2466
2467 OperationTestBase resizeTest(ANEURALNETWORKS_RESIZE_BILINEAR, {input, height, width}, {output});
2468 resizeTest.testOpsValidations();
2469
2470 ANeuralNetworksOperandType layout = getOpType(ANEURALNETWORKS_BOOL);
2471 OperationTestBase resizeNchwTest(ANEURALNETWORKS_RESIZE_BILINEAR,
2472 {input, height, width, layout}, {output});
2473 resizeNchwTest.testOpsValidations();
2474 }
2475
2476 TEST(OperationValidationTest, RESIZE_BILINEAR) {
2477 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_INT32);
2478 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_INT32);
2479 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_INT32);
2480 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_INT32);
2481 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_FLOAT16);
2482 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_FLOAT32);
2483 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_FLOAT32);
2484 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_FLOAT32);
2485 }
2486
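// Validates EMBEDDING_LOOKUP: 1-D TENSOR_INT32 indices and a 2-D values tensor of the given type.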
2487 void embeddingLookupTest(int32_t operandCode) {
2488 uint32_t lookupDimensions[1] = {5};
2489 ANeuralNetworksOperandType lookup = {.type = ANEURALNETWORKS_TENSOR_INT32,
2490 .dimensionCount = 1,
2491 .dimensions = lookupDimensions,
2492 .scale = 0.0f,
2493 .zeroPoint = 0};
2494
2495 uint32_t inputDimensions[2] = {5, 5};
2496 ANeuralNetworksOperandType input = getOpType(operandCode, 2, inputDimensions);
2497 ANeuralNetworksOperandType output = input;
2498
2499 OperationTestBase embedLookupTest(ANEURALNETWORKS_EMBEDDING_LOOKUP, {lookup, input}, {output});
2500 embedLookupTest.testOpsValidations();
2501 }
2502
2503 TEST(OperationValidationTest, EMBEDDING_LOOKUP_float32) {
2504 embeddingLookupTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2505 }
2506
2507 TEST(OperationValidationTest, EMBEDDING_LOOKUP_int32) {
2508 embeddingLookupTest(ANEURALNETWORKS_TENSOR_INT32);
2509 }
2510
2511 TEST(OperationValidationTest, EMBEDDING_LOOKUP_quant8) {
2512 embeddingLookupTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2513 }
2514
2515 TEST(OperationValidationTest, EMBEDDING_LOOKUP_quant8_signed) {
2516 embeddingLookupTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2517 }
2518
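// Validates HASHTABLE_LOOKUP: TENSOR_INT32 lookups and keys, a values tensor, and two outputs
// (the gathered values plus a QUANT8_ASYMM hits vector).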
2519 void hashtableLookupTest(int32_t operandCode) {
2520 uint32_t lookupDimensions[1] = {5};
2521 ANeuralNetworksOperandType lookup = {.type = ANEURALNETWORKS_TENSOR_INT32,
2522 .dimensionCount = 1,
2523 .dimensions = lookupDimensions,
2524 .scale = 0.0f,
2525 .zeroPoint = 0};
2526 ANeuralNetworksOperandType keys = lookup;
2527
2528 uint32_t valuesDimensions[2] = {5, 5};
2529 ANeuralNetworksOperandType values = getOpType(operandCode, 2, valuesDimensions);
2530 ANeuralNetworksOperandType output = values;
2531
2532 ANeuralNetworksOperandType hits = lookup;
2533 hits.type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
2534 hits.scale = 1.0f;
2535
2536 OperationTestBase hashLookupTest(ANEURALNETWORKS_HASHTABLE_LOOKUP, {lookup, keys, values},
2537 {output, hits});
2538 hashLookupTest.testOpsValidations();
2539 }
2540
2541 TEST(OperationValidationTest, HASHTABLE_LOOKUP_float32) {
2542 hashtableLookupTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2543 }
2544
2545 TEST(OperationValidationTest, HASHTABLE_LOOKUP_int32) {
2546 hashtableLookupTest(ANEURALNETWORKS_TENSOR_INT32);
2547 }
2548
2549 TEST(OperationValidationTest, HASHTABLE_LOOKUP_quant8) {
2550 hashtableLookupTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2551 }
2552
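// Builds an LSH_PROJECTION operation: a 2-D hash-function tensor, an input tensor of the given
// type, 1-D weights, and an INT32 projection-type scalar, with a TENSOR_INT32 output.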
2553 void lshProjectionTest(int32_t operandCode, int32_t hashAndWeightOperandCode) {
2554 uint32_t inputDimensions[2] = {5, 5};
2555 ANeuralNetworksOperandType hash = getOpType(hashAndWeightOperandCode, 2, inputDimensions);
2556 ANeuralNetworksOperandType input = getOpType(operandCode, 2, inputDimensions);
2557
2558 uint32_t weightDimensions[1] = {5};
2559 ANeuralNetworksOperandType weight = getOpType(hashAndWeightOperandCode, 1, weightDimensions);
2560
2561 ANeuralNetworksOperandType type = {.type = ANEURALNETWORKS_INT32,
2562 .dimensionCount = 0,
2563 .dimensions = nullptr,
2564 .scale = 0.0f,
2565 .zeroPoint = 0};
2566
2567 ANeuralNetworksOperandType output = weight;
2568 output.type = ANEURALNETWORKS_TENSOR_INT32;
2569
2570 OperationTestBase lshProjTest(ANEURALNETWORKS_LSH_PROJECTION, {hash, input, weight, type},
2571 {output});
2572 lshProjTest.testOpsValidations();
2573 }
2574
2575 TEST(OperationValidationTest, LSH_PROJECTION_float16) {
2576 lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT32);
2577 lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
2578 }
2579
2580 TEST(OperationValidationTest, LSH_PROJECTION_float32) {
2581 lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
2582 lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT16);
2583 }
2584
2585 TEST(OperationValidationTest, LSH_PROJECTION_quant8) {
2586 lshProjectionTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT32);
2587 lshProjectionTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT16);
2588 }
2589
2590 TEST(OperationValidationTest, LSH_PROJECTION_int32) {
2591 lshProjectionTest(ANEURALNETWORKS_TENSOR_INT32, ANEURALNETWORKS_TENSOR_FLOAT32);
2592 lshProjectionTest(ANEURALNETWORKS_TENSOR_INT32, ANEURALNETWORKS_TENSOR_FLOAT16);
2593 }
2594
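// Validates the 23-input / 4-output LSTM signature (no layer normalization) with float32 operands.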
2595 TEST(OperationValidationTest, LSTM_float32) {
2596 uint32_t oneDimensional[1] = {5};
2597 uint32_t twoDimensional[2] = {5, 5};
2598 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
2599 .dimensionCount = 1,
2600 .dimensions = oneDimensional,
2601 .scale = 0.0f,
2602 .zeroPoint = 0};
2603 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
2604 .dimensionCount = 2,
2605 .dimensions = twoDimensional,
2606 .scale = 0.0f,
2607 .zeroPoint = 0};
2608 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
2609 .dimensionCount = 0,
2610 .dimensions = nullptr,
2611 .scale = 0.0f,
2612 .zeroPoint = 0};
2613 ANeuralNetworksOperandType floatScalar = {.type = ANEURALNETWORKS_FLOAT32,
2614 .dimensionCount = 0,
2615 .dimensions = nullptr,
2616 .scale = 0.0f,
2617 .zeroPoint = 0};
2618
2619 ANeuralNetworksOperandType input = floatTensor2D;
2620 ANeuralNetworksOperandType inputToInput = floatTensor2D;
2621 ANeuralNetworksOperandType inputToForget = floatTensor2D;
2622 ANeuralNetworksOperandType inputToCell = floatTensor2D;
2623 ANeuralNetworksOperandType inputToOutput = floatTensor2D;
2624 ANeuralNetworksOperandType recurrentToInput = floatTensor2D;
2625 ANeuralNetworksOperandType recurrentToForget = floatTensor2D;
2626 ANeuralNetworksOperandType recurrentToCell = floatTensor2D;
2627 ANeuralNetworksOperandType recurrentToOutput = floatTensor2D;
2628 ANeuralNetworksOperandType cellToInput = floatTensor1D;
2629 ANeuralNetworksOperandType cellToForget = floatTensor1D;
2630 ANeuralNetworksOperandType cellToOutput = floatTensor1D;
2631 ANeuralNetworksOperandType inputGateBias = floatTensor1D;
2632 ANeuralNetworksOperandType forgetGateBias = floatTensor1D;
2633 ANeuralNetworksOperandType cellBias = floatTensor1D;
2634 ANeuralNetworksOperandType outputGateBias = floatTensor1D;
2635 ANeuralNetworksOperandType projWeights = floatTensor2D;
2636 ANeuralNetworksOperandType projBias = floatTensor1D;
2637 ANeuralNetworksOperandType outputStateIn = floatTensor2D;
2638 ANeuralNetworksOperandType cellStateIn = floatTensor2D;
2639 ANeuralNetworksOperandType activation = intScalar;
2640 ANeuralNetworksOperandType clipCellState = floatScalar;
2641 ANeuralNetworksOperandType clipProjLayer = floatScalar;
2642
2643 ANeuralNetworksOperandType scratch = floatTensor2D;
2644 ANeuralNetworksOperandType outputStateOut = floatTensor2D;
2645 ANeuralNetworksOperandType cellStateOut = floatTensor2D;
2646 ANeuralNetworksOperandType output = floatTensor2D;
2647
2648 OperationTestBase lstmTest(ANEURALNETWORKS_LSTM,
2649 {input,
2650 inputToInput,
2651 inputToForget,
2652 inputToCell,
2653 inputToOutput,
2654 recurrentToInput,
2655 recurrentToForget,
2656 recurrentToCell,
2657 recurrentToOutput,
2658 cellToInput,
2659 cellToForget,
2660 cellToOutput,
2661 inputGateBias,
2662 forgetGateBias,
2663 cellBias,
2664 outputGateBias,
2665 projWeights,
2666 projBias,
2667 outputStateIn,
2668 cellStateIn,
2669 activation,
2670 clipCellState,
2671 clipProjLayer},
2672 {scratch, outputStateOut, cellStateOut, output});
2673 lstmTest.testOpsValidations();
2674 }
2675
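// Validates the extended LSTM signature that appends the four layer-normalization weight tensors
// (27 inputs total).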
2676 void lstmTestV1_2(int32_t operandCode) {
2677 SCOPED_TRACE(operandCode);
2678 uint32_t oneDimensional[1] = {5};
2679 uint32_t twoDimensional[2] = {5, 5};
2680 ANeuralNetworksOperandType floatTensor1D = {.type = operandCode,
2681 .dimensionCount = 1,
2682 .dimensions = oneDimensional,
2683 .scale = 0.0f,
2684 .zeroPoint = 0};
2685 ANeuralNetworksOperandType floatTensor2D = {.type = operandCode,
2686 .dimensionCount = 2,
2687 .dimensions = twoDimensional,
2688 .scale = 0.0f,
2689 .zeroPoint = 0};
2690 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
2691 .dimensionCount = 0,
2692 .dimensions = nullptr,
2693 .scale = 0.0f,
2694 .zeroPoint = 0};
2695 ANeuralNetworksOperandType floatScalar = {
2696 .type = (operandCode == ANEURALNETWORKS_TENSOR_FLOAT32) ? ANEURALNETWORKS_FLOAT32
2697 : ANEURALNETWORKS_FLOAT16,
2698 .dimensionCount = 0,
2699 .dimensions = nullptr,
2700 .scale = 0.0f,
2701 .zeroPoint = 0};
2702
2703 ANeuralNetworksOperandType input = floatTensor2D;
2704 ANeuralNetworksOperandType inputToInput = floatTensor2D;
2705 ANeuralNetworksOperandType inputToForget = floatTensor2D;
2706 ANeuralNetworksOperandType inputToCell = floatTensor2D;
2707 ANeuralNetworksOperandType inputToOutput = floatTensor2D;
2708 ANeuralNetworksOperandType recurrentToInput = floatTensor2D;
2709 ANeuralNetworksOperandType recurrentToForget = floatTensor2D;
2710 ANeuralNetworksOperandType recurrentToCell = floatTensor2D;
2711 ANeuralNetworksOperandType recurrentToOutput = floatTensor2D;
2712 ANeuralNetworksOperandType cellToInput = floatTensor1D;
2713 ANeuralNetworksOperandType cellToForget = floatTensor1D;
2714 ANeuralNetworksOperandType cellToOutput = floatTensor1D;
2715 ANeuralNetworksOperandType inputGateBias = floatTensor1D;
2716 ANeuralNetworksOperandType forgetGateBias = floatTensor1D;
2717 ANeuralNetworksOperandType cellBias = floatTensor1D;
2718 ANeuralNetworksOperandType outputGateBias = floatTensor1D;
2719 ANeuralNetworksOperandType projWeights = floatTensor2D;
2720 ANeuralNetworksOperandType projBias = floatTensor1D;
2721 ANeuralNetworksOperandType outputStateIn = floatTensor2D;
2722 ANeuralNetworksOperandType cellStateIn = floatTensor2D;
2723 ANeuralNetworksOperandType activation = intScalar;
2724 ANeuralNetworksOperandType clipCellState = floatScalar;
2725 ANeuralNetworksOperandType clipProjLayer = floatScalar;
2726 ANeuralNetworksOperandType inputLayerNormWeights = floatTensor1D;
2727 ANeuralNetworksOperandType forgetLayerNormWeights = floatTensor1D;
2728 ANeuralNetworksOperandType cellLayerNormWeights = floatTensor1D;
2729 ANeuralNetworksOperandType outputLayerNormWeights = floatTensor1D;
2730
2731 ANeuralNetworksOperandType scratch = floatTensor2D;
2732 ANeuralNetworksOperandType outputStateOut = floatTensor2D;
2733 ANeuralNetworksOperandType cellStateOut = floatTensor2D;
2734 ANeuralNetworksOperandType output = floatTensor2D;
2735
2736 OperationTestBase lstmTest(ANEURALNETWORKS_LSTM,
2737 {input,
2738 inputToInput,
2739 inputToForget,
2740 inputToCell,
2741 inputToOutput,
2742 recurrentToInput,
2743 recurrentToForget,
2744 recurrentToCell,
2745 recurrentToOutput,
2746 cellToInput,
2747 cellToForget,
2748 cellToOutput,
2749 inputGateBias,
2750 forgetGateBias,
2751 cellBias,
2752 outputGateBias,
2753 projWeights,
2754 projBias,
2755 outputStateIn,
2756 cellStateIn,
2757 activation,
2758 clipCellState,
2759 clipProjLayer,
2760 inputLayerNormWeights,
2761 forgetLayerNormWeights,
2762 cellLayerNormWeights,
2763 outputLayerNormWeights},
2764 {scratch, outputStateOut, cellStateOut, output});
2765 lstmTest.testOpsValidations();
2766 }
2767
2768 TEST(OperationValidationTest, LSTM_V1_2) {
2769 lstmTestV1_2(ANEURALNETWORKS_TENSOR_FLOAT32);
2770 lstmTestV1_2(ANEURALNETWORKS_TENSOR_FLOAT16);
2771 }
2772
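// Builds a BIDIRECTIONAL_SEQUENCE_LSTM operation: forward and backward weight, bias, and state
// sets, auxiliary inputs, merge-outputs and time-major flags, and per-direction
// layer-normalization weights, producing forward and backward outputs.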
2773 void lstmBidirectionalSequence(int32_t operandCode) {
2774 uint32_t oneDimensional[1] = {5};
2775 uint32_t twoDimensional[2] = {5, 5};
2776 uint32_t threeDimensional[3] = {5, 5, 5};
2777 ANeuralNetworksOperandType floatTensor1D = {
2778 .type = operandCode,
2779 .dimensionCount = 1,
2780 .dimensions = oneDimensional,
2781 .scale = 0.0f,
2782 .zeroPoint = 0,
2783 };
2784 ANeuralNetworksOperandType floatTensor2D = {
2785 .type = operandCode,
2786 .dimensionCount = 2,
2787 .dimensions = twoDimensional,
2788 .scale = 0.0f,
2789 .zeroPoint = 0,
2790 };
2791 ANeuralNetworksOperandType floatTensor3D = {
2792 .type = operandCode,
2793 .dimensionCount = 3,
2794 .dimensions = threeDimensional,
2795 .scale = 0.0f,
2796 .zeroPoint = 0,
2797 };
2798 ANeuralNetworksOperandType intScalar = {
2799 .type = ANEURALNETWORKS_INT32,
2800 .dimensionCount = 0,
2801 .dimensions = nullptr,
2802 .scale = 0.0f,
2803 .zeroPoint = 0,
2804 };
2805 ANeuralNetworksOperandType floatScalar = {
2806 .type = operandCode == ANEURALNETWORKS_TENSOR_FLOAT32 ? ANEURALNETWORKS_FLOAT32
2807 : ANEURALNETWORKS_FLOAT16,
2808 .dimensionCount = 0,
2809 .dimensions = nullptr,
2810 .scale = 0.0f,
2811 .zeroPoint = 0,
2812 };
2813 ANeuralNetworksOperandType boolScalar = {.type = ANEURALNETWORKS_BOOL,
2814 .dimensionCount = 0,
2815 .dimensions = nullptr,
2816 .scale = 0.0f,
2817 .zeroPoint = 0};
2818
2819 ANeuralNetworksOperandType input = floatTensor3D;
2820 ANeuralNetworksOperandType inputToInputFw = floatTensor2D;
2821 ANeuralNetworksOperandType inputToForgetFw = floatTensor2D;
2822 ANeuralNetworksOperandType inputToCellFw = floatTensor2D;
2823 ANeuralNetworksOperandType inputToOutputFw = floatTensor2D;
2824 ANeuralNetworksOperandType recurrentToInputFw = floatTensor2D;
2825 ANeuralNetworksOperandType recurrentToForgetFw = floatTensor2D;
2826 ANeuralNetworksOperandType recurrentToCellFw = floatTensor2D;
2827 ANeuralNetworksOperandType recurrentToOutputFw = floatTensor2D;
2828 ANeuralNetworksOperandType cellToInputFw = floatTensor1D;
2829 ANeuralNetworksOperandType cellToForgetFw = floatTensor1D;
2830 ANeuralNetworksOperandType cellToOutputFw = floatTensor1D;
2831 ANeuralNetworksOperandType inputGateBiasFw = floatTensor1D;
2832 ANeuralNetworksOperandType forgetGateBiasFw = floatTensor1D;
2833 ANeuralNetworksOperandType cellBiasFw = floatTensor1D;
2834 ANeuralNetworksOperandType outputGateBiasFw = floatTensor1D;
2835 ANeuralNetworksOperandType projWeightsFw = floatTensor2D;
2836 ANeuralNetworksOperandType projBiasFw = floatTensor1D;
2837 ANeuralNetworksOperandType outputStateInFw = floatTensor2D;
2838 ANeuralNetworksOperandType cellStateInFw = floatTensor2D;
2839 ANeuralNetworksOperandType inputToInputBw = floatTensor2D;
2840 ANeuralNetworksOperandType inputToForgetBw = floatTensor2D;
2841 ANeuralNetworksOperandType inputToCellBw = floatTensor2D;
2842 ANeuralNetworksOperandType inputToOutputBw = floatTensor2D;
2843 ANeuralNetworksOperandType recurrentToInputBw = floatTensor2D;
2844 ANeuralNetworksOperandType recurrentToForgetBw = floatTensor2D;
2845 ANeuralNetworksOperandType recurrentToCellBw = floatTensor2D;
2846 ANeuralNetworksOperandType recurrentToOutputBw = floatTensor2D;
2847 ANeuralNetworksOperandType cellToInputBw = floatTensor1D;
2848 ANeuralNetworksOperandType cellToForgetBw = floatTensor1D;
2849 ANeuralNetworksOperandType cellToOutputBw = floatTensor1D;
2850 ANeuralNetworksOperandType inputGateBiasBw = floatTensor1D;
2851 ANeuralNetworksOperandType forgetGateBiasBw = floatTensor1D;
2852 ANeuralNetworksOperandType cellBiasBw = floatTensor1D;
2853 ANeuralNetworksOperandType outputGateBiasBw = floatTensor1D;
2854 ANeuralNetworksOperandType projWeightsBw = floatTensor2D;
2855 ANeuralNetworksOperandType projBiasBw = floatTensor1D;
2856 ANeuralNetworksOperandType outputStateInBw = floatTensor2D;
2857 ANeuralNetworksOperandType cellStateInBw = floatTensor2D;
2858 ANeuralNetworksOperandType auxInput = floatTensor3D;
2859 ANeuralNetworksOperandType auxInputToInputFw = floatTensor2D;
2860 ANeuralNetworksOperandType auxInputToForgetFw = floatTensor2D;
2861 ANeuralNetworksOperandType auxInputToCellFw = floatTensor2D;
2862 ANeuralNetworksOperandType auxInputToOutputFw = floatTensor2D;
2863 ANeuralNetworksOperandType auxInputToInputBw = floatTensor2D;
2864 ANeuralNetworksOperandType auxInputToForgetBw = floatTensor2D;
2865 ANeuralNetworksOperandType auxInputToCellBw = floatTensor2D;
2866 ANeuralNetworksOperandType auxInputToOutputBw = floatTensor2D;
2867 ANeuralNetworksOperandType activation = intScalar;
2868 ANeuralNetworksOperandType clipCellState = floatScalar;
2869 ANeuralNetworksOperandType clipProjLayer = floatScalar;
2870 ANeuralNetworksOperandType mergeOutputs = boolScalar;
2871 ANeuralNetworksOperandType timeMajor = boolScalar;
2872 ANeuralNetworksOperandType inputLayerNormWeightsFw = floatTensor1D;
2873 ANeuralNetworksOperandType forgetLayerNormWeightsFw = floatTensor1D;
2874 ANeuralNetworksOperandType cellLayerNormWeightsFw = floatTensor1D;
2875 ANeuralNetworksOperandType outputLayerNormWeightsFw = floatTensor1D;
2876 ANeuralNetworksOperandType inputLayerNormWeightsBw = floatTensor1D;
2877 ANeuralNetworksOperandType forgetLayerNormWeightsBw = floatTensor1D;
2878 ANeuralNetworksOperandType cellLayerNormWeightsBw = floatTensor1D;
2879 ANeuralNetworksOperandType outputLayerNormWeightsBw = floatTensor1D;
2880
2881 ANeuralNetworksOperandType outputFw = floatTensor2D;
2882 ANeuralNetworksOperandType outputBw = floatTensor2D;
2883
2884 OperationTestBase lstmTest(ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM,
2885 {
2886 input,
2887 inputToInputFw,
2888 inputToForgetFw,
2889 inputToCellFw,
2890 inputToOutputFw,
2891 recurrentToInputFw,
2892 recurrentToForgetFw,
2893 recurrentToCellFw,
2894 recurrentToOutputFw,
2895 cellToInputFw,
2896 cellToForgetFw,
2897 cellToOutputFw,
2898 inputGateBiasFw,
2899 forgetGateBiasFw,
2900 cellBiasFw,
2901 outputGateBiasFw,
2902 projWeightsFw,
2903 projBiasFw,
2904 outputStateInFw,
2905 cellStateInFw,
2906 inputToInputBw,
2907 inputToForgetBw,
2908 inputToCellBw,
2909 inputToOutputBw,
2910 recurrentToInputBw,
2911 recurrentToForgetBw,
2912 recurrentToCellBw,
2913 recurrentToOutputBw,
2914 cellToInputBw,
2915 cellToForgetBw,
2916 cellToOutputBw,
2917 inputGateBiasBw,
2918 forgetGateBiasBw,
2919 cellBiasBw,
2920 outputGateBiasBw,
2921 projWeightsBw,
2922 projBiasBw,
2923 outputStateInBw,
2924 cellStateInBw,
2925 auxInput,
2926 auxInputToInputFw,
2927 auxInputToForgetFw,
2928 auxInputToCellFw,
2929 auxInputToOutputFw,
2930 auxInputToInputBw,
2931 auxInputToForgetBw,
2932 auxInputToCellBw,
2933 auxInputToOutputBw,
2934 activation,
2935 clipCellState,
2936 clipProjLayer,
2937 mergeOutputs,
2938 timeMajor,
2939 inputLayerNormWeightsFw,
2940 forgetLayerNormWeightsFw,
2941 cellLayerNormWeightsFw,
2942 outputLayerNormWeightsFw,
2943 inputLayerNormWeightsBw,
2944 forgetLayerNormWeightsBw,
2945 cellLayerNormWeightsBw,
2946 outputLayerNormWeightsBw,
2947 },
2948 {
2949 outputFw,
2950 outputBw,
2951 });
2952
2953 lstmTest.testOpsValidations();
2954 }
2955
2956 TEST(OperationValidationTest, LSTM_BIDIRECTIONAL_SEQUENCE) {
2957 lstmBidirectionalSequence(ANEURALNETWORKS_TENSOR_FLOAT32);
2958 lstmBidirectionalSequence(ANEURALNETWORKS_TENSOR_FLOAT16);
2959 }
2960
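// Builds a RANDOM_MULTINOMIAL operation: a 2-D logits tensor of the given type, an INT32 sample
// count, and a 1-D seed tensor, producing TENSOR_INT32 samples.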
2961 void randomMultinomialOpTest(int32_t operandCode) {
2962 uint32_t inputDims[2] = {5, 5};
2963 ANeuralNetworksOperandType input = {.type = operandCode,
2964 .dimensionCount = 2,
2965 .dimensions = inputDims,
2966 .scale = 0.0f,
2967 .zeroPoint = 0};
2968 ANeuralNetworksOperandType sample_count = {.type = ANEURALNETWORKS_INT32,
2969 .dimensionCount = 0,
2970 .dimensions = nullptr,
2971 .scale = 0.0f,
2972 .zeroPoint = 0};
2973 uint32_t seedDims[1] = {2};
2974 ANeuralNetworksOperandType seed = {.type = ANEURALNETWORKS_TENSOR_INT32,
2975 .dimensionCount = 1,
2976 .dimensions = seedDims,
2977 .scale = 0.0f,
2978 .zeroPoint = 0};
2979 uint32_t outputDims[2] = {5, 7};
2980 ANeuralNetworksOperandType output = {.type = ANEURALNETWORKS_TENSOR_INT32,
2981 .dimensionCount = 2,
2982 .dimensions = outputDims,
2983 .scale = 0.0f,
2984 .zeroPoint = 0};
2985
2986 OperationTestBase multinomialTest(ANEURALNETWORKS_RANDOM_MULTINOMIAL,
2987 {input, sample_count, seed}, {output});
2988 multinomialTest.testOpsValidations();
2989 }
2990
2991 TEST(OperationValidationTest, RANDOM_MULTINOMIAL_float16) {
2992 randomMultinomialOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
2993 }
2994
2995 TEST(OperationValidationTest, RANDOM_MULTINOMIAL_float32) {
2996 randomMultinomialOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2997 }
2998
2999 TEST(OperationValidationTest, RNN_float16) {
3000 uint32_t oneDimensional[1] = {5};
3001 uint32_t twoDimensional[2] = {5, 5};
3002 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT16,
3003 .dimensionCount = 1,
3004 .dimensions = oneDimensional,
3005 .scale = 0.0f,
3006 .zeroPoint = 0};
3007 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT16,
3008 .dimensionCount = 2,
3009 .dimensions = twoDimensional,
3010 .scale = 0.0f,
3011 .zeroPoint = 0};
3012 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
3013 .dimensionCount = 0,
3014 .dimensions = nullptr,
3015 .scale = 0.0f,
3016 .zeroPoint = 0};
3017
3018 ANeuralNetworksOperandType input = floatTensor2D;
3019 ANeuralNetworksOperandType weights = floatTensor2D;
3020 ANeuralNetworksOperandType recurrentWeights = floatTensor2D;
3021 ANeuralNetworksOperandType bias = floatTensor1D;
3022 ANeuralNetworksOperandType hiddenStateIn = floatTensor2D;
3023 ANeuralNetworksOperandType activation = intScalar;
3024
3025 ANeuralNetworksOperandType hiddenStateOut = floatTensor2D;
3026 ANeuralNetworksOperandType output = floatTensor2D;
3027
3028 OperationTestBase rnnTest(ANEURALNETWORKS_RNN,
3029 {input, weights, recurrentWeights, bias, hiddenStateIn, activation},
3030 {hiddenStateOut, output});
3031 rnnTest.testOpsValidations();
3032 }
3033
3034 TEST(OperationValidationTest, RNN_float32) {
3035 uint32_t oneDimensional[1] = {5};
3036 uint32_t twoDimensional[2] = {5, 5};
3037 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3038 .dimensionCount = 1,
3039 .dimensions = oneDimensional,
3040 .scale = 0.0f,
3041 .zeroPoint = 0};
3042 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3043 .dimensionCount = 2,
3044 .dimensions = twoDimensional,
3045 .scale = 0.0f,
3046 .zeroPoint = 0};
3047 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
3048 .dimensionCount = 0,
3049 .dimensions = nullptr,
3050 .scale = 0.0f,
3051 .zeroPoint = 0};
3052
3053 ANeuralNetworksOperandType input = floatTensor2D;
3054 ANeuralNetworksOperandType weights = floatTensor2D;
3055 ANeuralNetworksOperandType recurrentWeights = floatTensor2D;
3056 ANeuralNetworksOperandType bias = floatTensor1D;
3057 ANeuralNetworksOperandType hiddenStateIn = floatTensor2D;
3058 ANeuralNetworksOperandType activation = intScalar;
3059
3060 ANeuralNetworksOperandType hiddenStateOut = floatTensor2D;
3061 ANeuralNetworksOperandType output = floatTensor2D;
3062
3063 OperationTestBase rnnTest(ANEURALNETWORKS_RNN,
3064 {input, weights, recurrentWeights, bias, hiddenStateIn, activation},
3065 {hiddenStateOut, output});
3066 rnnTest.testOpsValidations();
3067 }
3068
3069 TEST(OperationValidationTest, SVDF_float32) {
3070 uint32_t oneDimensional[1] = {5};
3071 uint32_t twoDimensional[2] = {5, 5};
3072 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3073 .dimensionCount = 1,
3074 .dimensions = oneDimensional,
3075 .scale = 0.0f,
3076 .zeroPoint = 0};
3077 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3078 .dimensionCount = 2,
3079 .dimensions = twoDimensional,
3080 .scale = 0.0f,
3081 .zeroPoint = 0};
3082 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
3083 .dimensionCount = 0,
3084 .dimensions = nullptr,
3085 .scale = 0.0f,
3086 .zeroPoint = 0};
3087
3088 ANeuralNetworksOperandType input = floatTensor2D;
3089 ANeuralNetworksOperandType weightsFeature = floatTensor2D;
3090 ANeuralNetworksOperandType weightsTime = floatTensor2D;
3091 ANeuralNetworksOperandType bias = floatTensor1D;
3092 ANeuralNetworksOperandType stateIn = floatTensor2D;
3093 ANeuralNetworksOperandType rank = intScalar;
3094 ANeuralNetworksOperandType activation = intScalar;
3095
3096 ANeuralNetworksOperandType stateOut = floatTensor2D;
3097 ANeuralNetworksOperandType output = floatTensor2D;
3098
3099 OperationTestBase svdfTest(
3100 ANEURALNETWORKS_SVDF,
3101 {input, weightsFeature, weightsTime, bias, stateIn, rank, activation},
3102 {stateOut, output});
3103 svdfTest.testOpsValidations();
3104 }
3105
3106 TEST(OperationValidationTest, SVDF_float16) {
3107 uint32_t oneDimensional[1] = {5};
3108 uint32_t twoDimensional[2] = {5, 5};
3109 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT16,
3110 .dimensionCount = 1,
3111 .dimensions = oneDimensional,
3112 .scale = 0.0f,
3113 .zeroPoint = 0};
3114 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT16,
3115 .dimensionCount = 2,
3116 .dimensions = twoDimensional,
3117 .scale = 0.0f,
3118 .zeroPoint = 0};
3119 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
3120 .dimensionCount = 0,
3121 .dimensions = nullptr,
3122 .scale = 0.0f,
3123 .zeroPoint = 0};
3124
3125 ANeuralNetworksOperandType input = floatTensor2D;
3126 ANeuralNetworksOperandType weightsFeature = floatTensor2D;
3127 ANeuralNetworksOperandType weightsTime = floatTensor2D;
3128 ANeuralNetworksOperandType bias = floatTensor1D;
3129 ANeuralNetworksOperandType stateIn = floatTensor2D;
3130 ANeuralNetworksOperandType rank = intScalar;
3131 ANeuralNetworksOperandType activation = intScalar;
3132
3133 ANeuralNetworksOperandType stateOut = floatTensor2D;
3134 ANeuralNetworksOperandType output = floatTensor2D;
3135
3136 OperationTestBase svdfTest(
3137 ANEURALNETWORKS_SVDF,
3138 {input, weightsFeature, weightsTime, bias, stateIn, rank, activation},
3139 {stateOut, output});
3140 svdfTest.testOpsValidations();
3141 }
3142
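// Builds a STRIDED_SLICE operation: 1-D TENSOR_INT32 begin/end/stride tensors plus the three
// INT32 mask scalars; the input rank is constrained to at most 4.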
3143 void stridedSliceOpTest(int32_t operandCode) {
3144 uint32_t inputDimensions[2] = {5, 5};
3145 ANeuralNetworksOperandType input = getOpType(operandCode, 2, inputDimensions);
3146 ANeuralNetworksOperandType output = input;
3147
3148 uint32_t beginsDimensions[1] = {2};
3149 ANeuralNetworksOperandType begins = {.type = ANEURALNETWORKS_TENSOR_INT32,
3150 .dimensionCount = 1,
3151 .dimensions = beginsDimensions,
3152 .scale = 0.0f,
3153 .zeroPoint = 0};
3154
3155 ANeuralNetworksOperandType ends = begins;
3156 ANeuralNetworksOperandType strides = begins;
3157
3158 ANeuralNetworksOperandType beginMask = {.type = ANEURALNETWORKS_INT32,
3159 .dimensionCount = 0,
3160 .dimensions = nullptr,
3161 .scale = 0.0f,
3162 .zeroPoint = 0};
3163 ANeuralNetworksOperandType endMask = beginMask;
3164 ANeuralNetworksOperandType shrinkAxisMask = beginMask;
3165
3166 OperationTestBase stridedSliceTest(
3167 ANEURALNETWORKS_STRIDED_SLICE,
3168 {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output},
3169 {{TensorRankConstraint::UpTo(4)}});
3170 stridedSliceTest.testOpsValidations();
3171 }
3172
3173 TEST(OperationValidationTest, STRIDED_SLICE_float32) {
3174 stridedSliceOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3175 }
3176
3177 TEST(OperationValidationTest, STRIDED_SLICE_float16) {
3178 stridedSliceOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3179 }
3180
3181 TEST(OperationValidationTest, STRIDED_SLICE_quant8) {
3182 stridedSliceOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3183 }
3184
3185 TEST(OperationValidationTest, STRIDED_SLICE_quant8_signed) {
3186 stridedSliceOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3187 }
3188
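// Builds a ROI_ALIGN operation: feature map, ROI tensor, batch-split indices, INT32 output-size
// scalars, height/width scale ratios of the given scalar type, INT32 sampling-ratio scalars, and
// the NCHW layout flag.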
3189 void roiAlignOpTest(int32_t inputOperandCode, int32_t roiOperandCode, int32_t scalarOperandCode) {
3190 uint32_t inDim[] = {1, 4, 4, 1}, roiDim[] = {4, 4}, batchSplitDim[] = {1};
3191 uint32_t outDim[] = {4, 2, 2, 1};
3192 OperationTestBase roiAlignTest(
3193 ANEURALNETWORKS_ROI_ALIGN,
3194 {getOpType(inputOperandCode, 4, inDim), getOpType(roiOperandCode, 2, roiDim),
3195 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, batchSplitDim),
3196 getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
3197 getOpType(scalarOperandCode), getOpType(scalarOperandCode),
3198 getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
3199 getOpType(ANEURALNETWORKS_BOOL)},
3200 {getOpType(inputOperandCode, 4, outDim)});
3201 roiAlignTest.testOpsValidations();
3202 }
3203
3204 TEST(OperationValidationTest, ROI_ALIGN_float16) {
3205 roiAlignOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
3206 ANEURALNETWORKS_FLOAT16);
3207 }
3208
3209 TEST(OperationValidationTest, ROI_ALIGN_float32) {
3210 roiAlignOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
3211 ANEURALNETWORKS_FLOAT32);
3212 }
3213
3214 TEST(OperationValidationTest, ROI_ALIGN_quant8) {
3215 roiAlignOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3216 ANEURALNETWORKS_FLOAT32);
3217 }
3218
3219 TEST(OperationValidationTest, ROI_ALIGN_quant8signed) {
3220 roiAlignOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3221 ANEURALNETWORKS_FLOAT32);
3222 }
3223
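// Builds a ROI_POOLING operation, which takes the same operand list as ROI_ALIGN above minus the
// two INT32 sampling-ratio scalars.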
3224 void roiPoolingOpTest(int32_t inputOperandCode, int32_t roiOperandCode, int32_t scalarOperandCode) {
3225 uint32_t inDim[] = {1, 4, 4, 1}, roiDim[] = {4, 4}, batchSplitDim[] = {1};
3226 uint32_t outDim[] = {4, 2, 2, 1};
3227 OperationTestBase roiPoolingTest(
3228 ANEURALNETWORKS_ROI_POOLING,
3229 {getOpType(inputOperandCode, 4, inDim), getOpType(roiOperandCode, 2, roiDim),
3230 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, batchSplitDim),
3231 getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
3232 getOpType(scalarOperandCode), getOpType(scalarOperandCode),
3233 getOpType(ANEURALNETWORKS_BOOL)},
3234 {getOpType(inputOperandCode, 4, outDim)});
3235 roiPoolingTest.testOpsValidations();
3236 }
3237
3238 TEST(OperationValidationTest, ROI_POOLING_float16) {
3239 roiPoolingOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
3240 ANEURALNETWORKS_FLOAT16);
3241 }
3242
3243 TEST(OperationValidationTest, ROI_POOLING_float32) {
3244 roiPoolingOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
3245 ANEURALNETWORKS_FLOAT32);
3246 }
3247
3248 TEST(OperationValidationTest, ROI_POOLING_quant8) {
3249 roiPoolingOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3250 ANEURALNETWORKS_FLOAT32);
3251 }
3252
3253 TEST(OperationValidationTest, ROI_POOLING_quant8signed) {
3254 roiPoolingOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3255 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, ANEURALNETWORKS_FLOAT32);
3256 }
3257
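// Builds a HEATMAP_MAX_KEYPOINT operation: a 4-D heatmap tensor, a 2-D bounding-box tensor, and a
// layout flag, producing keypoint scores and keypoint coordinates.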
3258 void heatmapMaxKeypointOpTest(int32_t heatmapOperandCode, int32_t roiOperandCode) {
3259 uint32_t heatmapDim[] = {6, 4, 4, 1}, boxDim[] = {6, 4}, outScoreDim[] = {6, 1},
3260 outKeypointDim[] = {6, 1, 2};
3261 OperationTestBase heatmapMaxKeypointTest(
3262 ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT,
3263 {getOpType(heatmapOperandCode, 4, heatmapDim), getOpType(roiOperandCode, 2, boxDim),
3264 getOpType(ANEURALNETWORKS_BOOL)},
3265 {getOpType(heatmapOperandCode, 2, outScoreDim),
3266 getOpType(roiOperandCode, 3, outKeypointDim)});
3267 heatmapMaxKeypointTest.testOpsValidations();
3268 }
3269
3270 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_float16) {
3271 heatmapMaxKeypointOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
3272 }
3273
3274 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_float32) {
3275 heatmapMaxKeypointOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
3276 }
3277
3278 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_quant) {
3279 heatmapMaxKeypointOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3280 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM);
3281 }
3282
3283 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_quant_signed) {
3284 heatmapMaxKeypointOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3285 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM);
3286 }
3287
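// Builds an INSTANCE_NORMALIZATION operation; the gamma, beta, and epsilon scalars match the
// input's floating-point precision and are followed by the NCHW layout flag.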
3288 void instanceNormalizationOpTest(int32_t inputOperandType) {
3289 SCOPED_TRACE(inputOperandType);
3290 uint32_t inputDims[4] = {4, 4, 4, 4};
3291 ANeuralNetworksOperandType input = getOpType(inputOperandType, 4, inputDims);
3292 ANeuralNetworksOperandType floatScalar = getOpType(ANEURALNETWORKS_FLOAT32);
3293 if (inputOperandType == ANEURALNETWORKS_TENSOR_FLOAT16) {
3294 floatScalar = getOpType(ANEURALNETWORKS_FLOAT16);
3295 }
3296 ANeuralNetworksOperandType gamma = floatScalar;
3297 ANeuralNetworksOperandType beta = floatScalar;
3298 ANeuralNetworksOperandType epsilon = floatScalar;
3299 ANeuralNetworksOperandType isNCHW = getOpType(ANEURALNETWORKS_BOOL);
3300 ANeuralNetworksOperandType output = input;
3301
3302 OperationTestBase test(ANEURALNETWORKS_INSTANCE_NORMALIZATION,
3303 {input, gamma, beta, epsilon, isNCHW}, {output});
3304 test.testOpsValidations();
3305 }
3306
3307 TEST(OperationValidationTest, INSTANCE_NORMALIZATION) {
3308 instanceNormalizationOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3309 instanceNormalizationOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3310 }
3311
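// Builds GROUPED_CONV_2D in both explicit-padding (12-input) and implicit-padding (9-input)
// forms; quantized filters use a TENSOR_INT32 bias, and per-channel filters attach symmetric
// per-channel quant params.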
3312 void groupedConvOpTest(int32_t inputOperandCode, int32_t filterOperandCode) {
3313 uint32_t inDim[] = {1, 3, 3, 2}, filterDim[] = {2, 2, 2, 1}, biasDim[] = {2};
3314 uint32_t outDim[] = {1, 2, 2, 2};
3315 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inDim);
3316
3317 float filterScales[2] = {0.5f, 1.0f};
3318 ANeuralNetworksOperandType filter = getOpType(filterOperandCode, 4, filterDim);
3319
3320 ANeuralNetworksSymmPerChannelQuantParams filterChannelQuantParams = {
3321 .channelDim = 0,
3322 .scaleCount = 2,
3323 .scales = filterScales,
3324 };
3325
3326 ANeuralNetworksOperandType bias = getOpType(inputOperandCode, 1, biasDim);
3327 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
3328 filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
3329 bias.type = ANEURALNETWORKS_TENSOR_INT32;
3330 bias.scale = 0.25f;
3331 }
3332 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3333 bias.type = ANEURALNETWORKS_TENSOR_INT32;
3334 bias.scale = 0.0f;
3335 }
3336
3337 ANeuralNetworksOperandType scalar = getOpType(ANEURALNETWORKS_INT32);
3338 ANeuralNetworksOperandType layout = getOpType(ANEURALNETWORKS_BOOL);
3339
3340 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 4, outDim);
3341
3342 OperationTestBase explicitGroupedConvTest(ANEURALNETWORKS_GROUPED_CONV_2D,
3343 {input, filter, bias, scalar, scalar, scalar, scalar,
3344 scalar, scalar, scalar, scalar, layout},
3345 {output});
3346 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3347 explicitGroupedConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
3348 }
3349 explicitGroupedConvTest.testOpsValidations();
3350
3351 OperationTestBase implicitGroupedConvTest(
3352 ANEURALNETWORKS_GROUPED_CONV_2D,
3353 {input, filter, bias, scalar, scalar, scalar, scalar, scalar, layout}, {output});
3354 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3355 implicitGroupedConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
3356 }
3357 implicitGroupedConvTest.testOpsValidations();
3358 }
3359
3360 TEST(OperationValidationTest, GROUPED_CONV_2D_float16) {
3361 groupedConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
3362 }
3363
3364 TEST(OperationValidationTest, GROUPED_CONV_2D_float32) {
3365 groupedConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
3366 }
3367
3368 TEST(OperationValidationTest, GROUPED_CONV_2D_quant8) {
3369 groupedConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3370 }
3371
3372 TEST(OperationValidationTest, GROUPED_CONV_2D_quant8_per_channel) {
3373 groupedConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3374 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
3375 }
3376
3377 TEST(OperationValidationTest, GROUPED_CONV_2D_quant8signed) {
3378 groupedConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3379 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3380 }
3381
3382 TEST(OperationValidationTest, GROUPED_CONV_2D_quant8signed_per_channel) {
3383 groupedConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3384 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
3385 }
3386
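// Builds TRANSPOSE_CONV_2D in the explicit-padding (11-input) form and the output-shape-tensor
// (9-input) form; bias handling mirrors groupedConvOpTest above.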
3387 void transposeConvOpTest(int32_t inputOperandCode, int32_t filterOperandCode) {
3388 uint32_t inDim[] = {1, 2, 2, 2}, filterDim[] = {2, 3, 3, 1}, biasDim[] = {2};
3389 uint32_t outDim[] = {1, 5, 5, 2}, outShapeDim[] = {4};
3390 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inDim);
3391 ANeuralNetworksOperandType filter = getOpType(filterOperandCode, 4, filterDim);
3392
3393 float filterScales[2] = {0.5f, 1.0f};
3394 ANeuralNetworksSymmPerChannelQuantParams filterChannelQuantParams = {
3395 .channelDim = 0,
3396 .scaleCount = 2,
3397 .scales = filterScales,
3398 };
3399
3400 ANeuralNetworksOperandType bias = getOpType(inputOperandCode, 1, biasDim);
3401 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
3402 filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
3403 bias.type = ANEURALNETWORKS_TENSOR_INT32;
3404 bias.scale = 0.25f;
3405 }
3406 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3407 bias.type = ANEURALNETWORKS_TENSOR_INT32;
3408 bias.scale = 0.0f;
3409 }
3410
3411 ANeuralNetworksOperandType scalar = getOpType(ANEURALNETWORKS_INT32);
3412 ANeuralNetworksOperandType layout = getOpType(ANEURALNETWORKS_BOOL);
3413 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 4, outDim);
3414
3415 OperationTestBase explicitTransposeConvTest(
3416 ANEURALNETWORKS_TRANSPOSE_CONV_2D,
3417 {input, filter, bias, scalar, scalar, scalar, scalar, scalar, scalar, scalar, layout},
3418 {output});
3419 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3420 explicitTransposeConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
3421 }
3422 explicitTransposeConvTest.testOpsValidations();
3423
3424 OperationTestBase implicitTransposeConvTest(
3425 ANEURALNETWORKS_TRANSPOSE_CONV_2D,
3426 {input, filter, bias, getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outShapeDim), scalar,
3427 scalar, scalar, scalar, layout},
3428 {output});
3429 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3430 implicitTransposeConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
3431 }
3432 implicitTransposeConvTest.testOpsValidations();
3433 }
3434
3435 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_float16) {
3436 transposeConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
3437 }
3438
3439 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_float32) {
3440 transposeConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
3441 }
3442
3443 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_quant8) {
3444 transposeConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3445 }
3446
3447 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_quant8_per_channel) {
3448 transposeConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3449 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
3450 }
3451
3452 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_quant8_signed) {
3453 transposeConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3454 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3455 }
3456
3457 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_quant8_signed_per_channel) {
3458 transposeConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3459 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
3460 }
3461
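// Builds a CHANNEL_SHUFFLE operation: an input tensor plus INT32 group-count and axis scalars,
// with the rank constrained to at most 4.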
3462 void channelShuffleOpTest(int32_t operandCode) {
3463 uint32_t inoutDim[] = {2, 2, 3, 12};
3464 OperationTestBase channelShuffleTest(
3465 ANEURALNETWORKS_CHANNEL_SHUFFLE,
3466 {getOpType(operandCode, 2, inoutDim), getOpType(ANEURALNETWORKS_INT32),
3467 getOpType(ANEURALNETWORKS_INT32)},
3468 {getOpType(operandCode, 2, inoutDim)}, {{TensorRankConstraint::UpTo(4)}});
3469 channelShuffleTest.testOpsValidations();
3470 }
3471
3472 TEST(OperationValidationTest, CHANNEL_SHUFFLE_float16) {
3473 channelShuffleOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3474 }
3475
3476 TEST(OperationValidationTest, CHANNEL_SHUFFLE_float32) {
3477 channelShuffleOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3478 }
3479
3480 TEST(OperationValidationTest, CHANNEL_SHUFFLE_quant8) {
3481 channelShuffleOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3482 }
3483
3484 TEST(OperationValidationTest, CHANNEL_SHUFFLE_quant8signed) {
3485 channelShuffleOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3486 }
3487
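// Builds a DETECTION_POSTPROCESSING operation: class scores, box deltas, and anchors plus scale,
// NMS, and threshold scalars, producing scores, boxes, class labels, and valid-detection counts.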
3488 void detectionPostprocessingOpTest(int32_t inputOperandCode) {
3489 SCOPED_TRACE(inputOperandCode);
3490 const int numBatches = 2;
3491 const int numAnchors = 10;
3492 const int numClasses = 5;
3493 const int lengthBoxEncoding = 4;
3494
3495 uint32_t inputDims[3] = {numBatches, numAnchors, numClasses};
3496 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 3, inputDims);
3497 uint32_t deltasDims[3] = {numBatches, numAnchors, lengthBoxEncoding};
3498 ANeuralNetworksOperandType deltas = getOpType(inputOperandCode, 3, deltasDims);
3499 uint32_t anchorsDims[2] = {numAnchors, 4};
3500 ANeuralNetworksOperandType anchors = getOpType(inputOperandCode, 2, anchorsDims);
3501 ANeuralNetworksOperandType scaleScalar = getOpType(ANEURALNETWORKS_FLOAT32);
3502 if (inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16) {
3503 scaleScalar = getOpType(ANEURALNETWORKS_FLOAT16);
3504 }
3505 ANeuralNetworksOperandType isRegularNMS = getOpType(ANEURALNETWORKS_BOOL);
3506 ANeuralNetworksOperandType maxNumDetections = getOpType(ANEURALNETWORKS_INT32);
3507 ANeuralNetworksOperandType numOfClassesPerDetection = maxNumDetections;
3508 ANeuralNetworksOperandType numOfDetections = numOfClassesPerDetection;
3509 ANeuralNetworksOperandType scoreThreshold = scaleScalar;
3510 ANeuralNetworksOperandType iouThreshold = scaleScalar;
3511 ANeuralNetworksOperandType includeBackground = getOpType(ANEURALNETWORKS_BOOL);
3512 // Outputs
3513 const int maxNumDetectionsValue = 5;
3514 uint32_t outputScoreDims[2] = {numBatches, maxNumDetectionsValue};
3515 ANeuralNetworksOperandType outputScore = getOpType(inputOperandCode, 2, outputScoreDims);
3516 uint32_t boundingBoxesDims[3] = {numBatches, maxNumDetectionsValue, 4};
3517 ANeuralNetworksOperandType boundingBoxes = getOpType(inputOperandCode, 3, boundingBoxesDims);
3518 ANeuralNetworksOperandType classLabel =
3519 getOpType(ANEURALNETWORKS_TENSOR_INT32, 2, outputScoreDims);
3520 uint32_t numValidDims[1] = {numBatches};
3521 ANeuralNetworksOperandType numValid = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, numValidDims);
3522
3523 OperationTestBase test(ANEURALNETWORKS_DETECTION_POSTPROCESSING,
3524 {input, deltas, anchors, scaleScalar, scaleScalar, scaleScalar,
3525 scaleScalar, isRegularNMS, maxNumDetections, numOfClassesPerDetection,
3526 numOfDetections, scoreThreshold, iouThreshold, includeBackground},
3527 {outputScore, boundingBoxes, classLabel, numValid});
3528 test.testOpsValidations();
3529 }
3530
3531 TEST(OperationValidationTest, DETECTION_POSTPROCESSING) {
3532 detectionPostprocessingOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3533 detectionPostprocessingOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3534 }
3535
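// Builds a PRELU operation: an input tensor and a broadcastable alpha tensor of the same type.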
3536 void preluOpTest(int32_t operandCode) {
3537 uint32_t inoutDim[] = {1, 2, 2, 3}, alphaDim[] = {1, 1, 3};
3538 OperationTestBase preluTest(
3539 ANEURALNETWORKS_PRELU,
3540 {getOpType(operandCode, 4, inoutDim), getOpType(operandCode, 3, alphaDim)},
3541 {getOpType(operandCode, 4, inoutDim)});
3542 preluTest.testOpsValidations();
3543 }
3544
3545 TEST(OperationValidationTest, PRELU_float16) {
3546 preluOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3547 }
3548
3549 TEST(OperationValidationTest, PRELU_float32) {
3550 preluOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3551 }
3552
3553 TEST(OperationValidationTest, PRELU_quant8) {
3554 preluOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3555 }
3556
3557 TEST(OperationValidationTest, PRELU_quant8signed) {
3558 preluOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3559 }
3560
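// Exercises a normalization operation (e.g. L2_NORMALIZATION) both without and with the optional
// INT32 axis operand; the axis form constrains the input rank to at most 4.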
3561 void normalizationOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
3562 uint32_t inputDim[] = {2, 2, 2, 2};
3563 OperationTestBase normalizationTest(operationCode, {getOpType(operandCode, 4, inputDim)},
3564 {getOpType(operandCode, 4, inputDim)});
3565 normalizationTest.testOpsValidations();
3566
3567 OperationTestBase normalizationAxisTest(
3568 operationCode, {getOpType(operandCode, 4, inputDim), getOpType(ANEURALNETWORKS_INT32)},
3569 {getOpType(operandCode, 4, inputDim)}, {{TensorRankConstraint::UpTo(4)}});
3570 normalizationAxisTest.testOpsValidations();
3571 }
3572
3573 TEST(OperationValidationTest, L2_NORMALIZATION_float16) {
3574 normalizationOpTest(ANEURALNETWORKS_L2_NORMALIZATION, ANEURALNETWORKS_TENSOR_FLOAT16);
3575 }
3576
3577 TEST(OperationValidationTest, L2_NORMALIZATION_float32) {
3578 normalizationOpTest(ANEURALNETWORKS_L2_NORMALIZATION, ANEURALNETWORKS_TENSOR_FLOAT32);
3579 }
3580
3581 TEST(OperationValidationTest, L2_NORMALIZATION_quant8) {
3582 normalizationOpTest(ANEURALNETWORKS_L2_NORMALIZATION, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3583 }
3584
3585 TEST(OperationValidationTest, L2_NORMALIZATION_quant8_signed) {
3586 normalizationOpTest(ANEURALNETWORKS_L2_NORMALIZATION,
3587 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3588 }
3589
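// Builds LOCAL_RESPONSE_NORMALIZATION: an INT32 radius and bias/alpha/beta scalars matching the
// input precision, exercised with and without the optional axis operand.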
3590 void localResponseNormOpTest(int32_t operandCode) {
3591 int32_t floatScalarType = (operandCode == ANEURALNETWORKS_TENSOR_FLOAT32)
3592 ? ANEURALNETWORKS_FLOAT32
3593 : ANEURALNETWORKS_FLOAT16;
3594 uint32_t inputDim[] = {2, 2, 2, 6};
3595 OperationTestBase lrnTest(
3596 ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
3597 {getOpType(operandCode, 4, inputDim), getOpType(ANEURALNETWORKS_INT32),
3598 getOpType(floatScalarType), getOpType(floatScalarType), getOpType(floatScalarType)},
3599 {getOpType(operandCode, 4, inputDim)}, {{TensorRankConstraint::UpTo(4), {0}}});
3600 lrnTest.testOpsValidations();
3601
3602 OperationTestBase lrnAxisTest(
3603 ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
3604 {getOpType(operandCode, 4, inputDim), getOpType(ANEURALNETWORKS_INT32),
3605 getOpType(floatScalarType), getOpType(floatScalarType), getOpType(floatScalarType),
3606 getOpType(ANEURALNETWORKS_INT32)},
3607 {getOpType(operandCode, 4, inputDim)}, {{TensorRankConstraint::UpTo(4), {0}}});
3608 lrnAxisTest.testOpsValidations();
3609 }
3610
3611 TEST(OperationValidationTest, LOCAL_RESPONSE_NORMALIZATION_float16) {
3612 localResponseNormOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3613 }
3614
3615 TEST(OperationValidationTest, LOCAL_RESPONSE_NORMALIZATION_float32) {
3616 localResponseNormOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3617 }
3618
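// Builds an AXIS_ALIGNED_BBOX_TRANSFORM operation: ROIs, box deltas, batch indices, and
// image-size information.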
3619 void axisAlignedBBoxTransformOpTest(int32_t roiOperandCode, int32_t deltaOperandCode) {
3620 uint32_t roiDim[] = {5, 4}, deltaDim[] = {5, 8}, bsDim[] = {5}, imageDim[] = {5, 2};
3621 uint32_t outDim[] = {5, 8};
3622 OperationTestBase axisAlignedBBoxTransformTest(
3623 ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM,
3624 {getOpType(roiOperandCode, 2, roiDim), getOpType(deltaOperandCode, 2, deltaDim),
3625 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, bsDim),
3626 getOpType(roiOperandCode, 2, imageDim)},
3627 {getOpType(roiOperandCode, 2, outDim)});
3628 axisAlignedBBoxTransformTest.testOpsValidations();
3629 }
3630
3631 TEST(OperationValidationTest, AXIS_ALIGNED_BBOX_TRANSFORM_float16) {
3632 axisAlignedBBoxTransformOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
3633 }
3634
3635 TEST(OperationValidationTest, AXIS_ALIGNED_BBOX_TRANSFORM_float32) {
3636 axisAlignedBBoxTransformOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
3637 }
3638
3639 TEST(OperationValidationTest, AXIS_ALIGNED_BBOX_TRANSFORM_quant) {
3640 axisAlignedBBoxTransformOpTest(ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3641 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3642 }
3643
3644 TEST(OperationValidationTest, AXIS_ALIGNED_BBOX_TRANSFORM_quant_signed) {
3645 axisAlignedBBoxTransformOpTest(ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3646 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3647 }
3648
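// Builds a SLICE operation: 1-D TENSOR_INT32 begin and size tensors selecting a sub-tensor of the
// input.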
3649 void sliceTest(int32_t operandCode) {
3650 uint32_t inputDim[] = {3, 3, 3};
3651 uint32_t startDim[] = {3};
3652 uint32_t sizeDim[] = {3};
3653 uint32_t outputDim[] = {1, 2, 3};
3654
3655 OperationTestBase sliceTest(ANEURALNETWORKS_SLICE,
3656 {getOpType(operandCode, 3, inputDim),
3657 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, startDim),
3658 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, sizeDim)},
3659 {getOpType(operandCode, 3, outputDim)});
3660 sliceTest.testOpsValidations();
3661 }
3662
3663 TEST(OperationValidationTest, SLICE_float32) {
3664 sliceTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3665 }
3666 TEST(OperationValidationTest, SLICE_int32) {
3667 sliceTest(ANEURALNETWORKS_TENSOR_INT32);
3668 }
3669 TEST(OperationValidationTest, SLICE_uint8) {
3670 sliceTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3671 }
3672 TEST(OperationValidationTest, SLICE_int8) {
3673 sliceTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3674 }
3675 TEST(OperationValidationTest, SLICE_float16) {
3676 sliceTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3677 }
3678
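// Exercises a binary logical operation (LOGICAL_AND / LOGICAL_OR) on TENSOR_BOOL8 operands.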
3679 void logicalTest(ANeuralNetworksOperationType operationCode) {
3680 uint32_t inputDimensions[4] = {2, 2, 2, 2};
3681 ANeuralNetworksOperandType input1 = {.type = ANEURALNETWORKS_TENSOR_BOOL8,
3682 .dimensionCount = 4,
3683 .dimensions = inputDimensions,
3684 .scale = 0.0f,
3685 .zeroPoint = 0};
3686 ANeuralNetworksOperandType input2 = input1;
3687 ANeuralNetworksOperandType output = input1;
3688
3689 OperationTestBase test(operationCode, {input1, input2}, {output});
3690 test.testOpsValidations();
3691 }
3692
3693 TEST(OperationValidationTest, LOGICAL_AND) {
3694 logicalTest(ANEURALNETWORKS_LOGICAL_AND);
3695 }
3696
3697 TEST(OperationValidationTest, LOGICAL_OR) {
3698 logicalTest(ANEURALNETWORKS_LOGICAL_OR);
3699 }
3700
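// Exercises an element-wise comparison operation: two tensors of the given type producing a
// TENSOR_BOOL8 result.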
3701 void comparisonTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
3702 uint32_t inputDimensions[4] = {2, 2, 2, 2};
3703 ANeuralNetworksOperandType input1 = getOpType(inputOperandType, 4, inputDimensions);
3704 ANeuralNetworksOperandType input2 = input1;
3705 ANeuralNetworksOperandType output = {.type = ANEURALNETWORKS_TENSOR_BOOL8,
3706 .dimensionCount = 4,
3707 .dimensions = inputDimensions,
3708 .scale = 0.0f,
3709 .zeroPoint = 0};
3710 OperationTestBase test(operationCode, {input1, input2}, {output});
3711 test.testOpsValidations();
3712 }
3713
3714 TEST(OperationValidationTest, LESS) {
3715 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_BOOL8);
3716 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_FLOAT16);
3717 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_FLOAT32);
3718 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_INT32);
3719 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3720 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3721 }
3722
3723 TEST(OperationValidationTest, LESS_EQUAL) {
3724 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
3725 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
3726 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
3727 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
3728 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3729 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3730 }
3731
3732 TEST(OperationValidationTest, EQUAL) {
3733 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
3734 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
3735 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
3736 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
3737 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3738 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3739 }
3740
3741 TEST(OperationValidationTest, NOT_EQUAL) {
3742 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
3743 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
3744 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
3745 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
3746 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3747 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3748 }
3749
3750 TEST(OperationValidationTest, GREATER) {
3751 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_BOOL8);
3752 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_FLOAT16);
3753 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_FLOAT32);
3754 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_INT32);
3755 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3756 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3757 }
3758
3759 TEST(OperationValidationTest, GREATER_EQUAL) {
3760 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
3761 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
3762 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
3763 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
3764 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3765 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3766 }
3767
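// Exercises a reduction operation: an input tensor, a 1-D TENSOR_INT32 axes tensor, and a BOOL
// keep-dims scalar, with the input rank capped at 4.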
3768 void reduceOpTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
3769 uint32_t inputDimensions[4] = {2, 2, 2, 2};
3770 ANeuralNetworksOperandType input1 = getOpType(inputOperandType, 4, inputDimensions);
3771 uint32_t axesDimensions[1] = {2};
3772 ANeuralNetworksOperandType input2 = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, axesDimensions);
3773 ANeuralNetworksOperandType input3 = getOpType(ANEURALNETWORKS_BOOL, 0);
3774 ANeuralNetworksOperandType output = getOpType(inputOperandType, 4, inputDimensions);
3775 OperationTestBase test(operationCode, {input1, input2, input3}, {output},
3776 {{TensorRankConstraint::UpTo(4)}});
3777 test.testOpsValidations();
3778 }
3779
TEST(OperationValidationTest,REDUCE_PROD)3780 TEST(OperationValidationTest, REDUCE_PROD) {
3781 reduceOpTest(ANEURALNETWORKS_REDUCE_PROD, ANEURALNETWORKS_TENSOR_FLOAT16);
3782 reduceOpTest(ANEURALNETWORKS_REDUCE_PROD, ANEURALNETWORKS_TENSOR_FLOAT32);
3783 }
3784
TEST(OperationValidationTest,REDUCE_SUM)3785 TEST(OperationValidationTest, REDUCE_SUM) {
3786 reduceOpTest(ANEURALNETWORKS_REDUCE_SUM, ANEURALNETWORKS_TENSOR_FLOAT16);
3787 reduceOpTest(ANEURALNETWORKS_REDUCE_SUM, ANEURALNETWORKS_TENSOR_FLOAT32);
3788 }
3789
TEST(OperationValidationTest,REDUCE_MAX)3790 TEST(OperationValidationTest, REDUCE_MAX) {
3791 reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_FLOAT16);
3792 reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_FLOAT32);
3793 reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3794 reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3795 }
3796
TEST(OperationValidationTest,REDUCE_MIN)3797 TEST(OperationValidationTest, REDUCE_MIN) {
3798 reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_FLOAT16);
3799 reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_FLOAT32);
3800 reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3801 reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3802 }
3803
TEST(OperationValidationTest,REDUCE_ANY)3804 TEST(OperationValidationTest, REDUCE_ANY) {
3805 reduceOpTest(ANEURALNETWORKS_REDUCE_ANY, ANEURALNETWORKS_TENSOR_BOOL8);
3806 }
3807
TEST(OperationValidationTest,REDUCE_ALL)3808 TEST(OperationValidationTest, REDUCE_ALL) {
3809 reduceOpTest(ANEURALNETWORKS_REDUCE_ALL, ANEURALNETWORKS_TENSOR_BOOL8);
3810 }
3811
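// Shared helper for SELECT: a TENSOR_BOOL8 condition tensor plus two value tensors of the
// given type, producing an output of the same type and shape.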
void selectTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
    uint32_t inputDimensions[4] = {2, 2, 2, 2};
    ANeuralNetworksOperandType input0 = getOpType(ANEURALNETWORKS_TENSOR_BOOL8, 4, inputDimensions);
    ANeuralNetworksOperandType input1 = getOpType(inputOperandType, 4, inputDimensions);
    ANeuralNetworksOperandType input2 = input1;
    ANeuralNetworksOperandType output = input1;

    OperationTestBase test(operationCode, {input0, input1, input2}, {output});
    test.testOpsValidations();
}

TEST(OperationValidationTest, SELECT) {
    selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_FLOAT16);
    selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_FLOAT32);
    selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_INT32);
    selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
    selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

void powTest(int32_t inputOperandType) {
    const uint32_t inputDimensions[] = {3, 3};
    ANeuralNetworksOperandType inputType = {.type = inputOperandType,
            .dimensionCount = 2,
            .dimensions = inputDimensions,
            .scale = 0.0f,
            .zeroPoint = 0};

    OperationTestBase test(ANEURALNETWORKS_POW, {inputType, inputType}, {inputType});
    test.testOpsValidations();
}

TEST(OperationValidationTest, POW) {
    powTest(ANEURALNETWORKS_TENSOR_FLOAT16);
    powTest(ANEURALNETWORKS_TENSOR_FLOAT32);
}

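// Shared helper for BOX_WITH_NMS_LIMIT: score, ROI, and batch-split tensors on the input side
// (plus the operation's INT32 and floating-point scalar parameters), with filtered scores,
// ROIs, class indices, and batch splits as outputs.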
void boxWithNmsLimitOpTest(int32_t scoreOperandCode, int32_t roiOperandCode,
        int32_t scalarOperandCode) {
    uint32_t scoreDim[] = {19, 3}, roiDim[] = {19, 12}, splitDim[] = {2};
    uint32_t outScoreDim[] = {12}, outRoiDim[] = {12, 4}, outClassDim[] = {12}, outSplitDim[] = {2};
    OperationTestBase boxWithNmsLimitTest(
            ANEURALNETWORKS_BOX_WITH_NMS_LIMIT,
            {getOpType(scoreOperandCode, 2, scoreDim), getOpType(roiOperandCode, 2, roiDim),
             getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, splitDim), getOpType(scalarOperandCode),
             getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
             getOpType(scalarOperandCode), getOpType(scalarOperandCode),
             getOpType(scalarOperandCode)},
            {getOpType(scoreOperandCode, 1, outScoreDim), getOpType(roiOperandCode, 2, outRoiDim),
             getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outClassDim),
             getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outSplitDim)});
    boxWithNmsLimitTest.testOpsValidations();
}

TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_float16) {
    boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
            ANEURALNETWORKS_FLOAT16);
}

TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_float32) {
    boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
            ANEURALNETWORKS_FLOAT32);
}

TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_quant) {
    boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
            ANEURALNETWORKS_FLOAT32);
}

TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_quant_signed) {
    boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
            ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, ANEURALNETWORKS_FLOAT32);
}

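// Shared helper for CAST: a single rank-3 input of inputOperandCode cast to outputOperandCode,
// with a SCOPED_TRACE tag identifying the type pair on failure.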
void castOpTest(int32_t inputOperandCode, int32_t outputOperandCode) {
    SCOPED_TRACE(testing::Message()
            << "inputType: " << inputOperandCode << ", outputType: " << outputOperandCode);
    uint32_t inputDimensions[3] = {2, 2, 2};
    ANeuralNetworksOperandType input = getOpType(inputOperandCode, 3, inputDimensions);
    ANeuralNetworksOperandType output = getOpType(outputOperandCode, 3, inputDimensions);
    OperationTestBase test(ANEURALNETWORKS_CAST, {input}, {output});
    test.testOpsValidations();
}

TEST(OperationValidationTest, CAST) {
    std::vector<int32_t> inputTypes = {ANEURALNETWORKS_TENSOR_FLOAT16,
            ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_INT32,
            ANEURALNETWORKS_TENSOR_QUANT8_ASYMM};
    std::vector<int32_t> outputTypes = inputTypes;
    for (auto inputType : inputTypes) {
        for (auto outputType : outputTypes) {
            castOpTest(inputType, outputType);
        }
    }
}

TEST(OperationValidationTest, CAST_identity) {
    std::vector<int32_t> inputTypes = {
            ANEURALNETWORKS_TENSOR_FLOAT32,
            ANEURALNETWORKS_TENSOR_INT32,
            ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
            ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
            ANEURALNETWORKS_TENSOR_FLOAT16,
            ANEURALNETWORKS_TENSOR_BOOL8,
            ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
            ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
            ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
    };
    for (auto inputType : inputTypes) {
        castOpTest(inputType, inputType);
    }
}

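// Builds the 15-input operand list for BIDIRECTIONAL_SEQUENCE_RNN (forward and backward
// weights, recurrent weights, biases, and hidden states, with the input and weight operands
// re-used for the auxiliary-input positions) and validates it for the given tensor type.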
void bidirectionalSequenceRNNTest(int32_t inputOperandCode) {
    const uint32_t batchSize = 2;
    const uint32_t maxTime = 3;
    const uint32_t inputSize = 4;
    const uint32_t numUnits = 5;

    uint32_t inputDims[3] = {maxTime, batchSize, inputSize};
    uint32_t weightsDims[2] = {inputSize, numUnits};
    uint32_t recurrentWeightsDims[2] = {numUnits, numUnits};
    uint32_t biasDims[1] = {numUnits};
    uint32_t hiddenStateDims[2] = {batchSize, numUnits};
    uint32_t outputDims[2] = {batchSize, numUnits};

    ANeuralNetworksOperandType input = {.type = inputOperandCode,
            .dimensionCount = 3,
            .dimensions = inputDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType fwWeights = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = weightsDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType bwWeights = fwWeights;
    ANeuralNetworksOperandType fwRecurrentWeights = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = recurrentWeightsDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType bwRecurrentWeights = fwRecurrentWeights;
    ANeuralNetworksOperandType fwBias = {.type = inputOperandCode,
            .dimensionCount = 1,
            .dimensions = biasDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType bwBias = fwBias;
    ANeuralNetworksOperandType fwHiddenState = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = hiddenStateDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType bwHiddenState = fwHiddenState;
    ANeuralNetworksOperandType output = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = outputDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = nullptr,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType boolScalar = {.type = ANEURALNETWORKS_BOOL,
            .dimensionCount = 0,
            .dimensions = nullptr,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType timeMajor = boolScalar;
    ANeuralNetworksOperandType mergeOutputs = boolScalar;

    OperationTestBase rnnTest(ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN,
            {input, fwWeights, fwRecurrentWeights, fwBias, fwHiddenState,
             bwWeights, bwRecurrentWeights, bwBias, bwHiddenState, input,
             fwWeights, bwWeights, activation, timeMajor, mergeOutputs},
            {output, output});
    rnnTest.testOpsValidations();
}

TEST(OperationValidationTest, BIDIRECTIONAL_SEQUENCE_RNN_float32) {
    bidirectionalSequenceRNNTest(ANEURALNETWORKS_TENSOR_FLOAT32);
}

TEST(OperationValidationTest, BIDIRECTIONAL_SEQUENCE_RNN_float16) {
    bidirectionalSequenceRNNTest(ANEURALNETWORKS_TENSOR_FLOAT16);
}

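// Builds the 7-input operand list for UNIDIRECTIONAL_SEQUENCE_RNN (input, weights, recurrent
// weights, bias, hidden state, activation, and the time-major flag) for the given tensor type.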
void unidirectionalSequenceRNNTest(int32_t inputOperandCode) {
    const uint32_t batchSize = 2;
    const uint32_t maxTime = 3;
    const uint32_t inputSize = 4;
    const uint32_t numUnits = 5;

    uint32_t inputDims[3] = {maxTime, batchSize, inputSize};
    uint32_t weightsDims[2] = {inputSize, numUnits};
    uint32_t recurrentWeightsDims[2] = {numUnits, numUnits};
    uint32_t biasDims[1] = {numUnits};
    uint32_t hiddenStateDims[2] = {batchSize, numUnits};
    uint32_t outputDims[2] = {batchSize, numUnits};

    ANeuralNetworksOperandType input = {.type = inputOperandCode,
            .dimensionCount = 3,
            .dimensions = inputDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType weights = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = weightsDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType recurrentWeights = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = recurrentWeightsDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType bias = {.type = inputOperandCode,
            .dimensionCount = 1,
            .dimensions = biasDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType hiddenState = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = hiddenStateDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType output = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = outputDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = nullptr,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType activation = intScalar;
    ANeuralNetworksOperandType timeMajor = intScalar;

    OperationTestBase rnnTest(
            ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN,
            {input, weights, recurrentWeights, bias, hiddenState, activation, timeMajor}, {output});
    rnnTest.testOpsValidations();
}

TEST(OperationValidationTest, UNIDIRECTIONAL_SEQUENCE_RNN_float32) {
    unidirectionalSequenceRNNTest(ANEURALNETWORKS_TENSOR_FLOAT32);
}

TEST(OperationValidationTest, UNIDIRECTIONAL_SEQUENCE_RNN_float16) {
    unidirectionalSequenceRNNTest(ANEURALNETWORKS_TENSOR_FLOAT16);
}

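// Builds the 28-input operand list for UNIDIRECTIONAL_SEQUENCE_LSTM, including the peephole,
// projection, and layer-normalization weights; the cell/projection clip scalars use FLOAT32 or
// FLOAT16 to match the tensor type under test.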
void unidirectionalSequenceLSTMTest(int32_t inputOperandCode) {
    const uint32_t maxTime = 2;
    const uint32_t batchSize = 3;
    const uint32_t numUnits = 4;
    const uint32_t inputSize = 5;
    const uint32_t outputSize = 6;

    uint32_t inputDims[3] = {maxTime, batchSize, inputSize};
    uint32_t inputWeightsDims[2] = {numUnits, inputSize};
    uint32_t recurrentWeightsDims[2] = {numUnits, outputSize};
    uint32_t diagonalDims[1] = {numUnits};
    uint32_t projectionDims[2] = {outputSize, numUnits};
    uint32_t projectionBiasDims[1] = {outputSize};
    uint32_t outputStateDims[2] = {batchSize, outputSize};
    uint32_t cellStateDims[2] = {batchSize, numUnits};

    uint32_t outputDims[3] = {maxTime, batchSize, outputSize};

    ANeuralNetworksOperandType input = {.type = inputOperandCode,
            .dimensionCount = 3,
            .dimensions = inputDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType inputToInputWeights = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = inputWeightsDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType inputToForgetWeights = inputToInputWeights;
    ANeuralNetworksOperandType inputToCellWeights = inputToInputWeights;
    ANeuralNetworksOperandType inputToOutputWeights = inputToInputWeights;
    ANeuralNetworksOperandType recurrentToInputWeights = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = recurrentWeightsDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType recurrentToForgetWeights = recurrentToInputWeights;
    ANeuralNetworksOperandType recurrentToCellWeights = recurrentToInputWeights;
    ANeuralNetworksOperandType recurrentToOutputWeights = recurrentToInputWeights;
    ANeuralNetworksOperandType cellToInputWeights = {.type = inputOperandCode,
            .dimensionCount = 1,
            .dimensions = diagonalDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType cellToForgetWeights = cellToInputWeights;
    ANeuralNetworksOperandType cellToOutputWeights = cellToInputWeights;
    ANeuralNetworksOperandType inputGateBias = {.type = inputOperandCode,
            .dimensionCount = 1,
            .dimensions = diagonalDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType forgetGateBias = inputGateBias;
    ANeuralNetworksOperandType cellGateBias = inputGateBias;
    ANeuralNetworksOperandType outputGateBias = inputGateBias;
    ANeuralNetworksOperandType projectionWeights = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = projectionDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType projectionBias = {.type = inputOperandCode,
            .dimensionCount = 1,
            .dimensions = projectionBiasDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType outputStateIn = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = outputStateDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType cellStateIn = {.type = inputOperandCode,
            .dimensionCount = 2,
            .dimensions = cellStateDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType intScalar = {
            .type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = nullptr,
            .scale = 0.0f,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType activation = intScalar;
    ANeuralNetworksOperandType floatScalar = {
            .type = inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32 ? ANEURALNETWORKS_FLOAT32
                                                                       : ANEURALNETWORKS_FLOAT16,
            .dimensionCount = 0,
            .dimensions = nullptr,
            .scale = 0.0f,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType cellClip = floatScalar;
    ANeuralNetworksOperandType projClip = floatScalar;
    ANeuralNetworksOperandType boolScalar = {
            .type = ANEURALNETWORKS_BOOL,
            .dimensionCount = 0,
            .dimensions = nullptr,
            .scale = 0.0f,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType timeMajor = boolScalar;
    ANeuralNetworksOperandType inputLayerNormWeights = {.type = inputOperandCode,
            .dimensionCount = 1,
            .dimensions = diagonalDims,
            .scale = 0.0f,
            .zeroPoint = 0};
    ANeuralNetworksOperandType forgetLayerNormWeights = inputLayerNormWeights;
    ANeuralNetworksOperandType cellLayerNormWeights = inputLayerNormWeights;
    ANeuralNetworksOperandType outputLayerNormWeights = inputLayerNormWeights;

    ANeuralNetworksOperandType output = {.type = inputOperandCode,
            .dimensionCount = 3,
            .dimensions = outputDims,
            .scale = 0.0f,
            .zeroPoint = 0};

    OperationTestBase ulstmTest(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM,
            {input,
             inputToInputWeights,
             inputToForgetWeights,
             inputToCellWeights,
             inputToOutputWeights,
             recurrentToInputWeights,
             recurrentToForgetWeights,
             recurrentToCellWeights,
             recurrentToOutputWeights,
             cellToInputWeights,
             cellToForgetWeights,
             cellToOutputWeights,
             inputGateBias,
             forgetGateBias,
             cellGateBias,
             outputGateBias,
             projectionWeights,
             projectionBias,
             outputStateIn,
             cellStateIn,
             activation,
             cellClip,
             projClip,
             timeMajor,
             inputLayerNormWeights,
             forgetLayerNormWeights,
             cellLayerNormWeights,
             outputLayerNormWeights},
            {output});
    ulstmTest.testOpsValidations();
}

TEST(OperationValidationTest, UNIDIRECTIONAL_SEQUENCE_LSTM_float32) {
    unidirectionalSequenceLSTMTest(ANEURALNETWORKS_TENSOR_FLOAT32);
}

TEST(OperationValidationTest, UNIDIRECTIONAL_SEQUENCE_LSTM_float16) {
    unidirectionalSequenceLSTMTest(ANEURALNETWORKS_TENSOR_FLOAT16);
}

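// Shared helper for GENERATE_PROPOSALS: rank-4 score and bounding-box delta tensors, rank-2
// anchor and image-info tensors, and the operation's scalar parameters; outputs are the
// proposal scores, ROIs, and a batch-split tensor.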
void generateProposalsOpTest(int32_t scoreOperandCode, int32_t deltaOperandCode,
        int32_t anchorOperandCode, int32_t roiOperandCode,
        int32_t scalarOperandCode) {
    uint32_t scoreDim[] = {1, 2, 2, 2}, deltaDim[] = {1, 2, 2, 8}, anchorDim[] = {2, 4},
            imageInfoDim[] = {1, 2};
    uint32_t outScoreDim[] = {4}, outRoiDim[] = {4, 4}, outSplitDim[] = {1};
    OperationTestBase generateProposalsTest(
            ANEURALNETWORKS_GENERATE_PROPOSALS,
            {getOpType(scoreOperandCode, 4, scoreDim), getOpType(deltaOperandCode, 4, deltaDim),
             getOpType(anchorOperandCode, 2, anchorDim), getOpType(roiOperandCode, 2, imageInfoDim),
             getOpType(scalarOperandCode), getOpType(scalarOperandCode),
             getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
             getOpType(scalarOperandCode), getOpType(scalarOperandCode),
             getOpType(ANEURALNETWORKS_BOOL)},
            {getOpType(scoreOperandCode, 1, outScoreDim), getOpType(roiOperandCode, 2, outRoiDim),
             getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outSplitDim)});
    generateProposalsTest.testOpsValidations();
}

TEST(OperationValidationTest, GENERATE_PROPOSALS_float16) {
    generateProposalsOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
            ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
            ANEURALNETWORKS_FLOAT16);
}

TEST(OperationValidationTest, GENERATE_PROPOSALS_float32) {
    generateProposalsOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
            ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
            ANEURALNETWORKS_FLOAT32);
}

TEST(OperationValidationTest, GENERATE_PROPOSALS_quant) {
    generateProposalsOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
            ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
            ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
            ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, ANEURALNETWORKS_FLOAT32);
}

TEST(OperationValidationTest, GENERATE_PROPOSALS_quant_signed) {
    generateProposalsOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
            ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
            ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
            ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, ANEURALNETWORKS_FLOAT32);
}

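// Shared helper for RESIZE_NEAREST_NEIGHBOR: a rank-4 input, two scalars giving the output
// size (INT32) or scale factors (floating point), and a BOOL layout flag.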
void resizeNearestNeighborTest(int32_t inputCode, int32_t scalarCode) {
    uint32_t inputDim[] = {1, 2, 2, 1}, outputDim[] = {1, 1, 1, 1};
    OperationTestBase resizeImageOpTest(ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR,
            {getOpType(inputCode, 4, inputDim), getOpType(scalarCode),
             getOpType(scalarCode), getOpType(ANEURALNETWORKS_BOOL)},
            {getOpType(inputCode, 4, outputDim)});
    resizeImageOpTest.testOpsValidations();
}

TEST(OperationValidationTest, RESIZE_NEAREST_NEIGHBOR) {
    resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_INT32);
    resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_FLOAT32);
}

TEST(OperationValidationTest, RESIZE_NEAREST_NEIGHBOR_float16) {
    resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_INT32);
    resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_FLOAT16);
}

TEST(OperationValidationTest, RESIZE_NEAREST_NEIGHBOR_quant8) {
    resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_INT32);
    resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_FLOAT32);
}

TEST(OperationValidationTest, RESIZE_NEAREST_NEIGHBOR_quant8_signed) {
    resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_INT32);
    resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_FLOAT32);
}

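// Validates QUANTIZED_LSTM: 8-bit asymmetric-signed activations, 8-bit symmetric weights,
// 16-bit symmetric cell state and layer-norm weights, TENSOR_INT32 biases, and scalar clip,
// intermediate-scale, and hidden-state quantization parameters.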
TEST(OperationValidationTest, QUANTIZED_LSTM) {
    uint32_t oneDimensional[1] = {5};
    uint32_t twoDimensional[2] = {5, 5};

    ANeuralNetworksOperandType quant8AsymSignedTensor2D = {
            .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
            .dimensionCount = 2,
            .dimensions = twoDimensional,
            .scale = 0.0078125,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType quant8SymTensor2D = {
            .type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
            .dimensionCount = 2,
            .dimensions = twoDimensional,
            .scale = 0.0078125,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType quant16SymTensor1D = {
            .type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
            .dimensionCount = 1,
            .dimensions = oneDimensional,
            .scale = 1.0,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType quant16SymTensor2D = {
            .type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
            .dimensionCount = 2,
            .dimensions = twoDimensional,
            .scale = 1.0,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType int32Tensor1D = {
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 1,
            .dimensions = oneDimensional,
            .scale = 4.65661e-08,
            .zeroPoint = 0,
    };
    ANeuralNetworksOperandType int32Scalar = {
            .type = ANEURALNETWORKS_INT32,
    };
    ANeuralNetworksOperandType float32Scalar = {
            .type = ANEURALNETWORKS_FLOAT32,
    };

    ANeuralNetworksOperandType input = quant8AsymSignedTensor2D;
    ANeuralNetworksOperandType input_to_input_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType input_to_forget_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType input_to_cell_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType input_to_output_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType recurrent_to_input_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType recurrent_to_forget_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType recurrent_to_cell_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType recurrent_to_output_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType cell_to_input_weights = quant16SymTensor2D;
    ANeuralNetworksOperandType cell_to_forget_weights = quant16SymTensor2D;
    ANeuralNetworksOperandType cell_to_output_weights = quant16SymTensor2D;
    ANeuralNetworksOperandType input_gate_bias = int32Tensor1D;
    ANeuralNetworksOperandType forget_gate_bias = int32Tensor1D;
    ANeuralNetworksOperandType cell_gate_bias = int32Tensor1D;
    ANeuralNetworksOperandType output_gate_bias = int32Tensor1D;
    ANeuralNetworksOperandType projection_weights = quant8SymTensor2D;
    ANeuralNetworksOperandType projection_bias = int32Tensor1D;
    ANeuralNetworksOperandType output_state_in = quant8AsymSignedTensor2D;
    ANeuralNetworksOperandType cell_state_in = quant16SymTensor2D;
    ANeuralNetworksOperandType input_layer_norm_weights = quant16SymTensor1D;
    ANeuralNetworksOperandType forget_layer_norm_weights = quant16SymTensor1D;
    ANeuralNetworksOperandType cell_layer_norm_weights = quant16SymTensor1D;
    ANeuralNetworksOperandType output_layer_norm_weights = quant16SymTensor1D;
    ANeuralNetworksOperandType cell_clip = float32Scalar;
    ANeuralNetworksOperandType projection_clip = float32Scalar;
    ANeuralNetworksOperandType input_intermediate_scale = float32Scalar;
    ANeuralNetworksOperandType forget_intermediate_scale = float32Scalar;
    ANeuralNetworksOperandType cell_intermediate_scale = float32Scalar;
    ANeuralNetworksOperandType output_intermediate_scale = float32Scalar;
    ANeuralNetworksOperandType hidden_state_zero_point = int32Scalar;
    ANeuralNetworksOperandType hidden_state_scale = float32Scalar;

    ANeuralNetworksOperandType output_state_out = quant8AsymSignedTensor2D;
    ANeuralNetworksOperandType cell_state_out = quant16SymTensor2D;
    ANeuralNetworksOperandType output = quant8AsymSignedTensor2D;

    OperationTestBase test(ANEURALNETWORKS_QUANTIZED_LSTM,
            {input,
             input_to_input_weights,
             input_to_forget_weights,
             input_to_cell_weights,
             input_to_output_weights,
             recurrent_to_input_weights,
             recurrent_to_forget_weights,
             recurrent_to_cell_weights,
             recurrent_to_output_weights,
             cell_to_input_weights,
             cell_to_forget_weights,
             cell_to_output_weights,
             input_gate_bias,
             forget_gate_bias,
             cell_gate_bias,
             output_gate_bias,
             projection_weights,
             projection_bias,
             output_state_in,
             cell_state_in,
             input_layer_norm_weights,
             forget_layer_norm_weights,
             cell_layer_norm_weights,
             output_layer_norm_weights,
             cell_clip,
             projection_clip,
             input_intermediate_scale,
             forget_intermediate_scale,
             cell_intermediate_scale,
             output_intermediate_scale,
             hidden_state_zero_point,
             hidden_state_scale},
            {output_state_out, cell_state_out, output});
    test.testOpsValidations();
}

void fillTest(int32_t valueOperandType, int32_t outputOperandType) {
    uint32_t inputDimensions[1] = {3};
    ANeuralNetworksOperandType input0 = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, inputDimensions);
    ANeuralNetworksOperandType input1 = getOpType(valueOperandType);
    uint32_t outputDimensions[3] = {3, 4, 5};
    ANeuralNetworksOperandType output = getOpType(outputOperandType, 3, outputDimensions);
    OperationTestBase test(ANEURALNETWORKS_FILL, {input0, input1}, {output});
    test.testOpsValidations();
}

TEST(OperationValidationTest, FILL_float16) {
    fillTest(ANEURALNETWORKS_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
}

TEST(OperationValidationTest, FILL_float32) {
    fillTest(ANEURALNETWORKS_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
}

TEST(OperationValidationTest, FILL_int32) {
    fillTest(ANEURALNETWORKS_INT32, ANEURALNETWORKS_TENSOR_INT32);
}

void rankTest(int32_t inputOperandType) {
    uint32_t inputDimensions[3] = {3, 4, 5};
    ANeuralNetworksOperandType input = getOpType(inputOperandType, 3, inputDimensions);
    ANeuralNetworksOperandType output = getOpType(ANEURALNETWORKS_INT32);
    OperationTestBase test(ANEURALNETWORKS_RANK, {input}, {output});
    test.testOpsValidations();
}

TEST(OperationValidationTest, RANK_float16) {
    rankTest(ANEURALNETWORKS_TENSOR_FLOAT16);
}

TEST(OperationValidationTest, RANK_float32) {
    rankTest(ANEURALNETWORKS_TENSOR_FLOAT32);
}

TEST(OperationValidationTest, RANK_int32) {
    rankTest(ANEURALNETWORKS_TENSOR_INT32);
}

TEST(OperationValidationTest, RANK_quant8) {
    rankTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
}

TEST(OperationValidationTest, RANK_quant8_signed) {
    rankTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
}

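// Builds a minimal single-operation model that CASTs its only input to its only output; used
// as the then/else branch model when validating IF below.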
ANeuralNetworksModel* makeIdentityModel(const ANeuralNetworksOperandType* type) {
    ANeuralNetworksModel* model = nullptr;
    EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
    uint32_t inputs[] = {0};
    uint32_t outputs[] = {1};
    EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CAST, std::size(inputs),
                      inputs, std::size(outputs), outputs),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(inputs), inputs,
                      std::size(outputs), outputs),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
    return model;
}

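// Validates IF for the given outer operand dimensions and then/else branch models. When
// testMutations is false, only the basic success case is checked; see the comment in the IF
// test body for why mutation testing is limited to the first configuration.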
void testIf(const std::vector<uint32_t>& outerDims, const ANeuralNetworksModel* thenModel,
        const ANeuralNetworksModel* elseModel, bool testMutations) {
    const uint32_t kThenOperand = 1;
    const uint32_t kElseOperand = 2;
    const uint32_t boolDims[] = {1};
    ANeuralNetworksOperandType boolType =
            getOpType(ANEURALNETWORKS_TENSOR_BOOL8, std::size(boolDims), boolDims);
    ANeuralNetworksOperandType dataType =
            getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, outerDims.size(), outerDims.data());
    ANeuralNetworksOperandType modelType = getOpType(ANEURALNETWORKS_MODEL);
    OperationTestBase test(ANEURALNETWORKS_IF, {boolType, modelType, modelType, dataType},
            {dataType});
    test.setInputOperandValueFromModel(kThenOperand, thenModel);
    test.setInputOperandValueFromModel(kElseOperand, elseModel);
    if (testMutations) {
        test.testOpsValidations();
    } else {
        EXPECT_TRUE(test.testSuccess());
    }
}

void testIf(const std::vector<uint32_t>& outerDims, const std::vector<uint32_t>& thenDims,
        const std::vector<uint32_t>& elseDims, bool testMutations) {
    ANeuralNetworksOperandType thenDataType =
            getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, thenDims.size(), thenDims.data());
    ANeuralNetworksOperandType elseDataType =
            getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, elseDims.size(), elseDims.data());
    ANeuralNetworksModel* thenModel = makeIdentityModel(&thenDataType);
    ANeuralNetworksModel* elseModel = makeIdentityModel(&elseDataType);
    testIf(outerDims, thenModel, elseModel, testMutations);
    ANeuralNetworksModel_free(thenModel);
    ANeuralNetworksModel_free(elseModel);
}

TEST(OperationValidationTest, IF) {
    const std::vector<std::pair<std::string, std::vector<uint32_t>>> configurations = {
            {"fully specified", {1, 2, 3}},
            {"unknown dimensions", {0, 2, 0}},
            {"unknown rank", {}},
    };
    // We skip mutation testing for all but the first configuration to avoid the
    // exponential runtime blowup. The value of additional operand code and
    // count mutations is negligible because whether the shapes are fully
    // specified should have nothing to do with the operand code or count.
    bool testMutations = true;
    for (const auto& [outerTrace, outerDims] : configurations) {
        SCOPED_TRACE(testing::Message() << "outerDims: " << outerTrace);
        for (const auto& [thenTrace, thenDims] : configurations) {
            SCOPED_TRACE(testing::Message() << "thenDims: " << thenTrace);
            for (const auto& [elseTrace, elseDims] : configurations) {
                SCOPED_TRACE(testing::Message() << "elseDims: " << elseTrace);
                testIf(outerDims, thenDims, elseDims, testMutations);
                testMutations = false;
            }
        }
    }
}

// operand 0 --> +------+
//               | LESS | --> operand 2
// operand 1 --> +------+
//
ANeuralNetworksModel* makeWhileCondModel(const ANeuralNetworksOperandType* dataType,
        const ANeuralNetworksOperandType* boolType) {
    ANeuralNetworksModel* model = nullptr;
    EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, dataType), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, dataType), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, boolType), ANEURALNETWORKS_NO_ERROR);
    const uint32_t inputs[] = {0, 1};
    const uint32_t outputs[] = {2};
    EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_LESS, std::size(inputs),
                      inputs, std::size(outputs), outputs),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(inputs), inputs,
                      std::size(outputs), outputs),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
    return model;
}

//               +------+
// operand 0 --> | CAST | --> operand 2
//               +------+
//
// operand 1 --> (unused)
//
ANeuralNetworksModel* makeWhileBodyModel(const ANeuralNetworksOperandType* type) {
    ANeuralNetworksModel* model = nullptr;
    EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
    const uint32_t castInputs[] = {0};
    const uint32_t castOutputs[] = {2};
    EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CAST, std::size(castInputs),
                      castInputs, std::size(castOutputs), castOutputs),
              ANEURALNETWORKS_NO_ERROR);
    const uint32_t modelInputs[] = {0, 1};
    const uint32_t modelOutputs[] = {2};
    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(modelInputs),
                      modelInputs, std::size(modelOutputs), modelOutputs),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
    return model;
}

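// Validates WHILE for the given outer operand dimensions and condition/body models. As with IF,
// mutation testing is only run when testMutations is true to keep the runtime manageable.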
void testWhile(const std::vector<uint32_t>& outerDims, const ANeuralNetworksModel* condModel,
        const ANeuralNetworksModel* bodyModel, bool testMutations) {
    const uint32_t kCondOperand = 0;
    const uint32_t kBodyOperand = 1;
    ANeuralNetworksOperandType modelType = getOpType(ANEURALNETWORKS_MODEL);
    ANeuralNetworksOperandType dataType =
            getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, outerDims.size(), outerDims.data());
    OperationTestBase test(ANEURALNETWORKS_WHILE, {modelType, modelType, dataType, dataType},
            {dataType});
    test.setInputOperandValueFromModel(kCondOperand, condModel);
    test.setInputOperandValueFromModel(kBodyOperand, bodyModel);
    if (testMutations) {
        test.testOpsValidations();
    } else {
        EXPECT_TRUE(test.testSuccess());
    }
}

void testWhile(const std::vector<uint32_t>& outerDims, const std::vector<uint32_t>& condDims,
        const std::vector<uint32_t>& bodyDims, bool testMutations) {
    const uint32_t boolDims[] = {1};
    ANeuralNetworksOperandType boolType =
            getOpType(ANEURALNETWORKS_TENSOR_BOOL8, std::size(boolDims), boolDims);
    ANeuralNetworksOperandType condDataType =
            getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, condDims.size(), condDims.data());
    ANeuralNetworksOperandType bodyDataType =
            getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, bodyDims.size(), bodyDims.data());
    ANeuralNetworksModel* condModel = makeWhileCondModel(&condDataType, &boolType);
    ANeuralNetworksModel* bodyModel = makeWhileBodyModel(&bodyDataType);
    testWhile(outerDims, condModel, bodyModel, testMutations);
    ANeuralNetworksModel_free(condModel);
    ANeuralNetworksModel_free(bodyModel);
}

TEST(OperationValidationTest, WHILE) {
    const std::vector<std::pair<std::string, std::vector<uint32_t>>> configurations = {
            {"fully specified", {1, 2, 3}},
            {"unknown dimensions", {0, 2, 0}},
            {"unknown rank", {}},
    };
    // We skip mutation testing for all but the first configuration to avoid the
    // exponential runtime blowup. The value of additional operand code and
    // count mutations is negligible because whether the shapes are fully
    // specified should have nothing to do with the operand code or count.
    bool testMutations = true;
    for (const auto& [outerTrace, outerDims] : configurations) {
        SCOPED_TRACE(testing::Message() << "outerDims: " << outerTrace);
        for (const auto& [condTrace, condDims] : configurations) {
            SCOPED_TRACE(testing::Message() << "condDims: " << condTrace);
            for (const auto& [bodyTrace, bodyDims] : configurations) {
                SCOPED_TRACE(testing::Message() << "bodyDims: " << bodyTrace);
                testWhile(outerDims, condDims, bodyDims, testMutations);
                testMutations = false;
            }
        }
    }
}

}  // end namespace