1 /*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "TestNeuralNetworksWrapper.h"

#include <sys/mman.h>
#include <unistd.h>

#include <cstring>
#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include <android-base/macros.h>
#include <android/sharedmem.h>
#include <gtest/gtest.h>
26
27 using namespace android::nn::test_wrapper;
28
29 namespace {
30
31 // We try the following model:
32 //
33 // op2 = ADD(op0, op1)
34 // op4 = TRANSPOSE(op2, op3)
35 //
36 // where op0 is a required model input, should be of dimension (A, B).
37 // op1 is a required constant, should be of dimension (A, 1).
38 // op2 is an internal operand, should be of dimension (A, B).
39 // op3 is an omitted optional constant / model input, should be of dimension (2).
40 // op4 is a model output, should be of dimension (B, A).
41 //
42 // For each operand, we test combinations of dimensions specification level during model
43 // construction time and execution time (if any). All other relevant combinations of the
44 // basic scenarios are then iterated over in TestAll. Note that we don't want to just use
45 // googletest's parametrized tests (TEST_P) as the 16k combinations generated too many
46 // lines of output for the test infrastructure to handle correctly.
47
48 // Which operand to test
49 enum class UnspecifiedOperand {
50 INPUT_MANDATORY,
51 CONST_MANDATORY,
52 TEMPORARY_VARIABLE,
53 INPUT_OPTIONAL,
54 CONST_OPTIONAL,
55 OUTPUT
56 };
57 // How well the dimensional information is specified
58 enum class SpecificationLevel {
59 FULLY_SPECIFIED, // all dimensions are clearly specified without any ambiguity
60 UNSPECIFIED_DIM, // certain dimension is set to 0 as unknown, but rank is well-specified
61 UNSPECIFIED_RANK, // rank is set to 0 as unknown, passing an empty vector for dims
62 UNSPECIFIED_TYPE // only during execution time, passing nullptr for operand type
63 };
64 using UnspecifiedDimensionsTestParam = std::tuple<UnspecifiedOperand,
65 SpecificationLevel, // model construction time
66 SpecificationLevel>; // execution time
67
68 // Indexing
69 constexpr uint32_t kIndex0_Model = 0; // op0, model
70 constexpr uint32_t kIndex1_Model = 1; // op1, model
71 constexpr uint32_t kIndex2_Model = 2; // op2, model
72 constexpr uint32_t kIndex3_Model = 3; // op3, model
73 constexpr uint32_t kIndex4_Model = 4; // op4, model
74 constexpr uint32_t kIndex0_Execution = 5; // op0, execution
75 constexpr uint32_t kIndex3_Execution = 6; // op3, execution
76 constexpr uint32_t kIndex4_Execution = 7; // op4, execution
77 constexpr uint32_t kIndexCount = 8; // count
78
79 constexpr int32_t kValueA = 0;
80 constexpr int32_t kValueB = 2;
81 constexpr uint32_t kDimAGood = 2;
82 constexpr uint32_t kDimABad = 3;
83
84 class UnspecifiedDimensionsTest : public ::testing::TestWithParam<UnspecifiedDimensionsTestParam> {
85 enum class OptionalType { CONST, INPUT }; // omitted operand op3 is an input or const
86 enum class BufferSize { LESS, EQUAL, MORE }; // only used for output buffer size
87 enum class OperandLocation { BUFFER, MEMORY }; // where the operand reside
88 enum class InOutType { INPUT, OUTPUT }; // parameter for setInOut()
89
90 class SharedMemoryForTest {
91 public:
SharedMemoryForTest()92 SharedMemoryForTest() : memory(nullptr), fd(-1), buffer(nullptr), length(0) {}
~SharedMemoryForTest()93 ~SharedMemoryForTest() {
94 if (buffer != nullptr) {
95 munmap(buffer, length);
96 }
97 if (fd > -1) {
98 close(fd);
99 }
100 }
initialize(size_t size,const void * data)101 void initialize(size_t size, const void* data) {
102 length = size;
103 fd = ASharedMemory_create(nullptr, size);
104 ASSERT_GT(fd, -1);
105 buffer = (uint8_t*)mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
106 ASSERT_NE(buffer, nullptr);
107 memcpy(buffer, data, size);
108 memory = std::make_shared<Memory>(size, PROT_READ | PROT_WRITE, fd, 0);
109 ASSERT_TRUE(memory->isValid());
110 }
getMemory() const111 const Memory* getMemory() const { return memory.get(); }
getBuffer() const112 const uint8_t* getBuffer() const { return buffer; }
113
114 private:
115 DISALLOW_COPY_AND_ASSIGN(SharedMemoryForTest);
116 std::shared_ptr<Memory> memory;
117 int fd;
118 uint8_t* buffer;
119 size_t length;
120 };
121
toString(SpecificationLevel level)122 std::string toString(SpecificationLevel level) {
123 switch (level) {
124 case SpecificationLevel::FULLY_SPECIFIED:
125 return "FULLY_SPECIFIED";
126 case SpecificationLevel::UNSPECIFIED_DIM:
127 return "UNSPECIFIED_DIM";
128 case SpecificationLevel::UNSPECIFIED_RANK:
129 return "UNSPECIFIED_RANK";
130 case SpecificationLevel::UNSPECIFIED_TYPE:
131 return "UNSPECIFIED_TYPE";
132 default:
133 return "UNKNOWN";
134 }
135 }
136
toString(BufferSize b)137 std::string toString(BufferSize b) {
138 switch (b) {
139 case BufferSize::LESS:
140 return "LESS";
141 case BufferSize::EQUAL:
142 return "EQUAL";
143 case BufferSize::MORE:
144 return "MORE";
145 default:
146 return "UNKNOWN";
147 }
148 }
149
toString(OperandLocation loc)150 std::string toString(OperandLocation loc) {
151 switch (loc) {
152 case OperandLocation::BUFFER:
153 return "BUFFER";
154 case OperandLocation::MEMORY:
155 return "MEMORY";
156 default:
157 return "UNKNOWN";
158 }
159 }
160
161 protected:
SetUp()162 virtual void SetUp() {
163 uint32_t modelIndex, executionIndex;
164 switch (kUnspecifiedOperand) {
165 case UnspecifiedOperand::INPUT_MANDATORY:
166 modelIndex = kIndex0_Model;
167 executionIndex = kIndex0_Execution;
168 mBadIndexChoices = {kIndexCount, modelIndex, executionIndex};
169 mOperandLocationChoices = {OperandLocation::BUFFER, OperandLocation::MEMORY};
170 break;
171 case UnspecifiedOperand::CONST_MANDATORY:
172 modelIndex = kIndex1_Model;
173 executionIndex = kIndexCount;
174 mBadIndexChoices = {kIndexCount, modelIndex};
175 mOperandLocationChoices = {OperandLocation::BUFFER, OperandLocation::MEMORY};
176 break;
177 case UnspecifiedOperand::TEMPORARY_VARIABLE:
178 modelIndex = kIndex2_Model;
179 executionIndex = kIndexCount;
180 mBadIndexChoices = {kIndexCount, modelIndex};
181 mOperandLocationChoices = {OperandLocation::BUFFER};
182 break;
183 case UnspecifiedOperand::INPUT_OPTIONAL:
184 modelIndex = kIndex3_Model;
185 executionIndex = kIndex3_Execution;
186 mBadIndexChoices = {kIndexCount};
187 mOptionalType = OptionalType::INPUT;
188 mOperandLocationChoices = {OperandLocation::BUFFER};
189 break;
190 case UnspecifiedOperand::CONST_OPTIONAL:
191 modelIndex = kIndex3_Model;
192 executionIndex = kIndexCount;
193 mBadIndexChoices = {kIndexCount};
194 mOperandLocationChoices = {OperandLocation::BUFFER};
195 break;
196 case UnspecifiedOperand::OUTPUT:
197 modelIndex = kIndex4_Model;
198 executionIndex = kIndex4_Execution;
199 mBadIndexChoices = {kIndexCount, modelIndex, executionIndex};
200 mOperandLocationChoices = {OperandLocation::BUFFER, OperandLocation::MEMORY};
201 mBufferSizeChoices = {BufferSize::LESS, BufferSize::EQUAL, BufferSize::MORE};
202 break;
203 default:
204 break;
205 }
206 std::vector<SpecificationLevel> levels{
207 SpecificationLevel::UNSPECIFIED_DIM, SpecificationLevel::FULLY_SPECIFIED,
208 SpecificationLevel::UNSPECIFIED_DIM, SpecificationLevel::FULLY_SPECIFIED,
209 SpecificationLevel::UNSPECIFIED_DIM, SpecificationLevel::FULLY_SPECIFIED,
210 SpecificationLevel::FULLY_SPECIFIED, SpecificationLevel::FULLY_SPECIFIED};
211 levels[modelIndex] = kSpecificationLevelModel;
212 if (executionIndex < kIndexCount) {
213 levels[executionIndex] = kSpecificationLevelExecution;
214 }
215 mSpecificationLevels = std::move(levels);
216 }
217
getType(uint32_t index,const std::vector<uint32_t> & dim)218 OperandType getType(uint32_t index, const std::vector<uint32_t>& dim) {
219 const SpecificationLevel l = mSpecificationLevels[index];
220 std::vector<uint32_t> setDim;
221 if (l != SpecificationLevel::UNSPECIFIED_RANK) {
222 for (auto d : dim) {
223 if (d == 0) {
224 setDim.push_back(mBadIndex != index ? kDimAGood : kDimABad);
225 } else {
226 setDim.push_back(l == SpecificationLevel::FULLY_SPECIFIED ? d : 0);
227 }
228 }
229 }
230 float scale = mOperandTypes[index] == Type::TENSOR_QUANT8_ASYMM ? 1.0 : 0.0;
231 return OperandType(mOperandTypes[index], setDim, scale, 0);
232 }
233
getSize(uint32_t index,const std::vector<uint32_t> & dim,BufferSize s=BufferSize::EQUAL)234 uint32_t getSize(uint32_t index, const std::vector<uint32_t>& dim,
235 BufferSize s = BufferSize::EQUAL) {
236 uint32_t n = 1;
237 for (auto d : dim) {
238 n *= (d == 0 ? (mBadIndex != index ? kDimAGood : kDimABad) : d);
239 }
240 if (s == BufferSize::LESS) {
241 n /= 2;
242 } else if (s == BufferSize::MORE) {
243 n *= 2;
244 }
245 return n;
246 };
247
248 template <typename T>
setInOut(Execution * execution,uint32_t index,uint32_t opIndex,const std::vector<uint32_t> & dim,void * buffer,const SharedMemoryForTest * memory,InOutType inOutType,BufferSize bufferSize=BufferSize::EQUAL)249 Result setInOut(Execution* execution, uint32_t index, uint32_t opIndex,
250 const std::vector<uint32_t>& dim, void* buffer,
251 const SharedMemoryForTest* memory, InOutType inOutType,
252 BufferSize bufferSize = BufferSize::EQUAL) {
253 const auto kLevel = mSpecificationLevels[index];
254 size_t size = (buffer == nullptr) ? 0 : getSize(index, dim, bufferSize) * sizeof(T);
255 auto type = getType(index, dim);
256 ANeuralNetworksOperandType* t =
257 (kLevel == SpecificationLevel::UNSPECIFIED_TYPE) ? nullptr : &type.operandType;
258 if (mOperandLocation == OperandLocation::MEMORY && memory != nullptr) {
259 if (inOutType == InOutType::INPUT) {
260 return execution->setInputFromMemory(opIndex, memory->getMemory(), 0, size, t);
261 } else {
262 return execution->setOutputFromMemory(opIndex, memory->getMemory(), 0, size, t);
263 }
264 } else {
265 if (inOutType == InOutType::INPUT) {
266 return execution->setInput(opIndex, buffer, size, t);
267 } else {
268 return execution->setOutput(opIndex, buffer, size, t);
269 }
270 }
271 return Result::NO_ERROR;
272 }
273
274 template <typename T, Type TensorType>
TestOne()275 void TestOne() {
276 // Phase 1: Build Model
277 Model model;
278 auto type0 = getType(kIndex0_Model, {kValueA, kValueB});
279 auto type1 = getType(kIndex1_Model, {kValueA, 1});
280 auto type2 = getType(kIndex2_Model, {kValueA, kValueB});
281 auto type3 = getType(kIndex3_Model, {2});
282 auto type4 = getType(kIndex4_Model, {kValueB, kValueA});
283 OperandType typeActivation(Type::INT32, {}); // activation
284
285 auto op0 = model.addOperand(&type0);
286 auto op1 = model.addOperand(&type1);
287 auto op2 = model.addOperand(&type2);
288 auto op3 = model.addOperand(&type3);
289 auto op4 = model.addOperand(&type4);
290 auto act = model.addOperand(&typeActivation);
291
292 T bufferOp1[2] = {1, 2};
293 SharedMemoryForTest memoryOp1;
294 memoryOp1.initialize(sizeof(bufferOp1), bufferOp1);
295 if (mOperandLocation == OperandLocation::BUFFER) {
296 model.setOperandValue(op1, bufferOp1, sizeof(bufferOp1));
297 } else {
298 model.setOperandValueFromMemory(op1, memoryOp1.getMemory(), 0, sizeof(bufferOp1));
299 }
300 int32_t kActivation = 0;
301 model.setOperandValue(act, &kActivation, sizeof(int32_t));
302 if (mOptionalType == OptionalType::CONST) {
303 model.setOperandValue(op3, nullptr, 0);
304 }
305
306 model.addOperation(ANEURALNETWORKS_ADD, {op0, op1, act}, {op2});
307 model.addOperation(ANEURALNETWORKS_TRANSPOSE, {op2, op3}, {op4});
308 if (mOptionalType == OptionalType::CONST) {
309 model.identifyInputsAndOutputs({op0}, {op4});
310 } else {
311 model.identifyInputsAndOutputs({op0, op3}, {op4});
312 }
313
314 bool expected = expectModelIsValid();
315 ASSERT_EQ(model.isValid(), expected);
316 Result result = model.finish();
317 if (expected) {
318 ASSERT_EQ(result, Result::NO_ERROR);
319 } else {
320 // There is no contract (yet) for specific errors in NeuralNetworks.h,
321 // so we just assert on not being successful.
322 ASSERT_NE(result, Result::NO_ERROR);
323 return;
324 }
325
326 // Phase 2: Compile Model, should always pass
327 Compilation compilation(&model);
328 ASSERT_EQ(compilation.finish(), Result::NO_ERROR);
329
330 std::vector<uint32_t> valueBChoices = {1, 2};
331 for (const auto valueB : valueBChoices) {
332 SCOPED_TRACE("ValueB: " + std::to_string(valueB));
333 if (valueB != kValueB &&
334 (mSpecificationLevels[kIndex0_Model] == SpecificationLevel::FULLY_SPECIFIED ||
335 mSpecificationLevels[kIndex2_Model] == SpecificationLevel::FULLY_SPECIFIED ||
336 mSpecificationLevels[kIndex4_Model] == SpecificationLevel::FULLY_SPECIFIED)) {
337 continue;
338 }
339
340 // Phase 3: Set Execution Input/Output
341 Execution execution(&compilation);
342
343 // Set input0
344 Result result;
345 T bufferOp0[6] = {1, 2, 3, 4, 5, 6};
346 SharedMemoryForTest memoryOp0;
347 memoryOp0.initialize(sizeof(bufferOp0), bufferOp0);
348 result = setInOut<T>(&execution, kIndex0_Execution, 0, {kValueA, valueB}, bufferOp0,
349 &memoryOp0, InOutType::INPUT);
350 ASSERT_EQ(result, expectSetInput0());
351 if (result != Result::NO_ERROR) continue;
352
353 // Set input1, omitted
354 if (mOptionalType == OptionalType::INPUT) {
355 result = setInOut<T>(&execution, kIndex3_Execution, 1, {2}, nullptr, nullptr,
356 InOutType::INPUT);
357 ASSERT_EQ(result, expectSetInput1());
358 if (result != Result::NO_ERROR) continue;
359 }
360
361 // Set output0
362 T bufferOp4[16];
363 SharedMemoryForTest memoryOp4;
364 memoryOp4.initialize(sizeof(bufferOp4), bufferOp4);
365 result = setInOut<T>(&execution, kIndex4_Execution, 0, {valueB, kValueA}, bufferOp4,
366 &memoryOp4, InOutType::OUTPUT, mOutputBufferSize);
367 ASSERT_EQ(result, expectSetOutput0());
368 if (result != Result::NO_ERROR) continue;
369
370 // Phase 4: Compute and Compare Results
371 result = execution.compute();
372 ASSERT_EQ(result, expectCompute());
373 if (result == Result::OP_FAILED) continue;
374
375 std::vector<uint32_t> outputShape;
376 ASSERT_EQ(execution.getOutputOperandDimensions(0, &outputShape), result);
377 std::vector<uint32_t> expectedOutputShape = {valueB, kDimAGood};
378 ASSERT_EQ(outputShape, expectedOutputShape);
379 if (result == Result::OUTPUT_INSUFFICIENT_SIZE) continue;
380
381 const T* outputBuffer = mOperandLocation == OperandLocation::MEMORY
382 ? reinterpret_cast<const T*>(memoryOp4.getBuffer())
383 : bufferOp4;
384 T expected_1x2[2] = {2, 4};
385 T expected_2x2[4] = {2, 5, 3, 6};
386 for (uint32_t i = 0; i < kDimAGood * valueB; i++) {
387 ASSERT_EQ(outputBuffer[i], valueB == 1 ? expected_1x2[i] : expected_2x2[i]);
388 }
389 }
390 }
391
392 // Expect invalid model for the following cases
393 // - op1 is not fully specified (const operand must be fully specified)
394 // - op1 has bad dimension value (const operand size is checked with buffer size)
expectModelIsValid()395 bool expectModelIsValid() {
396 const auto kLevel1_Model = mSpecificationLevels[kIndex1_Model];
397 if (kLevel1_Model != SpecificationLevel::FULLY_SPECIFIED || mBadIndex == kIndex1_Model) {
398 return false;
399 }
400 return true;
401 }
402
403 // Expect BAD_DATA on input0 for the following cases
404 // - the provided type is not fully specified
405 // - the provided type does not agree with the type set at model construction time
406 // - no type is provided and the type is not fully specified at model construction time
expectSetInput0()407 Result expectSetInput0() {
408 const auto kLevel0_Model = mSpecificationLevels[kIndex0_Model];
409 const auto kLevel0_Execution = mSpecificationLevels[kIndex0_Execution];
410 switch (kLevel0_Execution) {
411 case SpecificationLevel::UNSPECIFIED_DIM:
412 case SpecificationLevel::UNSPECIFIED_RANK:
413 return Result::BAD_DATA;
414 case SpecificationLevel::FULLY_SPECIFIED:
415 if ((mBadIndex == kIndex0_Execution || mBadIndex == kIndex0_Model) &&
416 kLevel0_Model != SpecificationLevel::UNSPECIFIED_RANK) {
417 return Result::BAD_DATA;
418 }
419 break;
420 case SpecificationLevel::UNSPECIFIED_TYPE:
421 if (kLevel0_Model == SpecificationLevel::UNSPECIFIED_DIM ||
422 kLevel0_Model == SpecificationLevel::UNSPECIFIED_RANK ||
423 mBadIndex == kIndex0_Model) {
424 return Result::BAD_DATA;
425 }
426 break;
427 default:
428 break;
429 }
430 return Result::NO_ERROR;
431 }
432
433 // Expect BAD_DATA on input1 for the following cases
434 // - the provided type is less detailed as the type set at model construction time
expectSetInput1()435 Result expectSetInput1() {
436 const auto kLevel3_Model = mSpecificationLevels[kIndex3_Model];
437 const auto kLevel3_Execution = mSpecificationLevels[kIndex3_Execution];
438 switch (kLevel3_Execution) {
439 case SpecificationLevel::UNSPECIFIED_DIM:
440 if (kLevel3_Model == SpecificationLevel::FULLY_SPECIFIED) {
441 return Result::BAD_DATA;
442 }
443 break;
444 case SpecificationLevel::UNSPECIFIED_RANK:
445 if (kLevel3_Model != SpecificationLevel::UNSPECIFIED_RANK) {
446 return Result::BAD_DATA;
447 }
448 break;
449 default:
450 break;
451 }
452 return Result::NO_ERROR;
453 }
454
455 // Expect BAD_DATA on output0 for the following cases
456 // - the provided type is less detailed as the type set at model construction time
457 // - the provided type does not agree with the type set at model construction time
458 // - the buffer size does not agree with a fully specified type
expectSetOutput0()459 Result expectSetOutput0() {
460 const auto kLevel4_Model = mSpecificationLevels[kIndex4_Model];
461 const auto kLevel4_Execution = mSpecificationLevels[kIndex4_Execution];
462 switch (kLevel4_Execution) {
463 case SpecificationLevel::UNSPECIFIED_DIM:
464 if (kLevel4_Model == SpecificationLevel::FULLY_SPECIFIED ||
465 (kLevel4_Model == SpecificationLevel::UNSPECIFIED_DIM &&
466 (mBadIndex == kIndex4_Model || mBadIndex == kIndex4_Execution))) {
467 return Result::BAD_DATA;
468 }
469 break;
470 case SpecificationLevel::UNSPECIFIED_RANK:
471 if (kLevel4_Model != SpecificationLevel::UNSPECIFIED_RANK) {
472 return Result::BAD_DATA;
473 }
474 break;
475 case SpecificationLevel::FULLY_SPECIFIED:
476 if (((mBadIndex == kIndex4_Model || mBadIndex == kIndex4_Execution) &&
477 kLevel4_Model != SpecificationLevel::UNSPECIFIED_RANK) ||
478 mOutputBufferSize != BufferSize::EQUAL) {
479 return Result::BAD_DATA;
480 }
481 break;
482 case SpecificationLevel::UNSPECIFIED_TYPE:
483 if (kLevel4_Model == SpecificationLevel::FULLY_SPECIFIED &&
484 (mOutputBufferSize != BufferSize::EQUAL || mBadIndex == kIndex4_Model ||
485 mBadIndex == kIndex4_Execution)) {
486 return Result::BAD_DATA;
487 }
488 break;
489 default:
490 break;
491 }
492 return Result::NO_ERROR;
493 }
494
495 // Expect failure for the following cases
496 // - one of the operands has bad dimension -> OP_FAILED
497 // - insufficient output buffer -> OUTPUT_INSUFFICIENT_SIZE
expectCompute()498 Result expectCompute() {
499 if (mBadIndex < 8) {
500 return Result::OP_FAILED;
501 } else if (mOutputBufferSize == BufferSize::LESS) {
502 return Result::OUTPUT_INSUFFICIENT_SIZE;
503 }
504 return Result::NO_ERROR;
505 }
506
507 // Iterate over combinations of
508 // - mBadIndexChoices: which operand has incorrect dimension
509 // - mOperandLocationChoices: where the operand reside, buffer or shared memory
510 // - mBufferSizeChoices: whether the provided output buffer/memory size is sufficient
511 template <typename T, Type TensorType>
TestAll()512 void TestAll() {
513 SCOPED_TRACE("Model: " + toString(kSpecificationLevelModel));
514 SCOPED_TRACE("Execution: " + toString(kSpecificationLevelExecution));
515 mOperandTypes = {TensorType, TensorType, TensorType, Type::TENSOR_INT32,
516 TensorType, TensorType, Type::TENSOR_INT32, TensorType};
517 for (const auto kBadIndex : mBadIndexChoices) {
518 SCOPED_TRACE("Bad Index: " + std::to_string(mBadIndex));
519 mBadIndex = kBadIndex;
520 if (mBadIndex < 8 &&
521 (mSpecificationLevels[mBadIndex] == SpecificationLevel::UNSPECIFIED_RANK ||
522 mSpecificationLevels[mBadIndex] == SpecificationLevel::UNSPECIFIED_TYPE)) {
523 continue;
524 }
525 for (const auto kOperandLocation : mOperandLocationChoices) {
526 mOperandLocation = kOperandLocation;
527 SCOPED_TRACE("Operand Location: " + toString(mOperandLocation));
528 for (const auto kOutputBufferSize : mBufferSizeChoices) {
529 mOutputBufferSize = kOutputBufferSize;
530 SCOPED_TRACE("Output Buffer Size: " + toString(mOutputBufferSize));
531 TestOne<T, TensorType>();
532 }
533 }
534 }
535 }
536
537 const UnspecifiedOperand kUnspecifiedOperand = std::get<0>(GetParam());
538 const SpecificationLevel kSpecificationLevelModel = std::get<1>(GetParam());
539 const SpecificationLevel kSpecificationLevelExecution = std::get<2>(GetParam());
540
541 std::vector<SpecificationLevel> mSpecificationLevels;
542 std::vector<Type> mOperandTypes;
543 OptionalType mOptionalType = OptionalType::CONST;
544
545 // Iterate all combinations in TestAll()
546 std::vector<uint32_t> mBadIndexChoices;
547 std::vector<OperandLocation> mOperandLocationChoices;
548 std::vector<BufferSize> mBufferSizeChoices = {BufferSize::EQUAL};
549
550 uint32_t mBadIndex;
551 OperandLocation mOperandLocation;
552 BufferSize mOutputBufferSize;
553 };
554
TEST_P(UnspecifiedDimensionsTest,Float32)555 TEST_P(UnspecifiedDimensionsTest, Float32) {
556 TestAll<float, Type::TENSOR_FLOAT32>();
557 }
558
TEST_P(UnspecifiedDimensionsTest,Quant8)559 TEST_P(UnspecifiedDimensionsTest, Quant8) {
560 TestAll<uint8_t, Type::TENSOR_QUANT8_ASYMM>();
561 }
562
TEST_P(UnspecifiedDimensionsTest,Float16)563 TEST_P(UnspecifiedDimensionsTest, Float16) {
564 TestAll<_Float16, Type::TENSOR_FLOAT16>();
565 }
566
567 static const auto kAllSpecificationLevelsModel =
568 testing::Values(SpecificationLevel::FULLY_SPECIFIED, SpecificationLevel::UNSPECIFIED_DIM,
569 SpecificationLevel::UNSPECIFIED_RANK);
570 static const auto kAllSpecificationLevelsExecution =
571 testing::Values(SpecificationLevel::FULLY_SPECIFIED, SpecificationLevel::UNSPECIFIED_DIM,
572 SpecificationLevel::UNSPECIFIED_RANK, SpecificationLevel::UNSPECIFIED_TYPE);
573 static const auto kFullySpecified = testing::Values(SpecificationLevel::FULLY_SPECIFIED);
574
575 INSTANTIATE_TEST_CASE_P(ModelInputTest, UnspecifiedDimensionsTest,
576 testing::Combine(testing::Values(UnspecifiedOperand::INPUT_MANDATORY),
577 kAllSpecificationLevelsModel,
578 kAllSpecificationLevelsExecution));
579
580 INSTANTIATE_TEST_CASE_P(ConstantParameterTest, UnspecifiedDimensionsTest,
581 testing::Combine(testing::Values(UnspecifiedOperand::CONST_MANDATORY),
582 kAllSpecificationLevelsModel, kFullySpecified));
583
584 INSTANTIATE_TEST_CASE_P(TemporaryVariableTest, UnspecifiedDimensionsTest,
585 testing::Combine(testing::Values(UnspecifiedOperand::TEMPORARY_VARIABLE),
586 kAllSpecificationLevelsModel, kFullySpecified));
587
588 INSTANTIATE_TEST_CASE_P(OptionalConstantTest, UnspecifiedDimensionsTest,
589 testing::Combine(testing::Values(UnspecifiedOperand::CONST_OPTIONAL),
590 kAllSpecificationLevelsModel, kFullySpecified));
591
592 INSTANTIATE_TEST_CASE_P(OptionalInputTest, UnspecifiedDimensionsTest,
593 testing::Combine(testing::Values(UnspecifiedOperand::INPUT_OPTIONAL),
594 kAllSpecificationLevelsModel,
595 kAllSpecificationLevelsExecution));
596
597 INSTANTIATE_TEST_CASE_P(ModelOutputTest, UnspecifiedDimensionsTest,
598 testing::Combine(testing::Values(UnspecifiedOperand::OUTPUT),
599 kAllSpecificationLevelsModel,
600 kAllSpecificationLevelsExecution));
601
602 } // end namespace
603