// clang-format off
// Generated file (from: batch_to_space_quant8_1.mod.py). Do not edit
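// Both builders below construct a BATCH_TO_SPACE_ND model on quantized
// (TENSOR_QUANT8_ASYMM, scale 1.0f, zero point 0) data with a 2x2 block size,
// rearranging the {4, 2, 2, 1} batch into a {1, 4, 4, 1} spatial layout.
// The second builder leaves the output shape unspecified ({0, 0, 0, 0}) so the
// runtime derives it at execution time.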
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 1.0f, 0);
  OperandType type1(Type::TENSOR_INT32, {2});
  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 1.0f, 0);
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto block_size = model->addOperand(&type1);
  auto output = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t block_size_init[] = {2, 2};
  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {input, block_size}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 1.0f, 0);
  OperandType type1(Type::TENSOR_INT32, {2});
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 1.0f, 0);
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto block_size = model->addOperand(&type1);
  auto output = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t block_size_init[] = {2, 2};
  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {input, block_size}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}