// clang-format off
// Generated file (from: depthwise_conv2d_quant8.mod.py). Do not edit
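// The model below wires up a single quantized DEPTHWISE_CONV_2D: a 1x2x2x2
// TENSOR_QUANT8_ASYMM input (scale 0.5, zero point 0) is convolved with a
// constant 1x2x2x2 depthwise filter and a zero bias, using zero padding,
// stride 1, depth multiplier 1, and no fused activation, producing a
// 1x1x1x2 output with scale 1.0.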
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
  OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
  OperandType type2(Type::INT32, {});
  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.0f, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type0);
  auto op3 = model->addOperand(&type1);
  auto pad0 = model->addOperand(&type2);
  auto stride = model->addOperand(&type2);
  auto channelMultiplier = model->addOperand(&type2);
  auto act = model->addOperand(&type2);
  auto op4 = model->addOperand(&type3);
  // Phase 2, operations
  static uint8_t op2_init[] = {2, 4, 2, 0, 2, 2, 2, 0};
  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 8);
  static int32_t op3_init[] = {0, 0};
  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 2);
  static int32_t pad0_init[] = {0};
  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {1};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
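  // Input order follows the explicit-padding form of DEPTHWISE_CONV_2D:
  // input, filter, bias, pad left/right/top/bottom, stride width/height,
  // depth multiplier, fused activation.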
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

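// Reports whether the output at example index i should be skipped when
// results are checked; this test ignores no outputs.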
inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

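// Same model as CreateModel, except the output operand uses type4, whose
// dimensions are all 0 (unspecified), so the runtime must deduce the output
// shape at execution time (dynamic output shape path).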
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
  OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
  OperandType type2(Type::INT32, {});
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 1.0f, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type0);
  auto op3 = model->addOperand(&type1);
  auto pad0 = model->addOperand(&type2);
  auto stride = model->addOperand(&type2);
  auto channelMultiplier = model->addOperand(&type2);
  auto act = model->addOperand(&type2);
  auto op4 = model->addOperand(&type4);
  // Phase 2, operations
  static uint8_t op2_init[] = {2, 4, 2, 0, 2, 2, 2, 0};
  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 8);
  static int32_t op3_init[] = {0, 0};
  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 2);
  static int32_t pad0_init[] = {0};
  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {1};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}