// clang-format off
// Generated file (from: depthwise_conv2d_quant8_2.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 127);
  OperandType type2(Type::TENSOR_INT32, {4}, 0.25f, 0);
  OperandType type3(Type::INT32, {});
  OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 127);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto pad_valid = model->addOperand(&type3);
  auto stride = model->addOperand(&type3);
  auto channelMultiplier = model->addOperand(&type3);
  auto act_none = model->addOperand(&type3);
  auto op4 = model->addOperand(&type4);
  // Phase 2, operations
  static uint8_t op2_init[] = {129, 131, 133, 135, 109, 147, 105, 151, 137, 139, 141, 143, 153, 99, 157, 95};
  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
  static int32_t op3_init[] = {4, 8, 12, 16};
  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 4);
  static int32_t pad_valid_init[] = {2};
  model->setOperandValue(pad_valid, pad_valid_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {2};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
  static int32_t act_none_init[] = {0};
  model->setOperandValue(act_none, act_none_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad_valid, stride, stride, channelMultiplier, act_none}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}
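
// Shape sketch for the model built above, assuming NNAPI NHWC layout and
// implicit padding semantics (padding scheme 2 = VALID, fused activation 0 = NONE):
//   input {1, 3, 2, 2}, filter {1, 2, 2, 4}, stride 1 (width and height),
//   channel multiplier 2
//   out_height   = 3 - 2 + 1 = 2
//   out_width    = 2 - 2 + 1 = 1
//   out_channels = 2 * 2     = 4
// which matches the declared output shape {1, 2, 1, 4} of type4.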

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

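// Variant of CreateModel in which the output operand (type5) is declared with
// all dimensions set to 0 ({0, 0, 0, 0}); zero-sized dimensions mark the output
// shape as unspecified, so it is expected to be resolved at execution time
// rather than at model-build time.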
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
  OperandType type1(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.5f, 127);
  OperandType type2(Type::TENSOR_INT32, {4}, 0.25f, 0);
  OperandType type3(Type::INT32, {});
  OperandType type5(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 1.0f, 127);
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto pad_valid = model->addOperand(&type3);
  auto stride = model->addOperand(&type3);
  auto channelMultiplier = model->addOperand(&type3);
  auto act_none = model->addOperand(&type3);
  auto op4 = model->addOperand(&type5);
  // Phase 2, operations
  static uint8_t op2_init[] = {129, 131, 133, 135, 109, 147, 105, 151, 137, 139, 141, 143, 153, 99, 157, 95};
  model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
  static int32_t op3_init[] = {4, 8, 12, 16};
  model->setOperandValue(op3, op3_init, sizeof(int32_t) * 4);
  static int32_t pad_valid_init[] = {2};
  model->setOperandValue(pad_valid, pad_valid_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {2};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
  static int32_t act_none_init[] = {0};
  model->setOperandValue(act_none, act_none_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad_valid, stride, stride, channelMultiplier, act_none}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}