// clang-format off
// Generated file (from: depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py). Do not edit
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 2});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
  OperandType type2(Type::TENSOR_FLOAT32, {4});
  OperandType type3(Type::INT32, {});
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto pad0 = model->addOperand(&type3);
  auto stride = model->addOperand(&type3);
  auto channelMultiplier = model->addOperand(&type3);
  auto act = model->addOperand(&type3);
  auto op4 = model->addOperand(&type1);
  // Phase 2, operations
  static int32_t pad0_init[] = {0};
  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {2};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
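  // DEPTHWISE_CONV_2D with explicit padding; inputs are
  // {input, filter, bias, pad_left, pad_right, pad_top, pad_bottom,
  //  stride_width, stride_height, depth_multiplier, fused_activation}.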
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1, op2, op3},
    {op4});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 2});
  OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4});
  OperandType type2(Type::TENSOR_FLOAT32, {4});
  OperandType type3(Type::INT32, {});
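  // Dimensions of 0 leave the output shape unspecified, to be resolved at execution time.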
  OperandType type4(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto op2 = model->addOperand(&type1);
  auto op3 = model->addOperand(&type2);
  auto pad0 = model->addOperand(&type3);
  auto stride = model->addOperand(&type3);
  auto channelMultiplier = model->addOperand(&type3);
  auto act = model->addOperand(&type3);
  auto op4 = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t pad0_init[] = {0};
  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
  static int32_t stride_init[] = {1};
  model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
  static int32_t channelMultiplier_init[] = {2};
  model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1, op2, op3},
    {op4});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
