// clang-format off
// Generated file (from: pad_low_rank.mod.py). Do not edit
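// Descriptive note: PAD over a rank-1 tensor. Paddings {3, 1} add 3 elements
// before and 1 element after the single dimension, so the {3} input is padded
// to a {7} output (3 + 3 + 1 = 7), matching type2 below.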
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {3});
  OperandType type1(Type::TENSOR_INT32, {1, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {7});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t paddings_init[] = {3, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

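// Descriptive note: same graph as CreateModel, but the data operands use
// TENSOR_FLOAT16; the paddings operand remains TENSOR_INT32.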
void CreateModel_float16(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {1, 2});
  OperandType type3(Type::TENSOR_FLOAT16, {3});
  OperandType type4(Type::TENSOR_FLOAT16, {7});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type4);
  // Phase 2, operations
  static int32_t paddings_init[] = {3, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

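// Descriptive note: dynamic-output-shape variant. The output operand is
// declared with dimension {0}, i.e. its shape is left unspecified at model
// build time and is resolved when the model is executed.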
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {3});
  OperandType type1(Type::TENSOR_INT32, {1, 2});
  OperandType type5(Type::TENSOR_FLOAT32, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t paddings_init[] = {3, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

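// Descriptive note: float16 variant with the output shape likewise left
// unspecified ({0}).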
void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {1, 2});
  OperandType type3(Type::TENSOR_FLOAT16, {3});
  OperandType type6(Type::TENSOR_FLOAT16, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type3);
  auto paddings = model->addOperand(&type1);
  auto output0 = model->addOperand(&type6);
  // Phase 2, operations
  static int32_t paddings_init[] = {3, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 2);
  model->addOperation(ANEURALNETWORKS_PAD, {input0, paddings}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}