// clang-format off
// Generated file (from: pad_v2_low_rank.mod.py). Do not edit
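// Builds a PAD_V2 model: the 1-D float {3} input is padded with 3 elements
// before and 1 after (paddings = {3, 1}), filled with pad_value 9.9f, to
// produce the {7} output.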
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {3});
  OperandType type1(Type::TENSOR_INT32, {1, 2});
  OperandType type2(Type::FLOAT32, {});
  OperandType type3(Type::TENSOR_FLOAT32, {7});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto paddings = model->addOperand(&type1);
  auto pad_value = model->addOperand(&type2);
  auto output0 = model->addOperand(&type3);
  // Phase 2, operations
  static int32_t paddings_init[] = {3, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 2);
  static float pad_value_init[] = {9.9f};
  model->setOperandValue(pad_value, pad_value_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

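// FP16 variant of the model above. The pad-value literal below is 9.9 written
// out at float32 precision; it is narrowed to _Float16 in the initializer.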
void CreateModel_float16(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {1, 2});
  OperandType type4(Type::TENSOR_FLOAT16, {3});
  OperandType type5(Type::TENSOR_FLOAT16, {7});
  OperandType type6(Type::FLOAT16, {});
  // Phase 1, operands
  auto input0 = model->addOperand(&type4);
  auto paddings = model->addOperand(&type1);
  auto pad_value = model->addOperand(&type6);
  auto output0 = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t paddings_init[] = {3, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 2);
  static _Float16 pad_value_init[] = {9.899999618530273f};
  model->setOperandValue(pad_value, pad_value_init, sizeof(_Float16) * 1);
  model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

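// Dynamic-output-shape variant: the output tensor is declared with dimension
// {0}, i.e. an unspecified extent that is resolved at execution time.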
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {3});
  OperandType type1(Type::TENSOR_INT32, {1, 2});
  OperandType type2(Type::FLOAT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type0);
  auto paddings = model->addOperand(&type1);
  auto pad_value = model->addOperand(&type2);
  auto output0 = model->addOperand(&type7);
  // Phase 2, operations
  static int32_t paddings_init[] = {3, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 2);
  static float pad_value_init[] = {9.9f};
  model->setOperandValue(pad_value, pad_value_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

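// FP16 + dynamic-output-shape variant: _Float16 operands combined with an
// unspecified {0} output shape.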
void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type1(Type::TENSOR_INT32, {1, 2});
  OperandType type4(Type::TENSOR_FLOAT16, {3});
  OperandType type6(Type::FLOAT16, {});
  OperandType type8(Type::TENSOR_FLOAT16, {0});
  // Phase 1, operands
  auto input0 = model->addOperand(&type4);
  auto paddings = model->addOperand(&type1);
  auto pad_value = model->addOperand(&type6);
  auto output0 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t paddings_init[] = {3, 1};
  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 2);
  static _Float16 pad_value_init[] = {9.899999618530273f};
  model->setOperandValue(pad_value, pad_value_init, sizeof(_Float16) * 1);
  model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input0},
    {output0});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}