// clang-format off
// Generated file (from: pad_v2_all_dims.mod.py). Do not edit
CreateModel(Model * model)3 void CreateModel(Model *model) {
4   OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 2, 3});
5   OperandType type1(Type::TENSOR_INT32, {4, 2});
6   OperandType type2(Type::FLOAT32, {});
7   OperandType type3(Type::TENSOR_FLOAT32, {4, 8, 8, 6});
8   // Phase 1, operands
9   auto input0 = model->addOperand(&type0);
10   auto paddings = model->addOperand(&type1);
11   auto pad_value = model->addOperand(&type2);
12   auto output0 = model->addOperand(&type3);
13   // Phase 2, operations
14   static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
15   model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
16   static float pad_value_init[] = {3.9f};
17   model->setOperandValue(pad_value, pad_value_init, sizeof(float) * 1);
18   model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
19   // Phase 3, inputs and outputs
20   model->identifyInputsAndOutputs(
21     {input0},
22     {output0});
23   assert(model->isValid());
24 }
25 
// Returns true when output index `i` of the example should be ignored when
// checking results; this model ignores no outputs.
inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_float16(Model * model)31 void CreateModel_float16(Model *model) {
32   OperandType type1(Type::TENSOR_INT32, {4, 2});
33   OperandType type4(Type::TENSOR_FLOAT16, {1, 1, 2, 3});
34   OperandType type5(Type::TENSOR_FLOAT16, {4, 8, 8, 6});
35   OperandType type6(Type::FLOAT16, {});
36   // Phase 1, operands
37   auto input0 = model->addOperand(&type4);
38   auto paddings = model->addOperand(&type1);
39   auto pad_value = model->addOperand(&type6);
40   auto output0 = model->addOperand(&type5);
41   // Phase 2, operations
42   static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
43   model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
44   static _Float16 pad_value_init[] = {3.9000000953674316f};
45   model->setOperandValue(pad_value, pad_value_init, sizeof(_Float16) * 1);
46   model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
47   // Phase 3, inputs and outputs
48   model->identifyInputsAndOutputs(
49     {input0},
50     {output0});
51   assert(model->isValid());
52 }
53 
// Returns true when output index `i` of the float16 example should be
// ignored when checking results; this model ignores no outputs.
inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_relaxed(Model * model)59 void CreateModel_relaxed(Model *model) {
60   OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 2, 3});
61   OperandType type1(Type::TENSOR_INT32, {4, 2});
62   OperandType type2(Type::FLOAT32, {});
63   OperandType type3(Type::TENSOR_FLOAT32, {4, 8, 8, 6});
64   // Phase 1, operands
65   auto input0 = model->addOperand(&type0);
66   auto paddings = model->addOperand(&type1);
67   auto pad_value = model->addOperand(&type2);
68   auto output0 = model->addOperand(&type3);
69   // Phase 2, operations
70   static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
71   model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
72   static float pad_value_init[] = {3.9f};
73   model->setOperandValue(pad_value, pad_value_init, sizeof(float) * 1);
74   model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
75   // Phase 3, inputs and outputs
76   model->identifyInputsAndOutputs(
77     {input0},
78     {output0});
79   // Phase 4: set relaxed execution
80   model->relaxComputationFloat32toFloat16(true);
81   assert(model->isValid());
82 }
83 
// Returns true when output index `i` of the relaxed example should be
// ignored when checking results; this model ignores no outputs.
inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape(Model * model)89 void CreateModel_dynamic_output_shape(Model *model) {
90   OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 2, 3});
91   OperandType type1(Type::TENSOR_INT32, {4, 2});
92   OperandType type2(Type::FLOAT32, {});
93   OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
94   // Phase 1, operands
95   auto input0 = model->addOperand(&type0);
96   auto paddings = model->addOperand(&type1);
97   auto pad_value = model->addOperand(&type2);
98   auto output0 = model->addOperand(&type7);
99   // Phase 2, operations
100   static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
101   model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
102   static float pad_value_init[] = {3.9f};
103   model->setOperandValue(pad_value, pad_value_init, sizeof(float) * 1);
104   model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
105   // Phase 3, inputs and outputs
106   model->identifyInputsAndOutputs(
107     {input0},
108     {output0});
109   assert(model->isValid());
110 }
111 
// Returns true when output index `i` of the dynamic-output-shape example
// should be ignored when checking results; this model ignores no outputs.
inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_float16(Model * model)117 void CreateModel_dynamic_output_shape_float16(Model *model) {
118   OperandType type1(Type::TENSOR_INT32, {4, 2});
119   OperandType type4(Type::TENSOR_FLOAT16, {1, 1, 2, 3});
120   OperandType type6(Type::FLOAT16, {});
121   OperandType type8(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
122   // Phase 1, operands
123   auto input0 = model->addOperand(&type4);
124   auto paddings = model->addOperand(&type1);
125   auto pad_value = model->addOperand(&type6);
126   auto output0 = model->addOperand(&type8);
127   // Phase 2, operations
128   static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
129   model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
130   static _Float16 pad_value_init[] = {3.9000000953674316f};
131   model->setOperandValue(pad_value, pad_value_init, sizeof(_Float16) * 1);
132   model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
133   // Phase 3, inputs and outputs
134   model->identifyInputsAndOutputs(
135     {input0},
136     {output0});
137   assert(model->isValid());
138 }
139 
// Returns true when output index `i` of the dynamic-output-shape float16
// example should be ignored when checking results; this model ignores no
// outputs.
inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_relaxed(Model * model)145 void CreateModel_dynamic_output_shape_relaxed(Model *model) {
146   OperandType type0(Type::TENSOR_FLOAT32, {1, 1, 2, 3});
147   OperandType type1(Type::TENSOR_INT32, {4, 2});
148   OperandType type2(Type::FLOAT32, {});
149   OperandType type7(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
150   // Phase 1, operands
151   auto input0 = model->addOperand(&type0);
152   auto paddings = model->addOperand(&type1);
153   auto pad_value = model->addOperand(&type2);
154   auto output0 = model->addOperand(&type7);
155   // Phase 2, operations
156   static int32_t paddings_init[] = {1, 2, 3, 4, 3, 3, 2, 1};
157   model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 8);
158   static float pad_value_init[] = {3.9f};
159   model->setOperandValue(pad_value, pad_value_init, sizeof(float) * 1);
160   model->addOperation(ANEURALNETWORKS_PAD_V2, {input0, paddings, pad_value}, {output0});
161   // Phase 3, inputs and outputs
162   model->identifyInputsAndOutputs(
163     {input0},
164     {output0});
165   // Phase 4: set relaxed execution
166   model->relaxComputationFloat32toFloat16(true);
167   assert(model->isValid());
168 }
169 
// Returns true when output index `i` of the dynamic-output-shape relaxed
// example should be ignored when checking results; this model ignores no
// outputs.
inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}