// clang-format off
// Generated file (from: sub_v1_2_broadcast.mod.py). Do not edit
CreateModel_none(Model * model)3 void CreateModel_none(Model *model) {
4 OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
5 OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
6 OperandType type2(Type::INT32, {});
7 // Phase 1, operands
8 auto input0 = model->addOperand(&type0);
9 auto input1 = model->addOperand(&type1);
10 auto act = model->addOperand(&type2);
11 auto output0 = model->addOperand(&type1);
12 // Phase 2, operations
13 static int32_t act_init[] = {0};
14 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
15 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
16 // Phase 3, inputs and outputs
17 model->identifyInputsAndOutputs(
18 {input0, input1},
19 {output0});
20 assert(model->isValid());
21 }
22
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_none(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_relu(Model * model)28 void CreateModel_relu(Model *model) {
29 OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
30 OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
31 OperandType type2(Type::INT32, {});
32 // Phase 1, operands
33 auto input0 = model->addOperand(&type0);
34 auto input1 = model->addOperand(&type1);
35 auto act = model->addOperand(&type2);
36 auto output0 = model->addOperand(&type1);
37 // Phase 2, operations
38 static int32_t act_init[] = {1};
39 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
40 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
41 // Phase 3, inputs and outputs
42 model->identifyInputsAndOutputs(
43 {input0, input1},
44 {output0});
45 assert(model->isValid());
46 }
47
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_relu(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_relu1(Model * model)53 void CreateModel_relu1(Model *model) {
54 OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
55 OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
56 OperandType type2(Type::INT32, {});
57 // Phase 1, operands
58 auto input0 = model->addOperand(&type0);
59 auto input1 = model->addOperand(&type1);
60 auto act = model->addOperand(&type2);
61 auto output0 = model->addOperand(&type1);
62 // Phase 2, operations
63 static int32_t act_init[] = {2};
64 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
65 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
66 // Phase 3, inputs and outputs
67 model->identifyInputsAndOutputs(
68 {input0, input1},
69 {output0});
70 assert(model->isValid());
71 }
72
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_relu1(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_relu6(Model * model)78 void CreateModel_relu6(Model *model) {
79 OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
80 OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
81 OperandType type2(Type::INT32, {});
82 // Phase 1, operands
83 auto input0 = model->addOperand(&type0);
84 auto input1 = model->addOperand(&type1);
85 auto act = model->addOperand(&type2);
86 auto output0 = model->addOperand(&type1);
87 // Phase 2, operations
88 static int32_t act_init[] = {3};
89 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
90 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
91 // Phase 3, inputs and outputs
92 model->identifyInputsAndOutputs(
93 {input0, input1},
94 {output0});
95 assert(model->isValid());
96 }
97
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_relu6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_float16_none(Model * model)103 void CreateModel_float16_none(Model *model) {
104 OperandType type2(Type::INT32, {});
105 OperandType type5(Type::TENSOR_FLOAT16, {1, 2});
106 OperandType type6(Type::TENSOR_FLOAT16, {2, 2});
107 // Phase 1, operands
108 auto input0 = model->addOperand(&type5);
109 auto input1 = model->addOperand(&type6);
110 auto act = model->addOperand(&type2);
111 auto output0 = model->addOperand(&type6);
112 // Phase 2, operations
113 static int32_t act_init[] = {0};
114 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
115 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
116 // Phase 3, inputs and outputs
117 model->identifyInputsAndOutputs(
118 {input0, input1},
119 {output0});
120 assert(model->isValid());
121 }
122
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_float16_none(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_float16_relu(Model * model)128 void CreateModel_float16_relu(Model *model) {
129 OperandType type2(Type::INT32, {});
130 OperandType type5(Type::TENSOR_FLOAT16, {1, 2});
131 OperandType type6(Type::TENSOR_FLOAT16, {2, 2});
132 // Phase 1, operands
133 auto input0 = model->addOperand(&type5);
134 auto input1 = model->addOperand(&type6);
135 auto act = model->addOperand(&type2);
136 auto output0 = model->addOperand(&type6);
137 // Phase 2, operations
138 static int32_t act_init[] = {1};
139 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
140 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
141 // Phase 3, inputs and outputs
142 model->identifyInputsAndOutputs(
143 {input0, input1},
144 {output0});
145 assert(model->isValid());
146 }
147
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_float16_relu(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_float16_relu1(Model * model)153 void CreateModel_float16_relu1(Model *model) {
154 OperandType type2(Type::INT32, {});
155 OperandType type5(Type::TENSOR_FLOAT16, {1, 2});
156 OperandType type6(Type::TENSOR_FLOAT16, {2, 2});
157 // Phase 1, operands
158 auto input0 = model->addOperand(&type5);
159 auto input1 = model->addOperand(&type6);
160 auto act = model->addOperand(&type2);
161 auto output0 = model->addOperand(&type6);
162 // Phase 2, operations
163 static int32_t act_init[] = {2};
164 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
165 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
166 // Phase 3, inputs and outputs
167 model->identifyInputsAndOutputs(
168 {input0, input1},
169 {output0});
170 assert(model->isValid());
171 }
172
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_float16_relu1(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_float16_relu6(Model * model)178 void CreateModel_float16_relu6(Model *model) {
179 OperandType type2(Type::INT32, {});
180 OperandType type5(Type::TENSOR_FLOAT16, {1, 2});
181 OperandType type6(Type::TENSOR_FLOAT16, {2, 2});
182 // Phase 1, operands
183 auto input0 = model->addOperand(&type5);
184 auto input1 = model->addOperand(&type6);
185 auto act = model->addOperand(&type2);
186 auto output0 = model->addOperand(&type6);
187 // Phase 2, operations
188 static int32_t act_init[] = {3};
189 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
190 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
191 // Phase 3, inputs and outputs
192 model->identifyInputsAndOutputs(
193 {input0, input1},
194 {output0});
195 assert(model->isValid());
196 }
197
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_float16_relu6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_none(Model * model)203 void CreateModel_dynamic_output_shape_none(Model *model) {
204 OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
205 OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
206 OperandType type2(Type::INT32, {});
207 OperandType type7(Type::TENSOR_FLOAT32, {0, 0});
208 // Phase 1, operands
209 auto input0 = model->addOperand(&type0);
210 auto input1 = model->addOperand(&type1);
211 auto act = model->addOperand(&type2);
212 auto output0 = model->addOperand(&type7);
213 // Phase 2, operations
214 static int32_t act_init[] = {0};
215 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
216 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
217 // Phase 3, inputs and outputs
218 model->identifyInputsAndOutputs(
219 {input0, input1},
220 {output0});
221 assert(model->isValid());
222 }
223
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_dynamic_output_shape_none(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_relu(Model * model)229 void CreateModel_dynamic_output_shape_relu(Model *model) {
230 OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
231 OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
232 OperandType type2(Type::INT32, {});
233 OperandType type7(Type::TENSOR_FLOAT32, {0, 0});
234 // Phase 1, operands
235 auto input0 = model->addOperand(&type0);
236 auto input1 = model->addOperand(&type1);
237 auto act = model->addOperand(&type2);
238 auto output0 = model->addOperand(&type7);
239 // Phase 2, operations
240 static int32_t act_init[] = {1};
241 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
242 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
243 // Phase 3, inputs and outputs
244 model->identifyInputsAndOutputs(
245 {input0, input1},
246 {output0});
247 assert(model->isValid());
248 }
249
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_dynamic_output_shape_relu(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_relu1(Model * model)255 void CreateModel_dynamic_output_shape_relu1(Model *model) {
256 OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
257 OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
258 OperandType type2(Type::INT32, {});
259 OperandType type7(Type::TENSOR_FLOAT32, {0, 0});
260 // Phase 1, operands
261 auto input0 = model->addOperand(&type0);
262 auto input1 = model->addOperand(&type1);
263 auto act = model->addOperand(&type2);
264 auto output0 = model->addOperand(&type7);
265 // Phase 2, operations
266 static int32_t act_init[] = {2};
267 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
268 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
269 // Phase 3, inputs and outputs
270 model->identifyInputsAndOutputs(
271 {input0, input1},
272 {output0});
273 assert(model->isValid());
274 }
275
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_dynamic_output_shape_relu1(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_relu6(Model * model)281 void CreateModel_dynamic_output_shape_relu6(Model *model) {
282 OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
283 OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
284 OperandType type2(Type::INT32, {});
285 OperandType type7(Type::TENSOR_FLOAT32, {0, 0});
286 // Phase 1, operands
287 auto input0 = model->addOperand(&type0);
288 auto input1 = model->addOperand(&type1);
289 auto act = model->addOperand(&type2);
290 auto output0 = model->addOperand(&type7);
291 // Phase 2, operations
292 static int32_t act_init[] = {3};
293 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
294 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
295 // Phase 3, inputs and outputs
296 model->identifyInputsAndOutputs(
297 {input0, input1},
298 {output0});
299 assert(model->isValid());
300 }
301
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_dynamic_output_shape_relu6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_float16_none(Model * model)307 void CreateModel_dynamic_output_shape_float16_none(Model *model) {
308 OperandType type2(Type::INT32, {});
309 OperandType type5(Type::TENSOR_FLOAT16, {1, 2});
310 OperandType type6(Type::TENSOR_FLOAT16, {2, 2});
311 OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
312 // Phase 1, operands
313 auto input0 = model->addOperand(&type5);
314 auto input1 = model->addOperand(&type6);
315 auto act = model->addOperand(&type2);
316 auto output0 = model->addOperand(&type8);
317 // Phase 2, operations
318 static int32_t act_init[] = {0};
319 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
320 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
321 // Phase 3, inputs and outputs
322 model->identifyInputsAndOutputs(
323 {input0, input1},
324 {output0});
325 assert(model->isValid());
326 }
327
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_dynamic_output_shape_float16_none(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_float16_relu(Model * model)333 void CreateModel_dynamic_output_shape_float16_relu(Model *model) {
334 OperandType type2(Type::INT32, {});
335 OperandType type5(Type::TENSOR_FLOAT16, {1, 2});
336 OperandType type6(Type::TENSOR_FLOAT16, {2, 2});
337 OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
338 // Phase 1, operands
339 auto input0 = model->addOperand(&type5);
340 auto input1 = model->addOperand(&type6);
341 auto act = model->addOperand(&type2);
342 auto output0 = model->addOperand(&type8);
343 // Phase 2, operations
344 static int32_t act_init[] = {1};
345 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
346 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
347 // Phase 3, inputs and outputs
348 model->identifyInputsAndOutputs(
349 {input0, input1},
350 {output0});
351 assert(model->isValid());
352 }
353
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_dynamic_output_shape_float16_relu(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_float16_relu1(Model * model)359 void CreateModel_dynamic_output_shape_float16_relu1(Model *model) {
360 OperandType type2(Type::INT32, {});
361 OperandType type5(Type::TENSOR_FLOAT16, {1, 2});
362 OperandType type6(Type::TENSOR_FLOAT16, {2, 2});
363 OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
364 // Phase 1, operands
365 auto input0 = model->addOperand(&type5);
366 auto input1 = model->addOperand(&type6);
367 auto act = model->addOperand(&type2);
368 auto output0 = model->addOperand(&type8);
369 // Phase 2, operations
370 static int32_t act_init[] = {2};
371 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
372 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
373 // Phase 3, inputs and outputs
374 model->identifyInputsAndOutputs(
375 {input0, input1},
376 {output0});
377 assert(model->isValid());
378 }
379
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_dynamic_output_shape_float16_relu1(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_float16_relu6(Model * model)385 void CreateModel_dynamic_output_shape_float16_relu6(Model *model) {
386 OperandType type2(Type::INT32, {});
387 OperandType type5(Type::TENSOR_FLOAT16, {1, 2});
388 OperandType type6(Type::TENSOR_FLOAT16, {2, 2});
389 OperandType type8(Type::TENSOR_FLOAT16, {0, 0});
390 // Phase 1, operands
391 auto input0 = model->addOperand(&type5);
392 auto input1 = model->addOperand(&type6);
393 auto act = model->addOperand(&type2);
394 auto output0 = model->addOperand(&type8);
395 // Phase 2, operations
396 static int32_t act_init[] = {3};
397 model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
398 model->addOperation(ANEURALNETWORKS_SUB, {input0, input1, act}, {output0});
399 // Phase 3, inputs and outputs
400 model->identifyInputsAndOutputs(
401 {input0, input1},
402 {output0});
403 assert(model->isValid());
404 }
405
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_dynamic_output_shape_float16_relu6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8(Model * model)411 void CreateModel_quant8(Model *model) {
412 OperandType type2(Type::INT32, {});
413 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 1.0f, 0);
414 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
415 // Phase 1, operands
416 auto input01 = model->addOperand(&type3);
417 auto input11 = model->addOperand(&type4);
418 auto param = model->addOperand(&type2);
419 auto output01 = model->addOperand(&type4);
420 // Phase 2, operations
421 static int32_t param_init[] = {0};
422 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
423 model->addOperation(ANEURALNETWORKS_SUB, {input01, input11, param}, {output01});
424 // Phase 3, inputs and outputs
425 model->identifyInputsAndOutputs(
426 {input01, input11},
427 {output01});
428 assert(model->isValid());
429 }
430
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8_dynamic_output_shape(Model * model)436 void CreateModel_quant8_dynamic_output_shape(Model *model) {
437 OperandType type2(Type::INT32, {});
438 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 1.0f, 0);
439 OperandType type4(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0f, 0);
440 OperandType type9(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 1.0f, 0);
441 // Phase 1, operands
442 auto input01 = model->addOperand(&type3);
443 auto input11 = model->addOperand(&type4);
444 auto param = model->addOperand(&type2);
445 auto output01 = model->addOperand(&type9);
446 // Phase 2, operations
447 static int32_t param_init[] = {0};
448 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
449 model->addOperation(ANEURALNETWORKS_SUB, {input01, input11, param}, {output01});
450 // Phase 3, inputs and outputs
451 model->identifyInputsAndOutputs(
452 {input01, input11},
453 {output01});
454 assert(model->isValid());
455 }
456
// Returns whether output index |i| should be ignored when checking
// results; this variant ignores no outputs (the set is empty).
inline bool is_ignored_quant8_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
