// clang-format off
// Generated file (from: minimum.mod.py). Do not edit
CreateModel(Model * model)3 void CreateModel(Model *model) {
4 OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
5 // Phase 1, operands
6 auto input0 = model->addOperand(&type0);
7 auto input1 = model->addOperand(&type0);
8 auto output0 = model->addOperand(&type0);
9 // Phase 2, operations
10 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
11 // Phase 3, inputs and outputs
12 model->identifyInputsAndOutputs(
13 {input0, input1},
14 {output0});
15 assert(model->isValid());
16 }
17
// Returns true if output index i is to be ignored when checking results;
// this variant ignores none.
inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
22
CreateModel_relaxed(Model * model)23 void CreateModel_relaxed(Model *model) {
24 OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
25 // Phase 1, operands
26 auto input0 = model->addOperand(&type0);
27 auto input1 = model->addOperand(&type0);
28 auto output0 = model->addOperand(&type0);
29 // Phase 2, operations
30 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
31 // Phase 3, inputs and outputs
32 model->identifyInputsAndOutputs(
33 {input0, input1},
34 {output0});
35 // Phase 4: set relaxed execution
36 model->relaxComputationFloat32toFloat16(true);
37 assert(model->isValid());
38 }
39
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
44
CreateModel_float16(Model * model)45 void CreateModel_float16(Model *model) {
46 OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
47 // Phase 1, operands
48 auto input0 = model->addOperand(&type4);
49 auto input1 = model->addOperand(&type4);
50 auto output0 = model->addOperand(&type4);
51 // Phase 2, operations
52 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
53 // Phase 3, inputs and outputs
54 model->identifyInputsAndOutputs(
55 {input0, input1},
56 {output0});
57 assert(model->isValid());
58 }
59
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
64
CreateModel_int32(Model * model)65 void CreateModel_int32(Model *model) {
66 OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
67 // Phase 1, operands
68 auto input0 = model->addOperand(&type5);
69 auto input1 = model->addOperand(&type5);
70 auto output0 = model->addOperand(&type5);
71 // Phase 2, operations
72 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
73 // Phase 3, inputs and outputs
74 model->identifyInputsAndOutputs(
75 {input0, input1},
76 {output0});
77 assert(model->isValid());
78 }
79
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_int32(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
84
CreateModel_quant8(Model * model)85 void CreateModel_quant8(Model *model) {
86 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
87 OperandType type7(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
88 OperandType type8(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
89 // Phase 1, operands
90 auto input0 = model->addOperand(&type6);
91 auto input1 = model->addOperand(&type7);
92 auto output0 = model->addOperand(&type8);
93 // Phase 2, operations
94 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
95 // Phase 3, inputs and outputs
96 model->identifyInputsAndOutputs(
97 {input0, input1},
98 {output0});
99 assert(model->isValid());
100 }
101
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
106
CreateModel_dynamic_output_shape(Model * model)107 void CreateModel_dynamic_output_shape(Model *model) {
108 OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
109 OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
110 // Phase 1, operands
111 auto input0 = model->addOperand(&type0);
112 auto input1 = model->addOperand(&type0);
113 auto output0 = model->addOperand(&type9);
114 // Phase 2, operations
115 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
116 // Phase 3, inputs and outputs
117 model->identifyInputsAndOutputs(
118 {input0, input1},
119 {output0});
120 assert(model->isValid());
121 }
122
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
127
CreateModel_dynamic_output_shape_relaxed(Model * model)128 void CreateModel_dynamic_output_shape_relaxed(Model *model) {
129 OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
130 OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
131 // Phase 1, operands
132 auto input0 = model->addOperand(&type0);
133 auto input1 = model->addOperand(&type0);
134 auto output0 = model->addOperand(&type9);
135 // Phase 2, operations
136 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
137 // Phase 3, inputs and outputs
138 model->identifyInputsAndOutputs(
139 {input0, input1},
140 {output0});
141 // Phase 4: set relaxed execution
142 model->relaxComputationFloat32toFloat16(true);
143 assert(model->isValid());
144 }
145
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
150
CreateModel_dynamic_output_shape_float16(Model * model)151 void CreateModel_dynamic_output_shape_float16(Model *model) {
152 OperandType type10(Type::TENSOR_FLOAT16, {0, 0, 0});
153 OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
154 // Phase 1, operands
155 auto input0 = model->addOperand(&type4);
156 auto input1 = model->addOperand(&type4);
157 auto output0 = model->addOperand(&type10);
158 // Phase 2, operations
159 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
160 // Phase 3, inputs and outputs
161 model->identifyInputsAndOutputs(
162 {input0, input1},
163 {output0});
164 assert(model->isValid());
165 }
166
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
171
CreateModel_dynamic_output_shape_int32(Model * model)172 void CreateModel_dynamic_output_shape_int32(Model *model) {
173 OperandType type11(Type::TENSOR_INT32, {0, 0, 0});
174 OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
175 // Phase 1, operands
176 auto input0 = model->addOperand(&type5);
177 auto input1 = model->addOperand(&type5);
178 auto output0 = model->addOperand(&type11);
179 // Phase 2, operations
180 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
181 // Phase 3, inputs and outputs
182 model->identifyInputsAndOutputs(
183 {input0, input1},
184 {output0});
185 assert(model->isValid());
186 }
187
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_int32(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
192
CreateModel_dynamic_output_shape_quant8(Model * model)193 void CreateModel_dynamic_output_shape_quant8(Model *model) {
194 OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
195 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
196 OperandType type7(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 1.0f, 100);
197 // Phase 1, operands
198 auto input0 = model->addOperand(&type6);
199 auto input1 = model->addOperand(&type7);
200 auto output0 = model->addOperand(&type12);
201 // Phase 2, operations
202 model->addOperation(ANEURALNETWORKS_MINIMUM, {input0, input1}, {output0});
203 // Phase 3, inputs and outputs
204 model->identifyInputsAndOutputs(
205 {input0, input1},
206 {output0});
207 assert(model->isValid());
208 }
209
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
214
CreateModel_2(Model * model)215 void CreateModel_2(Model *model) {
216 OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
217 OperandType type1(Type::TENSOR_FLOAT32, {2});
218 // Phase 1, operands
219 auto input01 = model->addOperand(&type0);
220 auto input11 = model->addOperand(&type1);
221 auto output01 = model->addOperand(&type0);
222 // Phase 2, operations
223 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
224 // Phase 3, inputs and outputs
225 model->identifyInputsAndOutputs(
226 {input01, input11},
227 {output01});
228 assert(model->isValid());
229 }
230
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
235
CreateModel_relaxed_2(Model * model)236 void CreateModel_relaxed_2(Model *model) {
237 OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
238 OperandType type1(Type::TENSOR_FLOAT32, {2});
239 // Phase 1, operands
240 auto input01 = model->addOperand(&type0);
241 auto input11 = model->addOperand(&type1);
242 auto output01 = model->addOperand(&type0);
243 // Phase 2, operations
244 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
245 // Phase 3, inputs and outputs
246 model->identifyInputsAndOutputs(
247 {input01, input11},
248 {output01});
249 // Phase 4: set relaxed execution
250 model->relaxComputationFloat32toFloat16(true);
251 assert(model->isValid());
252 }
253
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
258
CreateModel_float16_2(Model * model)259 void CreateModel_float16_2(Model *model) {
260 OperandType type13(Type::TENSOR_FLOAT16, {2});
261 OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
262 // Phase 1, operands
263 auto input01 = model->addOperand(&type4);
264 auto input11 = model->addOperand(&type13);
265 auto output01 = model->addOperand(&type4);
266 // Phase 2, operations
267 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
268 // Phase 3, inputs and outputs
269 model->identifyInputsAndOutputs(
270 {input01, input11},
271 {output01});
272 assert(model->isValid());
273 }
274
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
279
CreateModel_int32_2(Model * model)280 void CreateModel_int32_2(Model *model) {
281 OperandType type14(Type::TENSOR_INT32, {2});
282 OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
283 // Phase 1, operands
284 auto input01 = model->addOperand(&type5);
285 auto input11 = model->addOperand(&type14);
286 auto output01 = model->addOperand(&type5);
287 // Phase 2, operations
288 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
289 // Phase 3, inputs and outputs
290 model->identifyInputsAndOutputs(
291 {input01, input11},
292 {output01});
293 assert(model->isValid());
294 }
295
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_int32_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
300
CreateModel_quant8_2(Model * model)301 void CreateModel_quant8_2(Model *model) {
302 OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
303 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
304 OperandType type8(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 2.0f, 80);
305 // Phase 1, operands
306 auto input01 = model->addOperand(&type6);
307 auto input11 = model->addOperand(&type15);
308 auto output01 = model->addOperand(&type8);
309 // Phase 2, operations
310 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
311 // Phase 3, inputs and outputs
312 model->identifyInputsAndOutputs(
313 {input01, input11},
314 {output01});
315 assert(model->isValid());
316 }
317
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
322
CreateModel_dynamic_output_shape_2(Model * model)323 void CreateModel_dynamic_output_shape_2(Model *model) {
324 OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
325 OperandType type1(Type::TENSOR_FLOAT32, {2});
326 OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
327 // Phase 1, operands
328 auto input01 = model->addOperand(&type0);
329 auto input11 = model->addOperand(&type1);
330 auto output01 = model->addOperand(&type9);
331 // Phase 2, operations
332 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
333 // Phase 3, inputs and outputs
334 model->identifyInputsAndOutputs(
335 {input01, input11},
336 {output01});
337 assert(model->isValid());
338 }
339
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
344
CreateModel_dynamic_output_shape_relaxed_2(Model * model)345 void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
346 OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 2});
347 OperandType type1(Type::TENSOR_FLOAT32, {2});
348 OperandType type9(Type::TENSOR_FLOAT32, {0, 0, 0});
349 // Phase 1, operands
350 auto input01 = model->addOperand(&type0);
351 auto input11 = model->addOperand(&type1);
352 auto output01 = model->addOperand(&type9);
353 // Phase 2, operations
354 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
355 // Phase 3, inputs and outputs
356 model->identifyInputsAndOutputs(
357 {input01, input11},
358 {output01});
359 // Phase 4: set relaxed execution
360 model->relaxComputationFloat32toFloat16(true);
361 assert(model->isValid());
362 }
363
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
368
CreateModel_dynamic_output_shape_float16_2(Model * model)369 void CreateModel_dynamic_output_shape_float16_2(Model *model) {
370 OperandType type10(Type::TENSOR_FLOAT16, {0, 0, 0});
371 OperandType type13(Type::TENSOR_FLOAT16, {2});
372 OperandType type4(Type::TENSOR_FLOAT16, {3, 1, 2});
373 // Phase 1, operands
374 auto input01 = model->addOperand(&type4);
375 auto input11 = model->addOperand(&type13);
376 auto output01 = model->addOperand(&type10);
377 // Phase 2, operations
378 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
379 // Phase 3, inputs and outputs
380 model->identifyInputsAndOutputs(
381 {input01, input11},
382 {output01});
383 assert(model->isValid());
384 }
385
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
390
CreateModel_dynamic_output_shape_int32_2(Model * model)391 void CreateModel_dynamic_output_shape_int32_2(Model *model) {
392 OperandType type11(Type::TENSOR_INT32, {0, 0, 0});
393 OperandType type14(Type::TENSOR_INT32, {2});
394 OperandType type5(Type::TENSOR_INT32, {3, 1, 2});
395 // Phase 1, operands
396 auto input01 = model->addOperand(&type5);
397 auto input11 = model->addOperand(&type14);
398 auto output01 = model->addOperand(&type11);
399 // Phase 2, operations
400 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
401 // Phase 3, inputs and outputs
402 model->identifyInputsAndOutputs(
403 {input01, input11},
404 {output01});
405 assert(model->isValid());
406 }
407
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_int32_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
412
CreateModel_dynamic_output_shape_quant8_2(Model * model)413 void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
414 OperandType type12(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0}, 2.0f, 80);
415 OperandType type15(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 100);
416 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {3, 1, 2}, 0.5f, 127);
417 // Phase 1, operands
418 auto input01 = model->addOperand(&type6);
419 auto input11 = model->addOperand(&type15);
420 auto output01 = model->addOperand(&type12);
421 // Phase 2, operations
422 model->addOperation(ANEURALNETWORKS_MINIMUM, {input01, input11}, {output01});
423 // Phase 3, inputs and outputs
424 model->identifyInputsAndOutputs(
425 {input01, input11},
426 {output01});
427 assert(model->isValid());
428 }
429
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
434
CreateModel_3(Model * model)435 void CreateModel_3(Model *model) {
436 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 128);
437 OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 0.5f, 128);
438 // Phase 1, operands
439 auto input02 = model->addOperand(&type2);
440 auto input12 = model->addOperand(&type2);
441 auto output02 = model->addOperand(&type3);
442 // Phase 2, operations
443 model->addOperation(ANEURALNETWORKS_MINIMUM, {input02, input12}, {output02});
444 // Phase 3, inputs and outputs
445 model->identifyInputsAndOutputs(
446 {input02, input12},
447 {output02});
448 assert(model->isValid());
449 }
450
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
455
CreateModel_dynamic_output_shape_3(Model * model)456 void CreateModel_dynamic_output_shape_3(Model *model) {
457 OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0}, 0.5f, 128);
458 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0f, 128);
459 // Phase 1, operands
460 auto input02 = model->addOperand(&type2);
461 auto input12 = model->addOperand(&type2);
462 auto output02 = model->addOperand(&type16);
463 // Phase 2, operations
464 model->addOperation(ANEURALNETWORKS_MINIMUM, {input02, input12}, {output02});
465 // Phase 3, inputs and outputs
466 model->identifyInputsAndOutputs(
467 {input02, input12},
468 {output02});
469 assert(model->isValid());
470 }
471
// Returns true if output index i is to be ignored; this variant ignores none.
inline bool is_ignored_dynamic_output_shape_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}