// clang-format off
// Generated file (from: quantize.mod.py). Do not edit
CreateModel_quant8(Model * model)3 void CreateModel_quant8(Model *model) {
4 OperandType type0(Type::TENSOR_FLOAT32, {300});
5 OperandType type14(Type::TENSOR_QUANT8_ASYMM, {300}, 1.0f, 0);
6 // Phase 1, operands
7 auto input0 = model->addOperand(&type0);
8 auto output0 = model->addOperand(&type14);
9 // Phase 2, operations
10 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input0}, {output0});
11 // Phase 3, inputs and outputs
12 model->identifyInputsAndOutputs(
13 {input0},
14 {output0});
15 assert(model->isValid());
16 }
17
// Returns true iff output index i of the quant8 variant should be skipped
// when comparing against the golden output (no outputs are ignored here).
inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8(Model * model)23 void CreateModel_dynamic_output_shape_quant8(Model *model) {
24 OperandType type0(Type::TENSOR_FLOAT32, {300});
25 OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0}, 1.0f, 0);
26 // Phase 1, operands
27 auto input0 = model->addOperand(&type0);
28 auto output0 = model->addOperand(&type15);
29 // Phase 2, operations
30 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input0}, {output0});
31 // Phase 3, inputs and outputs
32 model->identifyInputsAndOutputs(
33 {input0},
34 {output0});
35 assert(model->isValid());
36 }
37
// Returns true iff output index i of the dynamic_output_shape_quant8 variant
// should be skipped when comparing against the golden output (none are).
inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8_2(Model * model)43 void CreateModel_quant8_2(Model *model) {
44 OperandType type0(Type::TENSOR_FLOAT32, {300});
45 OperandType type16(Type::TENSOR_QUANT8_ASYMM, {300}, 1.0f, 1);
46 // Phase 1, operands
47 auto input01 = model->addOperand(&type0);
48 auto output01 = model->addOperand(&type16);
49 // Phase 2, operations
50 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input01}, {output01});
51 // Phase 3, inputs and outputs
52 model->identifyInputsAndOutputs(
53 {input01},
54 {output01});
55 assert(model->isValid());
56 }
57
// Returns true iff output index i of the quant8_2 variant should be skipped
// when comparing against the golden output (no outputs are ignored here).
inline bool is_ignored_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8_2(Model * model)63 void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
64 OperandType type0(Type::TENSOR_FLOAT32, {300});
65 OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0}, 1.0f, 1);
66 // Phase 1, operands
67 auto input01 = model->addOperand(&type0);
68 auto output01 = model->addOperand(&type17);
69 // Phase 2, operations
70 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input01}, {output01});
71 // Phase 3, inputs and outputs
72 model->identifyInputsAndOutputs(
73 {input01},
74 {output01});
75 assert(model->isValid());
76 }
77
// Returns true iff output index i of the dynamic_output_shape_quant8_2
// variant should be skipped during result comparison (none are).
inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8_3(Model * model)83 void CreateModel_quant8_3(Model *model) {
84 OperandType type0(Type::TENSOR_FLOAT32, {300});
85 OperandType type18(Type::TENSOR_QUANT8_ASYMM, {300}, 0.01f, 120);
86 // Phase 1, operands
87 auto input02 = model->addOperand(&type0);
88 auto output02 = model->addOperand(&type18);
89 // Phase 2, operations
90 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input02}, {output02});
91 // Phase 3, inputs and outputs
92 model->identifyInputsAndOutputs(
93 {input02},
94 {output02});
95 assert(model->isValid());
96 }
97
// Returns true iff output index i of the quant8_3 variant should be skipped
// during result comparison (no outputs are ignored here).
inline bool is_ignored_quant8_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8_3(Model * model)103 void CreateModel_dynamic_output_shape_quant8_3(Model *model) {
104 OperandType type0(Type::TENSOR_FLOAT32, {300});
105 OperandType type19(Type::TENSOR_QUANT8_ASYMM, {0}, 0.01f, 120);
106 // Phase 1, operands
107 auto input02 = model->addOperand(&type0);
108 auto output02 = model->addOperand(&type19);
109 // Phase 2, operations
110 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input02}, {output02});
111 // Phase 3, inputs and outputs
112 model->identifyInputsAndOutputs(
113 {input02},
114 {output02});
115 assert(model->isValid());
116 }
117
// Returns true iff output index i of the dynamic_output_shape_quant8_3
// variant should be skipped during result comparison (none are).
inline bool is_ignored_dynamic_output_shape_quant8_3(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8_4(Model * model)123 void CreateModel_quant8_4(Model *model) {
124 OperandType type0(Type::TENSOR_FLOAT32, {300});
125 OperandType type20(Type::TENSOR_QUANT8_ASYMM, {300}, 10.0f, 120);
126 // Phase 1, operands
127 auto input03 = model->addOperand(&type0);
128 auto output03 = model->addOperand(&type20);
129 // Phase 2, operations
130 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input03}, {output03});
131 // Phase 3, inputs and outputs
132 model->identifyInputsAndOutputs(
133 {input03},
134 {output03});
135 assert(model->isValid());
136 }
137
// Returns true iff output index i of the quant8_4 variant should be skipped
// during result comparison (no outputs are ignored here).
inline bool is_ignored_quant8_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8_4(Model * model)143 void CreateModel_dynamic_output_shape_quant8_4(Model *model) {
144 OperandType type0(Type::TENSOR_FLOAT32, {300});
145 OperandType type21(Type::TENSOR_QUANT8_ASYMM, {0}, 10.0f, 120);
146 // Phase 1, operands
147 auto input03 = model->addOperand(&type0);
148 auto output03 = model->addOperand(&type21);
149 // Phase 2, operations
150 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input03}, {output03});
151 // Phase 3, inputs and outputs
152 model->identifyInputsAndOutputs(
153 {input03},
154 {output03});
155 assert(model->isValid());
156 }
157
// Returns true iff output index i of the dynamic_output_shape_quant8_4
// variant should be skipped during result comparison (none are).
inline bool is_ignored_dynamic_output_shape_quant8_4(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8_5(Model * model)163 void CreateModel_quant8_5(Model *model) {
164 OperandType type1(Type::TENSOR_FLOAT16, {300});
165 OperandType type14(Type::TENSOR_QUANT8_ASYMM, {300}, 1.0f, 0);
166 // Phase 1, operands
167 auto input04 = model->addOperand(&type1);
168 auto output04 = model->addOperand(&type14);
169 // Phase 2, operations
170 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input04}, {output04});
171 // Phase 3, inputs and outputs
172 model->identifyInputsAndOutputs(
173 {input04},
174 {output04});
175 assert(model->isValid());
176 }
177
// Returns true iff output index i of the quant8_5 variant should be skipped
// during result comparison (no outputs are ignored here).
inline bool is_ignored_quant8_5(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8_5(Model * model)183 void CreateModel_dynamic_output_shape_quant8_5(Model *model) {
184 OperandType type1(Type::TENSOR_FLOAT16, {300});
185 OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0}, 1.0f, 0);
186 // Phase 1, operands
187 auto input04 = model->addOperand(&type1);
188 auto output04 = model->addOperand(&type15);
189 // Phase 2, operations
190 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input04}, {output04});
191 // Phase 3, inputs and outputs
192 model->identifyInputsAndOutputs(
193 {input04},
194 {output04});
195 assert(model->isValid());
196 }
197
// Returns true iff output index i of the dynamic_output_shape_quant8_5
// variant should be skipped during result comparison (none are).
inline bool is_ignored_dynamic_output_shape_quant8_5(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8_6(Model * model)203 void CreateModel_quant8_6(Model *model) {
204 OperandType type1(Type::TENSOR_FLOAT16, {300});
205 OperandType type16(Type::TENSOR_QUANT8_ASYMM, {300}, 1.0f, 1);
206 // Phase 1, operands
207 auto input05 = model->addOperand(&type1);
208 auto output05 = model->addOperand(&type16);
209 // Phase 2, operations
210 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input05}, {output05});
211 // Phase 3, inputs and outputs
212 model->identifyInputsAndOutputs(
213 {input05},
214 {output05});
215 assert(model->isValid());
216 }
217
// Returns true iff output index i of the quant8_6 variant should be skipped
// during result comparison (no outputs are ignored here).
inline bool is_ignored_quant8_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8_6(Model * model)223 void CreateModel_dynamic_output_shape_quant8_6(Model *model) {
224 OperandType type1(Type::TENSOR_FLOAT16, {300});
225 OperandType type17(Type::TENSOR_QUANT8_ASYMM, {0}, 1.0f, 1);
226 // Phase 1, operands
227 auto input05 = model->addOperand(&type1);
228 auto output05 = model->addOperand(&type17);
229 // Phase 2, operations
230 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input05}, {output05});
231 // Phase 3, inputs and outputs
232 model->identifyInputsAndOutputs(
233 {input05},
234 {output05});
235 assert(model->isValid());
236 }
237
// Returns true iff output index i of the dynamic_output_shape_quant8_6
// variant should be skipped during result comparison (none are).
inline bool is_ignored_dynamic_output_shape_quant8_6(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8_7(Model * model)243 void CreateModel_quant8_7(Model *model) {
244 OperandType type1(Type::TENSOR_FLOAT16, {300});
245 OperandType type18(Type::TENSOR_QUANT8_ASYMM, {300}, 0.01f, 120);
246 // Phase 1, operands
247 auto input06 = model->addOperand(&type1);
248 auto output06 = model->addOperand(&type18);
249 // Phase 2, operations
250 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input06}, {output06});
251 // Phase 3, inputs and outputs
252 model->identifyInputsAndOutputs(
253 {input06},
254 {output06});
255 assert(model->isValid());
256 }
257
// Returns true iff output index i of the quant8_7 variant should be skipped
// during result comparison (no outputs are ignored here).
inline bool is_ignored_quant8_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8_7(Model * model)263 void CreateModel_dynamic_output_shape_quant8_7(Model *model) {
264 OperandType type1(Type::TENSOR_FLOAT16, {300});
265 OperandType type19(Type::TENSOR_QUANT8_ASYMM, {0}, 0.01f, 120);
266 // Phase 1, operands
267 auto input06 = model->addOperand(&type1);
268 auto output06 = model->addOperand(&type19);
269 // Phase 2, operations
270 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input06}, {output06});
271 // Phase 3, inputs and outputs
272 model->identifyInputsAndOutputs(
273 {input06},
274 {output06});
275 assert(model->isValid());
276 }
277
// Returns true iff output index i of the dynamic_output_shape_quant8_7
// variant should be skipped during result comparison (none are).
inline bool is_ignored_dynamic_output_shape_quant8_7(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_quant8_8(Model * model)283 void CreateModel_quant8_8(Model *model) {
284 OperandType type1(Type::TENSOR_FLOAT16, {300});
285 OperandType type20(Type::TENSOR_QUANT8_ASYMM, {300}, 10.0f, 120);
286 // Phase 1, operands
287 auto input07 = model->addOperand(&type1);
288 auto output07 = model->addOperand(&type20);
289 // Phase 2, operations
290 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input07}, {output07});
291 // Phase 3, inputs and outputs
292 model->identifyInputsAndOutputs(
293 {input07},
294 {output07});
295 assert(model->isValid());
296 }
297
// Returns true iff output index i of the quant8_8 variant should be skipped
// during result comparison (no outputs are ignored here).
inline bool is_ignored_quant8_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_quant8_8(Model * model)303 void CreateModel_dynamic_output_shape_quant8_8(Model *model) {
304 OperandType type1(Type::TENSOR_FLOAT16, {300});
305 OperandType type21(Type::TENSOR_QUANT8_ASYMM, {0}, 10.0f, 120);
306 // Phase 1, operands
307 auto input07 = model->addOperand(&type1);
308 auto output07 = model->addOperand(&type21);
309 // Phase 2, operations
310 model->addOperation(ANEURALNETWORKS_QUANTIZE, {input07}, {output07});
311 // Phase 3, inputs and outputs
312 model->identifyInputsAndOutputs(
313 {input07},
314 {output07});
315 assert(model->isValid());
316 }
317
// Returns true iff output index i of the dynamic_output_shape_quant8_8
// variant should be skipped during result comparison (none are).
inline bool is_ignored_dynamic_output_shape_quant8_8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_zero_sized(Model * model)323 void CreateModel_zero_sized(Model *model) {
324 OperandType type10(Type::BOOL, {});
325 OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 1, 1});
326 OperandType type12(Type::TENSOR_FLOAT32, {0, 2, 2, 1});
327 OperandType type13(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
328 OperandType type2(Type::TENSOR_FLOAT32, {1, 2});
329 OperandType type3(Type::TENSOR_FLOAT32, {1, 8});
330 OperandType type4(Type::TENSOR_FLOAT32, {0});
331 OperandType type5(Type::TENSOR_INT32, {0});
332 OperandType type6(Type::TENSOR_FLOAT32, {0, 4});
333 OperandType type7(Type::TENSOR_INT32, {1});
334 OperandType type8(Type::FLOAT32, {});
335 OperandType type9(Type::INT32, {});
336 // Phase 1, operands
337 auto scores = model->addOperand(&type2);
338 auto roi = model->addOperand(&type3);
339 auto param = model->addOperand(&type7);
340 auto param1 = model->addOperand(&type8);
341 auto param2 = model->addOperand(&type9);
342 auto param3 = model->addOperand(&type9);
343 auto param4 = model->addOperand(&type8);
344 auto param5 = model->addOperand(&type8);
345 auto param6 = model->addOperand(&type8);
346 auto scoresOut = model->addOperand(&type4);
347 auto roiOut = model->addOperand(&type6);
348 auto classesOut = model->addOperand(&type5);
349 auto batchSplitOut = model->addOperand(&type5);
350 auto in = model->addOperand(&type11);
351 auto param7 = model->addOperand(&type9);
352 auto param8 = model->addOperand(&type9);
353 auto param9 = model->addOperand(&type8);
354 auto param10 = model->addOperand(&type8);
355 auto param11 = model->addOperand(&type9);
356 auto param12 = model->addOperand(&type9);
357 auto layout = model->addOperand(&type10);
358 auto featureMap = model->addOperand(&type12);
359 auto out = model->addOperand(&type13);
360 // Phase 2, operations
361 static float scores_init[] = {0.9f, 0.1f};
362 model->setOperandValue(scores, scores_init, sizeof(float) * 2);
363 static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
364 model->setOperandValue(roi, roi_init, sizeof(float) * 8);
365 static int32_t param_init[] = {0};
366 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
367 static float param1_init[] = {0.3f};
368 model->setOperandValue(param1, param1_init, sizeof(float) * 1);
369 static int32_t param2_init[] = {-1};
370 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
371 static int32_t param3_init[] = {0};
372 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
373 static float param4_init[] = {0.4f};
374 model->setOperandValue(param4, param4_init, sizeof(float) * 1);
375 static float param5_init[] = {1.0f};
376 model->setOperandValue(param5, param5_init, sizeof(float) * 1);
377 static float param6_init[] = {0.3f};
378 model->setOperandValue(param6, param6_init, sizeof(float) * 1);
379 static int32_t param7_init[] = {2};
380 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
381 static int32_t param8_init[] = {2};
382 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
383 static float param9_init[] = {2.0f};
384 model->setOperandValue(param9, param9_init, sizeof(float) * 1);
385 static float param10_init[] = {2.0f};
386 model->setOperandValue(param10, param10_init, sizeof(float) * 1);
387 static int32_t param11_init[] = {4};
388 model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
389 static int32_t param12_init[] = {4};
390 model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
391 static bool8 layout_init[] = {false};
392 model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
393 model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
394 model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
395 model->addOperation(ANEURALNETWORKS_QUANTIZE, {featureMap}, {out});
396 // Phase 3, inputs and outputs
397 model->identifyInputsAndOutputs(
398 {in},
399 {scoresOut, classesOut, out});
400 assert(model->isValid());
401 }
402
// Returns true iff output index i of the zero_sized variant should be
// skipped during result comparison (no outputs are ignored here).
inline bool is_ignored_zero_sized(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_zero_sized_relaxed(Model * model)408 void CreateModel_zero_sized_relaxed(Model *model) {
409 OperandType type10(Type::BOOL, {});
410 OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 1, 1});
411 OperandType type12(Type::TENSOR_FLOAT32, {0, 2, 2, 1});
412 OperandType type13(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
413 OperandType type2(Type::TENSOR_FLOAT32, {1, 2});
414 OperandType type3(Type::TENSOR_FLOAT32, {1, 8});
415 OperandType type4(Type::TENSOR_FLOAT32, {0});
416 OperandType type5(Type::TENSOR_INT32, {0});
417 OperandType type6(Type::TENSOR_FLOAT32, {0, 4});
418 OperandType type7(Type::TENSOR_INT32, {1});
419 OperandType type8(Type::FLOAT32, {});
420 OperandType type9(Type::INT32, {});
421 // Phase 1, operands
422 auto scores = model->addOperand(&type2);
423 auto roi = model->addOperand(&type3);
424 auto param = model->addOperand(&type7);
425 auto param1 = model->addOperand(&type8);
426 auto param2 = model->addOperand(&type9);
427 auto param3 = model->addOperand(&type9);
428 auto param4 = model->addOperand(&type8);
429 auto param5 = model->addOperand(&type8);
430 auto param6 = model->addOperand(&type8);
431 auto scoresOut = model->addOperand(&type4);
432 auto roiOut = model->addOperand(&type6);
433 auto classesOut = model->addOperand(&type5);
434 auto batchSplitOut = model->addOperand(&type5);
435 auto in = model->addOperand(&type11);
436 auto param7 = model->addOperand(&type9);
437 auto param8 = model->addOperand(&type9);
438 auto param9 = model->addOperand(&type8);
439 auto param10 = model->addOperand(&type8);
440 auto param11 = model->addOperand(&type9);
441 auto param12 = model->addOperand(&type9);
442 auto layout = model->addOperand(&type10);
443 auto featureMap = model->addOperand(&type12);
444 auto out = model->addOperand(&type13);
445 // Phase 2, operations
446 static float scores_init[] = {0.9f, 0.1f};
447 model->setOperandValue(scores, scores_init, sizeof(float) * 2);
448 static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
449 model->setOperandValue(roi, roi_init, sizeof(float) * 8);
450 static int32_t param_init[] = {0};
451 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
452 static float param1_init[] = {0.3f};
453 model->setOperandValue(param1, param1_init, sizeof(float) * 1);
454 static int32_t param2_init[] = {-1};
455 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
456 static int32_t param3_init[] = {0};
457 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
458 static float param4_init[] = {0.4f};
459 model->setOperandValue(param4, param4_init, sizeof(float) * 1);
460 static float param5_init[] = {1.0f};
461 model->setOperandValue(param5, param5_init, sizeof(float) * 1);
462 static float param6_init[] = {0.3f};
463 model->setOperandValue(param6, param6_init, sizeof(float) * 1);
464 static int32_t param7_init[] = {2};
465 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
466 static int32_t param8_init[] = {2};
467 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
468 static float param9_init[] = {2.0f};
469 model->setOperandValue(param9, param9_init, sizeof(float) * 1);
470 static float param10_init[] = {2.0f};
471 model->setOperandValue(param10, param10_init, sizeof(float) * 1);
472 static int32_t param11_init[] = {4};
473 model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
474 static int32_t param12_init[] = {4};
475 model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
476 static bool8 layout_init[] = {false};
477 model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
478 model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
479 model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
480 model->addOperation(ANEURALNETWORKS_QUANTIZE, {featureMap}, {out});
481 // Phase 3, inputs and outputs
482 model->identifyInputsAndOutputs(
483 {in},
484 {scoresOut, classesOut, out});
485 // Phase 4: set relaxed execution
486 model->relaxComputationFloat32toFloat16(true);
487 assert(model->isValid());
488 }
489
// Returns true iff output index i of the zero_sized_relaxed variant should
// be skipped during result comparison (no outputs are ignored here).
inline bool is_ignored_zero_sized_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_zero_sized_float16(Model * model)495 void CreateModel_zero_sized_float16(Model *model) {
496 OperandType type10(Type::BOOL, {});
497 OperandType type13(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
498 OperandType type22(Type::TENSOR_FLOAT16, {0, 2, 2, 1});
499 OperandType type23(Type::TENSOR_FLOAT16, {1, 1, 1, 1});
500 OperandType type24(Type::FLOAT16, {});
501 OperandType type25(Type::TENSOR_FLOAT16, {1, 8});
502 OperandType type26(Type::TENSOR_FLOAT16, {0, 4});
503 OperandType type27(Type::TENSOR_FLOAT16, {1, 2});
504 OperandType type28(Type::TENSOR_FLOAT16, {0});
505 OperandType type5(Type::TENSOR_INT32, {0});
506 OperandType type7(Type::TENSOR_INT32, {1});
507 OperandType type9(Type::INT32, {});
508 // Phase 1, operands
509 auto scores = model->addOperand(&type27);
510 auto roi = model->addOperand(&type25);
511 auto param = model->addOperand(&type7);
512 auto param1 = model->addOperand(&type24);
513 auto param2 = model->addOperand(&type9);
514 auto param3 = model->addOperand(&type9);
515 auto param4 = model->addOperand(&type24);
516 auto param5 = model->addOperand(&type24);
517 auto param6 = model->addOperand(&type24);
518 auto scoresOut = model->addOperand(&type28);
519 auto roiOut = model->addOperand(&type26);
520 auto classesOut = model->addOperand(&type5);
521 auto batchSplitOut = model->addOperand(&type5);
522 auto in = model->addOperand(&type23);
523 auto param7 = model->addOperand(&type9);
524 auto param8 = model->addOperand(&type9);
525 auto param9 = model->addOperand(&type24);
526 auto param10 = model->addOperand(&type24);
527 auto param11 = model->addOperand(&type9);
528 auto param12 = model->addOperand(&type9);
529 auto layout = model->addOperand(&type10);
530 auto featureMap = model->addOperand(&type22);
531 auto out = model->addOperand(&type13);
532 // Phase 2, operations
533 static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
534 model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
535 static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
536 model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
537 static int32_t param_init[] = {0};
538 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
539 static _Float16 param1_init[] = {0.30000001192092896f};
540 model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
541 static int32_t param2_init[] = {-1};
542 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
543 static int32_t param3_init[] = {0};
544 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
545 static _Float16 param4_init[] = {0.4000000059604645f};
546 model->setOperandValue(param4, param4_init, sizeof(_Float16) * 1);
547 static _Float16 param5_init[] = {1.0f};
548 model->setOperandValue(param5, param5_init, sizeof(_Float16) * 1);
549 static _Float16 param6_init[] = {0.30000001192092896f};
550 model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
551 static int32_t param7_init[] = {2};
552 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
553 static int32_t param8_init[] = {2};
554 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
555 static _Float16 param9_init[] = {2.0f};
556 model->setOperandValue(param9, param9_init, sizeof(_Float16) * 1);
557 static _Float16 param10_init[] = {2.0f};
558 model->setOperandValue(param10, param10_init, sizeof(_Float16) * 1);
559 static int32_t param11_init[] = {4};
560 model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
561 static int32_t param12_init[] = {4};
562 model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
563 static bool8 layout_init[] = {false};
564 model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
565 model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
566 model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
567 model->addOperation(ANEURALNETWORKS_QUANTIZE, {featureMap}, {out});
568 // Phase 3, inputs and outputs
569 model->identifyInputsAndOutputs(
570 {in},
571 {scoresOut, classesOut, out});
572 assert(model->isValid());
573 }
574
// Returns true iff output index i of the zero_sized_float16 variant should
// be skipped during result comparison (no outputs are ignored here).
inline bool is_ignored_zero_sized_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_zero_sized_dynamic_output_shape(Model * model)580 void CreateModel_zero_sized_dynamic_output_shape(Model *model) {
581 OperandType type10(Type::BOOL, {});
582 OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 1, 1});
583 OperandType type12(Type::TENSOR_FLOAT32, {0, 2, 2, 1});
584 OperandType type2(Type::TENSOR_FLOAT32, {1, 2});
585 OperandType type29(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 128);
586 OperandType type3(Type::TENSOR_FLOAT32, {1, 8});
587 OperandType type4(Type::TENSOR_FLOAT32, {0});
588 OperandType type5(Type::TENSOR_INT32, {0});
589 OperandType type6(Type::TENSOR_FLOAT32, {0, 4});
590 OperandType type7(Type::TENSOR_INT32, {1});
591 OperandType type8(Type::FLOAT32, {});
592 OperandType type9(Type::INT32, {});
593 // Phase 1, operands
594 auto scores = model->addOperand(&type2);
595 auto roi = model->addOperand(&type3);
596 auto param = model->addOperand(&type7);
597 auto param1 = model->addOperand(&type8);
598 auto param2 = model->addOperand(&type9);
599 auto param3 = model->addOperand(&type9);
600 auto param4 = model->addOperand(&type8);
601 auto param5 = model->addOperand(&type8);
602 auto param6 = model->addOperand(&type8);
603 auto scoresOut = model->addOperand(&type4);
604 auto roiOut = model->addOperand(&type6);
605 auto classesOut = model->addOperand(&type5);
606 auto batchSplitOut = model->addOperand(&type5);
607 auto in = model->addOperand(&type11);
608 auto param7 = model->addOperand(&type9);
609 auto param8 = model->addOperand(&type9);
610 auto param9 = model->addOperand(&type8);
611 auto param10 = model->addOperand(&type8);
612 auto param11 = model->addOperand(&type9);
613 auto param12 = model->addOperand(&type9);
614 auto layout = model->addOperand(&type10);
615 auto featureMap = model->addOperand(&type12);
616 auto out = model->addOperand(&type29);
617 // Phase 2, operations
618 static float scores_init[] = {0.9f, 0.1f};
619 model->setOperandValue(scores, scores_init, sizeof(float) * 2);
620 static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
621 model->setOperandValue(roi, roi_init, sizeof(float) * 8);
622 static int32_t param_init[] = {0};
623 model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
624 static float param1_init[] = {0.3f};
625 model->setOperandValue(param1, param1_init, sizeof(float) * 1);
626 static int32_t param2_init[] = {-1};
627 model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
628 static int32_t param3_init[] = {0};
629 model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
630 static float param4_init[] = {0.4f};
631 model->setOperandValue(param4, param4_init, sizeof(float) * 1);
632 static float param5_init[] = {1.0f};
633 model->setOperandValue(param5, param5_init, sizeof(float) * 1);
634 static float param6_init[] = {0.3f};
635 model->setOperandValue(param6, param6_init, sizeof(float) * 1);
636 static int32_t param7_init[] = {2};
637 model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
638 static int32_t param8_init[] = {2};
639 model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
640 static float param9_init[] = {2.0f};
641 model->setOperandValue(param9, param9_init, sizeof(float) * 1);
642 static float param10_init[] = {2.0f};
643 model->setOperandValue(param10, param10_init, sizeof(float) * 1);
644 static int32_t param11_init[] = {4};
645 model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
646 static int32_t param12_init[] = {4};
647 model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
648 static bool8 layout_init[] = {false};
649 model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
650 model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
651 model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
652 model->addOperation(ANEURALNETWORKS_QUANTIZE, {featureMap}, {out});
653 // Phase 3, inputs and outputs
654 model->identifyInputsAndOutputs(
655 {in},
656 {scoresOut, classesOut, out});
657 assert(model->isValid());
658 }
659
// Returns true iff output index i of the zero_sized_dynamic_output_shape
// variant should be skipped during result comparison (none are).
inline bool is_ignored_zero_sized_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

// Builds the zero-sized / dynamic-output-shape / relaxed-precision variant of
// the QUANTIZE test model.  Graph: BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> QUANTIZE.
// The constant scores/ROIs are fixtures chosen so that BOX_WITH_NMS_LIMIT
// emits empty (zero-sized) tensors, exercising zero-sized tensor support in
// the downstream ops.  The {0, 0, 0, 0} shape on the final quantized output
// marks a dynamic output whose actual shape is determined at execution time.
// NOTE: operand registration order is significant (operands are referenced by
// index) — do not reorder.  Generated code; keep in sync with quantize.mod.py.
void CreateModel_zero_sized_dynamic_output_shape_relaxed(Model *model) {
  OperandType type10(Type::BOOL, {});
  OperandType type11(Type::TENSOR_FLOAT32, {1, 1, 1, 1});
  OperandType type12(Type::TENSOR_FLOAT32, {0, 2, 2, 1});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 2});
  // Quantized output type: scale 0.1, zeroPoint 128; all-zero dims = dynamic shape.
  OperandType type29(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 128);
  OperandType type3(Type::TENSOR_FLOAT32, {1, 8});
  OperandType type4(Type::TENSOR_FLOAT32, {0});
  OperandType type5(Type::TENSOR_INT32, {0});
  OperandType type6(Type::TENSOR_FLOAT32, {0, 4});
  OperandType type7(Type::TENSOR_INT32, {1});
  OperandType type8(Type::FLOAT32, {});
  OperandType type9(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type2);
  auto roi = model->addOperand(&type3);
  auto param = model->addOperand(&type7);
  auto param1 = model->addOperand(&type8);
  auto param2 = model->addOperand(&type9);
  auto param3 = model->addOperand(&type9);
  auto param4 = model->addOperand(&type8);
  auto param5 = model->addOperand(&type8);
  auto param6 = model->addOperand(&type8);
  // Zero-sized intermediates produced by BOX_WITH_NMS_LIMIT.
  auto scoresOut = model->addOperand(&type4);
  auto roiOut = model->addOperand(&type6);
  auto classesOut = model->addOperand(&type5);
  auto batchSplitOut = model->addOperand(&type5);
  auto in = model->addOperand(&type11);
  auto param7 = model->addOperand(&type9);
  auto param8 = model->addOperand(&type9);
  auto param9 = model->addOperand(&type8);
  auto param10 = model->addOperand(&type8);
  auto param11 = model->addOperand(&type9);
  auto param12 = model->addOperand(&type9);
  auto layout = model->addOperand(&type10);
  auto featureMap = model->addOperand(&type12);
  auto out = model->addOperand(&type29);  // final quantized (dynamic-shape) output
  // Phase 2, operations
  static float scores_init[] = {0.9f, 0.1f};
  model->setOperandValue(scores, scores_init, sizeof(float) * 2);
  static float roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
  model->setOperandValue(roi, roi_init, sizeof(float) * 8);
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static float param1_init[] = {0.3f};
  model->setOperandValue(param1, param1_init, sizeof(float) * 1);
  static int32_t param2_init[] = {-1};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static int32_t param3_init[] = {0};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  static float param4_init[] = {0.4f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {1.0f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {2};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {2};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {2.0f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {2.0f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static int32_t param11_init[] = {4};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
  static int32_t param12_init[] = {4};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
  model->addOperation(ANEURALNETWORKS_QUANTIZE, {featureMap}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {in},
    {scoresOut, classesOut, out});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}
746
// Returns whether example index `i` of the zero_sized_dynamic_output_shape_relaxed
// variant should be skipped.  The skip set is empty, so every example runs.
inline bool is_ignored_zero_sized_dynamic_output_shape_relaxed(int i) {
  static const std::set<int> ignore = {};
  return ignore.count(i) > 0;
}
751
// Builds the zero-sized / dynamic-output-shape / float16 variant of the
// QUANTIZE test model.  Same graph as the relaxed variant —
// BOX_WITH_NMS_LIMIT -> ROI_ALIGN -> QUANTIZE — but with all float operands
// declared as FLOAT16/TENSOR_FLOAT16 and constants given as _Float16 data
// (hence the rounded literals, e.g. 0.30000001192092896f).  The constant
// scores/ROIs are fixtures that make BOX_WITH_NMS_LIMIT emit zero-sized
// tensors; the {0, 0, 0, 0} quantized output shape marks a dynamic output.
// NOTE: operand registration order is significant (operands are referenced by
// index) — do not reorder.  Generated code; keep in sync with quantize.mod.py.
void CreateModel_zero_sized_dynamic_output_shape_float16(Model *model) {
  OperandType type10(Type::BOOL, {});
  OperandType type22(Type::TENSOR_FLOAT16, {0, 2, 2, 1});
  OperandType type23(Type::TENSOR_FLOAT16, {1, 1, 1, 1});
  OperandType type24(Type::FLOAT16, {});
  OperandType type25(Type::TENSOR_FLOAT16, {1, 8});
  OperandType type26(Type::TENSOR_FLOAT16, {0, 4});
  OperandType type27(Type::TENSOR_FLOAT16, {1, 2});
  // Quantized output type: scale 0.1, zeroPoint 128; all-zero dims = dynamic shape.
  OperandType type29(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 128);
  OperandType type30(Type::TENSOR_FLOAT16, {0});
  OperandType type5(Type::TENSOR_INT32, {0});
  OperandType type7(Type::TENSOR_INT32, {1});
  OperandType type9(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type27);
  auto roi = model->addOperand(&type25);
  auto param = model->addOperand(&type7);
  auto param1 = model->addOperand(&type24);
  auto param2 = model->addOperand(&type9);
  auto param3 = model->addOperand(&type9);
  auto param4 = model->addOperand(&type24);
  auto param5 = model->addOperand(&type24);
  auto param6 = model->addOperand(&type24);
  // Zero-sized intermediates produced by BOX_WITH_NMS_LIMIT.
  auto scoresOut = model->addOperand(&type30);
  auto roiOut = model->addOperand(&type26);
  auto classesOut = model->addOperand(&type5);
  auto batchSplitOut = model->addOperand(&type5);
  auto in = model->addOperand(&type23);
  auto param7 = model->addOperand(&type9);
  auto param8 = model->addOperand(&type9);
  auto param9 = model->addOperand(&type24);
  auto param10 = model->addOperand(&type24);
  auto param11 = model->addOperand(&type9);
  auto param12 = model->addOperand(&type9);
  auto layout = model->addOperand(&type10);
  auto featureMap = model->addOperand(&type22);
  auto out = model->addOperand(&type29);  // final quantized (dynamic-shape) output
  // Phase 2, operations
  static _Float16 scores_init[] = {0.8999999761581421f, 0.10000000149011612f};
  model->setOperandValue(scores, scores_init, sizeof(_Float16) * 2);
  static _Float16 roi_init[] = {1.0f, 1.0f, 10.0f, 10.0f, 0.0f, 0.0f, 10.0f, 10.0f};
  model->setOperandValue(roi, roi_init, sizeof(_Float16) * 8);
  static int32_t param_init[] = {0};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
  static _Float16 param1_init[] = {0.30000001192092896f};
  model->setOperandValue(param1, param1_init, sizeof(_Float16) * 1);
  static int32_t param2_init[] = {-1};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static int32_t param3_init[] = {0};
  model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
  static _Float16 param4_init[] = {0.4000000059604645f};
  model->setOperandValue(param4, param4_init, sizeof(_Float16) * 1);
  static _Float16 param5_init[] = {1.0f};
  model->setOperandValue(param5, param5_init, sizeof(_Float16) * 1);
  static _Float16 param6_init[] = {0.30000001192092896f};
  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
  static int32_t param7_init[] = {2};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {2};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static _Float16 param9_init[] = {2.0f};
  model->setOperandValue(param9, param9_init, sizeof(_Float16) * 1);
  static _Float16 param10_init[] = {2.0f};
  model->setOperandValue(param10, param10_init, sizeof(_Float16) * 1);
  static int32_t param11_init[] = {4};
  model->setOperandValue(param11, param11_init, sizeof(int32_t) * 1);
  static int32_t param12_init[] = {4};
  model->setOperandValue(param12, param12_init, sizeof(int32_t) * 1);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, param, param1, param2, param3, param4, param5, param6}, {scoresOut, roiOut, classesOut, batchSplitOut});
  model->addOperation(ANEURALNETWORKS_ROI_ALIGN, {in, roiOut, batchSplitOut, param7, param8, param9, param10, param11, param12, layout}, {featureMap});
  model->addOperation(ANEURALNETWORKS_QUANTIZE, {featureMap}, {out});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {in},
    {scoresOut, classesOut, out});
  assert(model->isValid());
}
831
// Returns whether example index `i` of the zero_sized_dynamic_output_shape_float16
// variant should be skipped.  The skip set is empty, so every example runs.
inline bool is_ignored_zero_sized_dynamic_output_shape_float16(int i) {
  static const std::set<int> ignore = {};
  return ignore.count(i) > 0;
}
836
837