// clang-format off
// Generated file (from: box_with_nms_limit_hard.mod.py). Do not edit
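
// Reader note (added for clarity; not produced by the test generator): each
// ANEURALNETWORKS_BOX_WITH_NMS_LIMIT call below wires up nine inputs and four
// outputs. Going by the NNAPI operation reference (verify against the
// NeuralNetworks.h of the targeted API level), these are:
//   inputs 0-2:  class scores {numRois, numClasses}, box proposals
//                {numRois, numClasses * 4}, and the batch index of each roi {numRois};
//   input  3:    score threshold applied before NMS (0.3 here);
//   input  4:    maximum number of selected boxes per image, negative for unlimited;
//   input  5:    NMS kernel, 0 = hard, 1 = linear, 2 = gaussian (0 in this file);
//   inputs 6-8:  IoU threshold, sigma for the soft-NMS kernels, and the score
//                threshold applied after the kernel;
//   outputs 0-3: selected scores, box coordinates, class indices, batch indices.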
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type3(Type::TENSOR_FLOAT32, {12});
  OperandType type4(Type::TENSOR_FLOAT32, {12, 4});
  OperandType type5(Type::TENSOR_INT32, {12});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type0);
  auto roi = model->addOperand(&type1);
  auto batchSplit = model->addOperand(&type2);
  auto param = model->addOperand(&type6);
  auto param1 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type6);
  auto param4 = model->addOperand(&type6);
  auto param5 = model->addOperand(&type6);
  auto scoresOut = model->addOperand(&type3);
  auto roiOut = model->addOperand(&type4);
  auto classesOut = model->addOperand(&type5);
  auto batchSplitOut = model->addOperand(&type5);
  // Phase 2, operations
  static float param_init[] = {0.3f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {-1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static float param3_init[] = {0.4f};
  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {0.3f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2, param3, param4, param5}, {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores, roi, batchSplit},
    {scoresOut, roiOut, classesOut, batchSplitOut});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

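// Context note (illustrative sketch, not part of the generated output): the
// CreateModel* / is_ignored* pairs in this file are consumed by the generated
// gtest sources, which typically invoke them along the lines of
//   TEST_F(GeneratedTests, box_with_nms_limit_hard) {
//     execute(box_with_nms_limit_hard::CreateModel,
//             box_with_nms_limit_hard::is_ignored,
//             box_with_nms_limit_hard::get_examples());
//   }
// Exact harness names vary across NNAPI test-generator versions, so treat this
// only as a sketch of how the constructors are used.
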
void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type3(Type::TENSOR_FLOAT32, {12});
  OperandType type4(Type::TENSOR_FLOAT32, {12, 4});
  OperandType type5(Type::TENSOR_INT32, {12});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type0);
  auto roi = model->addOperand(&type1);
  auto batchSplit = model->addOperand(&type2);
  auto param = model->addOperand(&type6);
  auto param1 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type6);
  auto param4 = model->addOperand(&type6);
  auto param5 = model->addOperand(&type6);
  auto scoresOut = model->addOperand(&type3);
  auto roiOut = model->addOperand(&type4);
  auto classesOut = model->addOperand(&type5);
  auto batchSplitOut = model->addOperand(&type5);
  // Phase 2, operations
  static float param_init[] = {0.3f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {-1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static float param3_init[] = {0.4f};
  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {0.3f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2, param3, param4, param5}, {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores, roi, batchSplit},
    {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16(Model *model) {
  OperandType type11(Type::FLOAT16, {});
  OperandType type12(Type::TENSOR_FLOAT16, {19, 12});
  OperandType type13(Type::TENSOR_FLOAT16, {12, 4});
  OperandType type14(Type::TENSOR_FLOAT16, {19, 3});
  OperandType type15(Type::TENSOR_FLOAT16, {12});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type5(Type::TENSOR_INT32, {12});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type14);
  auto roi = model->addOperand(&type12);
  auto batchSplit = model->addOperand(&type2);
  auto param = model->addOperand(&type11);
  auto param1 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type11);
  auto param4 = model->addOperand(&type11);
  auto param5 = model->addOperand(&type11);
  auto scoresOut = model->addOperand(&type15);
  auto roiOut = model->addOperand(&type13);
  auto classesOut = model->addOperand(&type5);
  auto batchSplitOut = model->addOperand(&type5);
  // Phase 2, operations
  static _Float16 param_init[] = {0.30000001192092896f};
  model->setOperandValue(param, param_init, sizeof(_Float16) * 1);
  static int32_t param1_init[] = {-1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static _Float16 param3_init[] = {0.4000000059604645f};
  model->setOperandValue(param3, param3_init, sizeof(_Float16) * 1);
  static _Float16 param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(_Float16) * 1);
  static _Float16 param5_init[] = {0.30000001192092896f};
  model->setOperandValue(param5, param5_init, sizeof(_Float16) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2, param3, param4, param5}, {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores, roi, batchSplit},
    {scoresOut, roiOut, classesOut, batchSplitOut});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

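// Reader note (assumption, based on the standard NNAPI quantization scheme where a
// quantized value q represents scale * (q - zeroPoint)): in the quant8 variants the
// score tensors use scale 0.01 with zeroPoint 0 (or 128 in the *_2 variants), and the
// TENSOR_QUANT16_ASYMM roi tensors use scale 0.125, i.e. box coordinates are stored
// in steps of 0.125.
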
void CreateModel_quant8(Model *model) {
  OperandType type16(Type::TENSOR_QUANT16_ASYMM, {19, 12}, 0.125f, 0);
  OperandType type17(Type::TENSOR_QUANT16_ASYMM, {12, 4}, 0.125f, 0);
  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {19, 3}, 0.01f, 0);
  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {12}, 0.01f, 0);
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type5(Type::TENSOR_INT32, {12});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type18);
  auto roi = model->addOperand(&type16);
  auto batchSplit = model->addOperand(&type2);
  auto param = model->addOperand(&type6);
  auto param1 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type6);
  auto param4 = model->addOperand(&type6);
  auto param5 = model->addOperand(&type6);
  auto scoresOut = model->addOperand(&type19);
  auto roiOut = model->addOperand(&type17);
  auto classesOut = model->addOperand(&type5);
  auto batchSplitOut = model->addOperand(&type5);
  // Phase 2, operations
  static float param_init[] = {0.3f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {-1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static float param3_init[] = {0.4f};
  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {0.3f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2, param3, param4, param5}, {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores, roi, batchSplit},
    {scoresOut, roiOut, classesOut, batchSplitOut});
  assert(model->isValid());
}

inline bool is_ignored_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

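// Reader note (added for clarity): in the dynamic_output_shape variants below, output
// dimensions declared as 0 (e.g. {0} and {0, 0}) mean the extent is left unspecified
// at build time and is expected to be reported by the driver at execution time.
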
void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type20(Type::TENSOR_FLOAT32, {0});
  OperandType type21(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type22(Type::TENSOR_INT32, {0});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type0);
  auto roi = model->addOperand(&type1);
  auto batchSplit = model->addOperand(&type2);
  auto param = model->addOperand(&type6);
  auto param1 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type6);
  auto param4 = model->addOperand(&type6);
  auto param5 = model->addOperand(&type6);
  auto scoresOut = model->addOperand(&type20);
  auto roiOut = model->addOperand(&type21);
  auto classesOut = model->addOperand(&type22);
  auto batchSplitOut = model->addOperand(&type22);
  // Phase 2, operations
  static float param_init[] = {0.3f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {-1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static float param3_init[] = {0.4f};
  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {0.3f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2, param3, param4, param5}, {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores, roi, batchSplit},
    {scoresOut, roiOut, classesOut, batchSplitOut});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type20(Type::TENSOR_FLOAT32, {0});
  OperandType type21(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type22(Type::TENSOR_INT32, {0});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type0);
  auto roi = model->addOperand(&type1);
  auto batchSplit = model->addOperand(&type2);
  auto param = model->addOperand(&type6);
  auto param1 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type6);
  auto param4 = model->addOperand(&type6);
  auto param5 = model->addOperand(&type6);
  auto scoresOut = model->addOperand(&type20);
  auto roiOut = model->addOperand(&type21);
  auto classesOut = model->addOperand(&type22);
  auto batchSplitOut = model->addOperand(&type22);
  // Phase 2, operations
  static float param_init[] = {0.3f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {-1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static float param3_init[] = {0.4f};
  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {0.3f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2, param3, param4, param5}, {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores, roi, batchSplit},
    {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type11(Type::FLOAT16, {});
  OperandType type12(Type::TENSOR_FLOAT16, {19, 12});
  OperandType type14(Type::TENSOR_FLOAT16, {19, 3});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type22(Type::TENSOR_INT32, {0});
  OperandType type23(Type::TENSOR_FLOAT16, {0});
  OperandType type24(Type::TENSOR_FLOAT16, {0, 0});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type14);
  auto roi = model->addOperand(&type12);
  auto batchSplit = model->addOperand(&type2);
  auto param = model->addOperand(&type11);
  auto param1 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type11);
  auto param4 = model->addOperand(&type11);
  auto param5 = model->addOperand(&type11);
  auto scoresOut = model->addOperand(&type23);
  auto roiOut = model->addOperand(&type24);
  auto classesOut = model->addOperand(&type22);
  auto batchSplitOut = model->addOperand(&type22);
  // Phase 2, operations
  static _Float16 param_init[] = {0.30000001192092896f};
  model->setOperandValue(param, param_init, sizeof(_Float16) * 1);
  static int32_t param1_init[] = {-1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static _Float16 param3_init[] = {0.4000000059604645f};
  model->setOperandValue(param3, param3_init, sizeof(_Float16) * 1);
  static _Float16 param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(_Float16) * 1);
  static _Float16 param5_init[] = {0.30000001192092896f};
  model->setOperandValue(param5, param5_init, sizeof(_Float16) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2, param3, param4, param5}, {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores, roi, batchSplit},
    {scoresOut, roiOut, classesOut, batchSplitOut});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8(Model *model) {
  OperandType type16(Type::TENSOR_QUANT16_ASYMM, {19, 12}, 0.125f, 0);
  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {19, 3}, 0.01f, 0);
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type22(Type::TENSOR_INT32, {0});
  OperandType type25(Type::TENSOR_QUANT8_ASYMM, {0}, 0.01f, 0);
  OperandType type26(Type::TENSOR_QUANT16_ASYMM, {0, 0}, 0.125f, 0);
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores = model->addOperand(&type18);
  auto roi = model->addOperand(&type16);
  auto batchSplit = model->addOperand(&type2);
  auto param = model->addOperand(&type6);
  auto param1 = model->addOperand(&type7);
  auto param2 = model->addOperand(&type7);
  auto param3 = model->addOperand(&type6);
  auto param4 = model->addOperand(&type6);
  auto param5 = model->addOperand(&type6);
  auto scoresOut = model->addOperand(&type25);
  auto roiOut = model->addOperand(&type26);
  auto classesOut = model->addOperand(&type22);
  auto batchSplitOut = model->addOperand(&type22);
  // Phase 2, operations
  static float param_init[] = {0.3f};
  model->setOperandValue(param, param_init, sizeof(float) * 1);
  static int32_t param1_init[] = {-1};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
  static int32_t param2_init[] = {0};
  model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
  static float param3_init[] = {0.4f};
  model->setOperandValue(param3, param3_init, sizeof(float) * 1);
  static float param4_init[] = {1.0f};
  model->setOperandValue(param4, param4_init, sizeof(float) * 1);
  static float param5_init[] = {0.3f};
  model->setOperandValue(param5, param5_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores, roi, batchSplit, param, param1, param2, param3, param4, param5}, {scoresOut, roiOut, classesOut, batchSplitOut});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores, roi, batchSplit},
    {scoresOut, roiOut, classesOut, batchSplitOut});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

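// Reader note (added for clarity): the *_2 variants below build the same graph with a
// second parameter set; most notably the per-image box limit (param7) is 5 instead of
// the unlimited -1 used above, so the fixed-shape outputs shrink from 12 to 10 rois.
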
void CreateModel_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
  OperandType type10(Type::TENSOR_INT32, {10});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  OperandType type8(Type::TENSOR_FLOAT32, {10});
  OperandType type9(Type::TENSOR_FLOAT32, {10, 4});
  // Phase 1, operands
  auto scores1 = model->addOperand(&type0);
  auto roi1 = model->addOperand(&type1);
  auto batchSplit1 = model->addOperand(&type2);
  auto param6 = model->addOperand(&type6);
  auto param7 = model->addOperand(&type7);
  auto param8 = model->addOperand(&type7);
  auto param9 = model->addOperand(&type6);
  auto param10 = model->addOperand(&type6);
  auto param11 = model->addOperand(&type6);
  auto scoresOut1 = model->addOperand(&type8);
  auto roiOut1 = model->addOperand(&type9);
  auto classesOut1 = model->addOperand(&type10);
  auto batchSplitOut1 = model->addOperand(&type10);
  // Phase 2, operations
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {5};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {0};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {0.4f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {0.5f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static float param11_init[] = {0.3f};
  model->setOperandValue(param11, param11_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param6, param7, param8, param9, param10, param11}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores1, roi1, batchSplit1},
    {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  assert(model->isValid());
}

inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
  OperandType type10(Type::TENSOR_INT32, {10});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  OperandType type8(Type::TENSOR_FLOAT32, {10});
  OperandType type9(Type::TENSOR_FLOAT32, {10, 4});
  // Phase 1, operands
  auto scores1 = model->addOperand(&type0);
  auto roi1 = model->addOperand(&type1);
  auto batchSplit1 = model->addOperand(&type2);
  auto param6 = model->addOperand(&type6);
  auto param7 = model->addOperand(&type7);
  auto param8 = model->addOperand(&type7);
  auto param9 = model->addOperand(&type6);
  auto param10 = model->addOperand(&type6);
  auto param11 = model->addOperand(&type6);
  auto scoresOut1 = model->addOperand(&type8);
  auto roiOut1 = model->addOperand(&type9);
  auto classesOut1 = model->addOperand(&type10);
  auto batchSplitOut1 = model->addOperand(&type10);
  // Phase 2, operations
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {5};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {0};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {0.4f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {0.5f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static float param11_init[] = {0.3f};
  model->setOperandValue(param11, param11_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param6, param7, param8, param9, param10, param11}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores1, roi1, batchSplit1},
    {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_2(Model *model) {
  OperandType type10(Type::TENSOR_INT32, {10});
  OperandType type11(Type::FLOAT16, {});
  OperandType type12(Type::TENSOR_FLOAT16, {19, 12});
  OperandType type14(Type::TENSOR_FLOAT16, {19, 3});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type27(Type::TENSOR_FLOAT16, {10, 4});
  OperandType type28(Type::TENSOR_FLOAT16, {10});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores1 = model->addOperand(&type14);
  auto roi1 = model->addOperand(&type12);
  auto batchSplit1 = model->addOperand(&type2);
  auto param6 = model->addOperand(&type11);
  auto param7 = model->addOperand(&type7);
  auto param8 = model->addOperand(&type7);
  auto param9 = model->addOperand(&type11);
  auto param10 = model->addOperand(&type11);
  auto param11 = model->addOperand(&type11);
  auto scoresOut1 = model->addOperand(&type28);
  auto roiOut1 = model->addOperand(&type27);
  auto classesOut1 = model->addOperand(&type10);
  auto batchSplitOut1 = model->addOperand(&type10);
  // Phase 2, operations
  static _Float16 param6_init[] = {0.30000001192092896f};
  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
  static int32_t param7_init[] = {5};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {0};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static _Float16 param9_init[] = {0.4000000059604645f};
  model->setOperandValue(param9, param9_init, sizeof(_Float16) * 1);
  static _Float16 param10_init[] = {0.5f};
  model->setOperandValue(param10, param10_init, sizeof(_Float16) * 1);
  static _Float16 param11_init[] = {0.30000001192092896f};
  model->setOperandValue(param11, param11_init, sizeof(_Float16) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param6, param7, param8, param9, param10, param11}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores1, roi1, batchSplit1},
    {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  assert(model->isValid());
}

inline bool is_ignored_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_quant8_2(Model *model) {
  OperandType type10(Type::TENSOR_INT32, {10});
  OperandType type16(Type::TENSOR_QUANT16_ASYMM, {19, 12}, 0.125f, 0);
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type29(Type::TENSOR_QUANT16_ASYMM, {10, 4}, 0.125f, 0);
  OperandType type30(Type::TENSOR_QUANT8_ASYMM, {19, 3}, 0.01f, 128);
  OperandType type31(Type::TENSOR_QUANT8_ASYMM, {10}, 0.01f, 128);
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores1 = model->addOperand(&type30);
  auto roi1 = model->addOperand(&type16);
  auto batchSplit1 = model->addOperand(&type2);
  auto param6 = model->addOperand(&type6);
  auto param7 = model->addOperand(&type7);
  auto param8 = model->addOperand(&type7);
  auto param9 = model->addOperand(&type6);
  auto param10 = model->addOperand(&type6);
  auto param11 = model->addOperand(&type6);
  auto scoresOut1 = model->addOperand(&type31);
  auto roiOut1 = model->addOperand(&type29);
  auto classesOut1 = model->addOperand(&type10);
  auto batchSplitOut1 = model->addOperand(&type10);
  // Phase 2, operations
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {5};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {0};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {0.4f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {0.5f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static float param11_init[] = {0.3f};
  model->setOperandValue(param11, param11_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param6, param7, param8, param9, param10, param11}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores1, roi1, batchSplit1},
    {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  assert(model->isValid());
}

inline bool is_ignored_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type20(Type::TENSOR_FLOAT32, {0});
  OperandType type21(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type22(Type::TENSOR_INT32, {0});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores1 = model->addOperand(&type0);
  auto roi1 = model->addOperand(&type1);
  auto batchSplit1 = model->addOperand(&type2);
  auto param6 = model->addOperand(&type6);
  auto param7 = model->addOperand(&type7);
  auto param8 = model->addOperand(&type7);
  auto param9 = model->addOperand(&type6);
  auto param10 = model->addOperand(&type6);
  auto param11 = model->addOperand(&type6);
  auto scoresOut1 = model->addOperand(&type20);
  auto roiOut1 = model->addOperand(&type21);
  auto classesOut1 = model->addOperand(&type22);
  auto batchSplitOut1 = model->addOperand(&type22);
  // Phase 2, operations
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {5};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {0};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {0.4f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {0.5f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static float param11_init[] = {0.3f};
  model->setOperandValue(param11, param11_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param6, param7, param8, param9, param10, param11}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores1, roi1, batchSplit1},
    {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {19, 3});
  OperandType type1(Type::TENSOR_FLOAT32, {19, 12});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type20(Type::TENSOR_FLOAT32, {0});
  OperandType type21(Type::TENSOR_FLOAT32, {0, 0});
  OperandType type22(Type::TENSOR_INT32, {0});
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores1 = model->addOperand(&type0);
  auto roi1 = model->addOperand(&type1);
  auto batchSplit1 = model->addOperand(&type2);
  auto param6 = model->addOperand(&type6);
  auto param7 = model->addOperand(&type7);
  auto param8 = model->addOperand(&type7);
  auto param9 = model->addOperand(&type6);
  auto param10 = model->addOperand(&type6);
  auto param11 = model->addOperand(&type6);
  auto scoresOut1 = model->addOperand(&type20);
  auto roiOut1 = model->addOperand(&type21);
  auto classesOut1 = model->addOperand(&type22);
  auto batchSplitOut1 = model->addOperand(&type22);
  // Phase 2, operations
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {5};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {0};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {0.4f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {0.5f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static float param11_init[] = {0.3f};
  model->setOperandValue(param11, param11_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param6, param7, param8, param9, param10, param11}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores1, roi1, batchSplit1},
    {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_2(Model *model) {
  OperandType type11(Type::FLOAT16, {});
  OperandType type12(Type::TENSOR_FLOAT16, {19, 12});
  OperandType type14(Type::TENSOR_FLOAT16, {19, 3});
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type22(Type::TENSOR_INT32, {0});
  OperandType type23(Type::TENSOR_FLOAT16, {0});
  OperandType type24(Type::TENSOR_FLOAT16, {0, 0});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores1 = model->addOperand(&type14);
  auto roi1 = model->addOperand(&type12);
  auto batchSplit1 = model->addOperand(&type2);
  auto param6 = model->addOperand(&type11);
  auto param7 = model->addOperand(&type7);
  auto param8 = model->addOperand(&type7);
  auto param9 = model->addOperand(&type11);
  auto param10 = model->addOperand(&type11);
  auto param11 = model->addOperand(&type11);
  auto scoresOut1 = model->addOperand(&type23);
  auto roiOut1 = model->addOperand(&type24);
  auto classesOut1 = model->addOperand(&type22);
  auto batchSplitOut1 = model->addOperand(&type22);
  // Phase 2, operations
  static _Float16 param6_init[] = {0.30000001192092896f};
  model->setOperandValue(param6, param6_init, sizeof(_Float16) * 1);
  static int32_t param7_init[] = {5};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {0};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static _Float16 param9_init[] = {0.4000000059604645f};
  model->setOperandValue(param9, param9_init, sizeof(_Float16) * 1);
  static _Float16 param10_init[] = {0.5f};
  model->setOperandValue(param10, param10_init, sizeof(_Float16) * 1);
  static _Float16 param11_init[] = {0.30000001192092896f};
  model->setOperandValue(param11, param11_init, sizeof(_Float16) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param6, param7, param8, param9, param10, param11}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores1, roi1, batchSplit1},
    {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_quant8_2(Model *model) {
  OperandType type16(Type::TENSOR_QUANT16_ASYMM, {19, 12}, 0.125f, 0);
  OperandType type2(Type::TENSOR_INT32, {19});
  OperandType type22(Type::TENSOR_INT32, {0});
  OperandType type26(Type::TENSOR_QUANT16_ASYMM, {0, 0}, 0.125f, 0);
  OperandType type30(Type::TENSOR_QUANT8_ASYMM, {19, 3}, 0.01f, 128);
  OperandType type32(Type::TENSOR_QUANT8_ASYMM, {0}, 0.01f, 128);
  OperandType type6(Type::FLOAT32, {});
  OperandType type7(Type::INT32, {});
  // Phase 1, operands
  auto scores1 = model->addOperand(&type30);
  auto roi1 = model->addOperand(&type16);
  auto batchSplit1 = model->addOperand(&type2);
  auto param6 = model->addOperand(&type6);
  auto param7 = model->addOperand(&type7);
  auto param8 = model->addOperand(&type7);
  auto param9 = model->addOperand(&type6);
  auto param10 = model->addOperand(&type6);
  auto param11 = model->addOperand(&type6);
  auto scoresOut1 = model->addOperand(&type32);
  auto roiOut1 = model->addOperand(&type26);
  auto classesOut1 = model->addOperand(&type22);
  auto batchSplitOut1 = model->addOperand(&type22);
  // Phase 2, operations
  static float param6_init[] = {0.3f};
  model->setOperandValue(param6, param6_init, sizeof(float) * 1);
  static int32_t param7_init[] = {5};
  model->setOperandValue(param7, param7_init, sizeof(int32_t) * 1);
  static int32_t param8_init[] = {0};
  model->setOperandValue(param8, param8_init, sizeof(int32_t) * 1);
  static float param9_init[] = {0.4f};
  model->setOperandValue(param9, param9_init, sizeof(float) * 1);
  static float param10_init[] = {0.5f};
  model->setOperandValue(param10, param10_init, sizeof(float) * 1);
  static float param11_init[] = {0.3f};
  model->setOperandValue(param11, param11_init, sizeof(float) * 1);
  model->addOperation(ANEURALNETWORKS_BOX_WITH_NMS_LIMIT, {scores1, roi1, batchSplit1, param6, param7, param8, param9, param10, param11}, {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {scores1, roi1, batchSplit1},
    {scoresOut1, roiOut1, classesOut1, batchSplitOut1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
