// clang-format off
// Generated file (from: batch_to_space_v1_2.mod.py). Do not edit
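//
// Note (editorial, assumed context): this fragment is emitted by the NNAPI
// test generator and is expected to be #included into the generated-test
// harness, which is assumed to supply Model, OperandType, Type, bool8 and the
// ANEURALNETWORKS_* operation constants; none of that is declared here.
//
// Example 1: BATCH_TO_SPACE_ND with block size {2, 2} rearranges the
// {4, 1, 1, 2} NHWC input into a {1, 2, 2, 2} output: the batch of 4 is
// divided by 2*2 while each spatial dimension is multiplied by 2 and the
// depth of 2 is unchanged. The NCHW variants below express the same graph
// with the layout flag set to true.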
void CreateModel_nhwc(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type1);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_nhwc(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nhwc_relaxed(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type1);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_nhwc_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nhwc_float16(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type6(Type::TENSOR_FLOAT16, {4, 1, 1, 2});
  OperandType type7(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type6);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type7);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_nhwc_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nhwc_quant8(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type8);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type9);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_nhwc_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nchw(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type10(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type10);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_nchw(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nchw_relaxed(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type10(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type10);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_nchw_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nchw_float16(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type11(Type::TENSOR_FLOAT16, {4, 2, 1, 1});
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type7(Type::TENSOR_FLOAT16, {1, 2, 2, 2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type11);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type7);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_nchw_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nchw_quant8(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type9(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.1f, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type12);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type9);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_nchw_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nhwc(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
  OperandType type13(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type1);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nhwc(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nhwc_relaxed(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type1(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
  OperandType type13(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type1);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nhwc_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nhwc_float16(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type14(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type6(Type::TENSOR_FLOAT16, {4, 1, 1, 2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type6);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type14);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nhwc_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nhwc_quant8(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 0);
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type8(Type::TENSOR_QUANT8_ASYMM, {4, 1, 1, 2}, 0.1f, 0);
  // Phase 1, operands
  auto op1 = model->addOperand(&type8);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type15);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nhwc_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nchw(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type10(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
  OperandType type13(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type10);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nchw(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nchw_relaxed(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type10(Type::TENSOR_FLOAT32, {4, 2, 1, 1});
  OperandType type13(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type10);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nchw_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nchw_float16(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type11(Type::TENSOR_FLOAT16, {4, 2, 1, 1});
  OperandType type14(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type11);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type14);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nchw_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nchw_quant8(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type12(Type::TENSOR_QUANT8_ASYMM, {4, 2, 1, 1}, 0.1f, 0);
  OperandType type15(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 0);
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op1 = model->addOperand(&type12);
  auto param = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op4 = model->addOperand(&type15);
  // Phase 2, operations
  static int32_t param_init[] = {2, 2};
  model->setOperandValue(param, param_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op1, param, layout}, {op4});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op4});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nchw_quant8(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

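// Example 2 (editorial note derived from the shapes below): the same
// BATCH_TO_SPACE_ND with block size {2, 2} applied to a {4, 2, 2, 1} NHWC
// input, producing a {1, 4, 4, 1} output (batch 4 -> 1, each spatial
// dimension 2 -> 4, depth unchanged). The quantized variants of this example
// use scale 0.5 and zero point 128 rather than 0.1 and 0.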
void CreateModel_nhwc_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
  // Phase 1, operands
  auto op11 = model->addOperand(&type4);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_nhwc_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nhwc_relaxed_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
  OperandType type5(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
  // Phase 1, operands
  auto op11 = model->addOperand(&type4);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_nhwc_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nhwc_float16_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type16(Type::TENSOR_FLOAT16, {4, 2, 2, 1});
  OperandType type17(Type::TENSOR_FLOAT16, {1, 4, 4, 1});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type16);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type17);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_nhwc_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nhwc_quant8_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 128);
  OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 0.5f, 128);
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type18);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type19);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_nhwc_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nchw_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type20(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
  OperandType type21(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type20);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type21);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_nchw_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nchw_relaxed_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type20(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
  OperandType type21(Type::TENSOR_FLOAT32, {1, 1, 4, 4});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type20);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type21);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_nchw_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nchw_float16_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type22(Type::TENSOR_FLOAT16, {4, 1, 2, 2});
  OperandType type23(Type::TENSOR_FLOAT16, {1, 1, 4, 4});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type22);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type23);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_nchw_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_nchw_quant8_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type24(Type::TENSOR_QUANT8_ASYMM, {4, 1, 2, 2}, 0.5f, 128);
  OperandType type25(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 4}, 0.5f, 128);
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type24);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type25);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_nchw_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nhwc_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type13(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
  // Phase 1, operands
  auto op11 = model->addOperand(&type4);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nhwc_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nhwc_relaxed_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type13(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  OperandType type3(Type::TENSOR_INT32, {2});
  OperandType type4(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
  // Phase 1, operands
  auto op11 = model->addOperand(&type4);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nhwc_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nhwc_float16_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type14(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  OperandType type16(Type::TENSOR_FLOAT16, {4, 2, 2, 1});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type16);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type14);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nhwc_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nhwc_quant8_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type18(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 0.5f, 128);
  OperandType type26(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 128);
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type18);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type26);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {false};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nhwc_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nchw_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type13(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  OperandType type20(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type20);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nchw_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nchw_relaxed_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type13(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
  OperandType type20(Type::TENSOR_FLOAT32, {4, 1, 2, 2});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type20);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type13);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nchw_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nchw_float16_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type14(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
  OperandType type22(Type::TENSOR_FLOAT16, {4, 1, 2, 2});
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type22);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type14);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nchw_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_nchw_quant8_2(Model *model) {
  OperandType type0(Type::BOOL, {});
  OperandType type24(Type::TENSOR_QUANT8_ASYMM, {4, 1, 2, 2}, 0.5f, 128);
  OperandType type26(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 128);
  OperandType type3(Type::TENSOR_INT32, {2});
  // Phase 1, operands
  auto op11 = model->addOperand(&type24);
  auto param1 = model->addOperand(&type3);
  auto layout = model->addOperand(&type0);
  auto op41 = model->addOperand(&type26);
  // Phase 2, operations
  static int32_t param1_init[] = {2, 2};
  model->setOperandValue(param1, param1_init, sizeof(int32_t) * 2);
  static bool8 layout_init[] = {true};
  model->setOperandValue(layout, layout_init, sizeof(bool8) * 1);
  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {op11, param1, layout}, {op41});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op11},
    {op41});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_nchw_quant8_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

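// Illustrative usage sketch (hypothetical, not emitted by the generator). It
// only exercises entry points defined in this file; how the test harness
// actually finishes, compiles, and executes the Model against the generated
// example data is assumed and not shown here.
inline void ExampleBuildBatchToSpaceNhwc(Model* model) {
  // Populate the model with the first NHWC BATCH_TO_SPACE_ND test graph.
  CreateModel_nhwc(model);
  // Output index 0 is not in the ignore set, so its values are checked.
  assert(!is_ignored_nhwc(0));
}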