// clang-format off
// Generated file (from: unidirectional_sequence_rnn.mod.py). Do not edit
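// The models below exercise ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN with
// the operand order {input, weights, recurrent_weights, bias, hidden_state,
// activation, time_major}. The first family of models feeds batch-major input
// {2, 16, 8} with time_major = 0; the *_2 variants feed time-major input
// {16, 2, 8} with time_major = 1. The relaxed, float16, and
// dynamic_output_shape variants change only operand types and output shape,
// not the graph structure.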
void CreateModel(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 16, 8});
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type5(Type::TENSOR_FLOAT32, {2, 16, 16});
  OperandType type6(Type::INT32, {});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto weights = model->addOperand(&type1);
  auto recurrent_weights = model->addOperand(&type2);
  auto bias = model->addOperand(&type3);
  auto hidden_state = model->addOperand(&type4);
  auto activation = model->addOperand(&type6);
  auto time_major = model->addOperand(&type6);
  auto output = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t activation_init[] = {1};
  model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
  static int32_t time_major_init[] = {0};
  model->setOperandValue(time_major, time_major_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input, weights, recurrent_weights, bias, hidden_state, activation, time_major}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, weights, recurrent_weights, bias, hidden_state},
    {output});
  assert(model->isValid());
}

inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 16, 8});
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type5(Type::TENSOR_FLOAT32, {2, 16, 16});
  OperandType type6(Type::INT32, {});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto weights = model->addOperand(&type1);
  auto recurrent_weights = model->addOperand(&type2);
  auto bias = model->addOperand(&type3);
  auto hidden_state = model->addOperand(&type4);
  auto activation = model->addOperand(&type6);
  auto time_major = model->addOperand(&type6);
  auto output = model->addOperand(&type5);
  // Phase 2, operations
  static int32_t activation_init[] = {1};
  model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
  static int32_t time_major_init[] = {0};
  model->setOperandValue(time_major, time_major_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input, weights, recurrent_weights, bias, hidden_state, activation, time_major}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, weights, recurrent_weights, bias, hidden_state},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {2, 16});
  OperandType type11(Type::TENSOR_FLOAT16, {2, 16, 8});
  OperandType type12(Type::TENSOR_FLOAT16, {2, 16, 16});
  OperandType type13(Type::TENSOR_FLOAT16, {16, 16});
  OperandType type14(Type::TENSOR_FLOAT16, {16, 8});
  OperandType type6(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {16});
  // Phase 1, operands
  auto input = model->addOperand(&type11);
  auto weights = model->addOperand(&type14);
  auto recurrent_weights = model->addOperand(&type13);
  auto bias = model->addOperand(&type9);
  auto hidden_state = model->addOperand(&type10);
  auto activation = model->addOperand(&type6);
  auto time_major = model->addOperand(&type6);
  auto output = model->addOperand(&type12);
  // Phase 2, operations
  static int32_t activation_init[] = {1};
  model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
  static int32_t time_major_init[] = {0};
  model->setOperandValue(time_major, time_major_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input, weights, recurrent_weights, bias, hidden_state, activation, time_major}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, weights, recurrent_weights, bias, hidden_state},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 16, 8});
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type15(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type6(Type::INT32, {});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto weights = model->addOperand(&type1);
  auto recurrent_weights = model->addOperand(&type2);
  auto bias = model->addOperand(&type3);
  auto hidden_state = model->addOperand(&type4);
  auto activation = model->addOperand(&type6);
  auto time_major = model->addOperand(&type6);
  auto output = model->addOperand(&type15);
  // Phase 2, operations
  static int32_t activation_init[] = {1};
  model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
  static int32_t time_major_init[] = {0};
  model->setOperandValue(time_major, time_major_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input, weights, recurrent_weights, bias, hidden_state, activation, time_major}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, weights, recurrent_weights, bias, hidden_state},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed(Model *model) {
  OperandType type0(Type::TENSOR_FLOAT32, {2, 16, 8});
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type15(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type6(Type::INT32, {});
  // Phase 1, operands
  auto input = model->addOperand(&type0);
  auto weights = model->addOperand(&type1);
  auto recurrent_weights = model->addOperand(&type2);
  auto bias = model->addOperand(&type3);
  auto hidden_state = model->addOperand(&type4);
  auto activation = model->addOperand(&type6);
  auto time_major = model->addOperand(&type6);
  auto output = model->addOperand(&type15);
  // Phase 2, operations
  static int32_t activation_init[] = {1};
  model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
  static int32_t time_major_init[] = {0};
  model->setOperandValue(time_major, time_major_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input, weights, recurrent_weights, bias, hidden_state, activation, time_major}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, weights, recurrent_weights, bias, hidden_state},
    {output});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {2, 16});
  OperandType type11(Type::TENSOR_FLOAT16, {2, 16, 8});
  OperandType type13(Type::TENSOR_FLOAT16, {16, 16});
  OperandType type14(Type::TENSOR_FLOAT16, {16, 8});
  OperandType type16(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type6(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {16});
  // Phase 1, operands
  auto input = model->addOperand(&type11);
  auto weights = model->addOperand(&type14);
  auto recurrent_weights = model->addOperand(&type13);
  auto bias = model->addOperand(&type9);
  auto hidden_state = model->addOperand(&type10);
  auto activation = model->addOperand(&type6);
  auto time_major = model->addOperand(&type6);
  auto output = model->addOperand(&type16);
  // Phase 2, operations
  static int32_t activation_init[] = {1};
  model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
  static int32_t time_major_init[] = {0};
  model->setOperandValue(time_major, time_major_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input, weights, recurrent_weights, bias, hidden_state, activation, time_major}, {output});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input, weights, recurrent_weights, bias, hidden_state},
    {output});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_2(Model *model) {
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type6(Type::INT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {16, 2, 8});
  OperandType type8(Type::TENSOR_FLOAT32, {16, 2, 16});
  // Phase 1, operands
  auto input1 = model->addOperand(&type7);
  auto weights1 = model->addOperand(&type1);
  auto recurrent_weights1 = model->addOperand(&type2);
  auto bias1 = model->addOperand(&type3);
  auto hidden_state1 = model->addOperand(&type4);
  auto activation1 = model->addOperand(&type6);
  auto time_major1 = model->addOperand(&type6);
  auto output1 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t activation1_init[] = {1};
  model->setOperandValue(activation1, activation1_init, sizeof(int32_t) * 1);
  static int32_t time_major1_init[] = {1};
  model->setOperandValue(time_major1, time_major1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input1, weights1, recurrent_weights1, bias1, hidden_state1, activation1, time_major1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input1, weights1, recurrent_weights1, bias1, hidden_state1},
    {output1});
  assert(model->isValid());
}

inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_relaxed_2(Model *model) {
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type6(Type::INT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {16, 2, 8});
  OperandType type8(Type::TENSOR_FLOAT32, {16, 2, 16});
  // Phase 1, operands
  auto input1 = model->addOperand(&type7);
  auto weights1 = model->addOperand(&type1);
  auto recurrent_weights1 = model->addOperand(&type2);
  auto bias1 = model->addOperand(&type3);
  auto hidden_state1 = model->addOperand(&type4);
  auto activation1 = model->addOperand(&type6);
  auto time_major1 = model->addOperand(&type6);
  auto output1 = model->addOperand(&type8);
  // Phase 2, operations
  static int32_t activation1_init[] = {1};
  model->setOperandValue(activation1, activation1_init, sizeof(int32_t) * 1);
  static int32_t time_major1_init[] = {1};
  model->setOperandValue(time_major1, time_major1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input1, weights1, recurrent_weights1, bias1, hidden_state1, activation1, time_major1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input1, weights1, recurrent_weights1, bias1, hidden_state1},
    {output1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_float16_2(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {2, 16});
  OperandType type13(Type::TENSOR_FLOAT16, {16, 16});
  OperandType type14(Type::TENSOR_FLOAT16, {16, 8});
  OperandType type17(Type::TENSOR_FLOAT16, {16, 2, 8});
  OperandType type18(Type::TENSOR_FLOAT16, {16, 2, 16});
  OperandType type6(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {16});
  // Phase 1, operands
  auto input1 = model->addOperand(&type17);
  auto weights1 = model->addOperand(&type14);
  auto recurrent_weights1 = model->addOperand(&type13);
  auto bias1 = model->addOperand(&type9);
  auto hidden_state1 = model->addOperand(&type10);
  auto activation1 = model->addOperand(&type6);
  auto time_major1 = model->addOperand(&type6);
  auto output1 = model->addOperand(&type18);
  // Phase 2, operations
  static int32_t activation1_init[] = {1};
  model->setOperandValue(activation1, activation1_init, sizeof(int32_t) * 1);
  static int32_t time_major1_init[] = {1};
  model->setOperandValue(time_major1, time_major1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input1, weights1, recurrent_weights1, bias1, hidden_state1, activation1, time_major1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input1, weights1, recurrent_weights1, bias1, hidden_state1},
    {output1});
  assert(model->isValid());
}

inline bool is_ignored_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_2(Model *model) {
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type15(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type6(Type::INT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {16, 2, 8});
  // Phase 1, operands
  auto input1 = model->addOperand(&type7);
  auto weights1 = model->addOperand(&type1);
  auto recurrent_weights1 = model->addOperand(&type2);
  auto bias1 = model->addOperand(&type3);
  auto hidden_state1 = model->addOperand(&type4);
  auto activation1 = model->addOperand(&type6);
  auto time_major1 = model->addOperand(&type6);
  auto output1 = model->addOperand(&type15);
  // Phase 2, operations
  static int32_t activation1_init[] = {1};
  model->setOperandValue(activation1, activation1_init, sizeof(int32_t) * 1);
  static int32_t time_major1_init[] = {1};
  model->setOperandValue(time_major1, time_major1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input1, weights1, recurrent_weights1, bias1, hidden_state1, activation1, time_major1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input1, weights1, recurrent_weights1, bias1, hidden_state1},
    {output1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
  OperandType type1(Type::TENSOR_FLOAT32, {16, 8});
  OperandType type15(Type::TENSOR_FLOAT32, {0, 0, 0});
  OperandType type2(Type::TENSOR_FLOAT32, {16, 16});
  OperandType type3(Type::TENSOR_FLOAT32, {16});
  OperandType type4(Type::TENSOR_FLOAT32, {2, 16});
  OperandType type6(Type::INT32, {});
  OperandType type7(Type::TENSOR_FLOAT32, {16, 2, 8});
  // Phase 1, operands
  auto input1 = model->addOperand(&type7);
  auto weights1 = model->addOperand(&type1);
  auto recurrent_weights1 = model->addOperand(&type2);
  auto bias1 = model->addOperand(&type3);
  auto hidden_state1 = model->addOperand(&type4);
  auto activation1 = model->addOperand(&type6);
  auto time_major1 = model->addOperand(&type6);
  auto output1 = model->addOperand(&type15);
  // Phase 2, operations
  static int32_t activation1_init[] = {1};
  model->setOperandValue(activation1, activation1_init, sizeof(int32_t) * 1);
  static int32_t time_major1_init[] = {1};
  model->setOperandValue(time_major1, time_major1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input1, weights1, recurrent_weights1, bias1, hidden_state1, activation1, time_major1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input1, weights1, recurrent_weights1, bias1, hidden_state1},
    {output1});
  // Phase 4: set relaxed execution
  model->relaxComputationFloat32toFloat16(true);
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

void CreateModel_dynamic_output_shape_float16_2(Model *model) {
  OperandType type10(Type::TENSOR_FLOAT16, {2, 16});
  OperandType type13(Type::TENSOR_FLOAT16, {16, 16});
  OperandType type14(Type::TENSOR_FLOAT16, {16, 8});
  OperandType type16(Type::TENSOR_FLOAT16, {0, 0, 0});
  OperandType type17(Type::TENSOR_FLOAT16, {16, 2, 8});
  OperandType type6(Type::INT32, {});
  OperandType type9(Type::TENSOR_FLOAT16, {16});
  // Phase 1, operands
  auto input1 = model->addOperand(&type17);
  auto weights1 = model->addOperand(&type14);
  auto recurrent_weights1 = model->addOperand(&type13);
  auto bias1 = model->addOperand(&type9);
  auto hidden_state1 = model->addOperand(&type10);
  auto activation1 = model->addOperand(&type6);
  auto time_major1 = model->addOperand(&type6);
  auto output1 = model->addOperand(&type16);
  // Phase 2, operations
  static int32_t activation1_init[] = {1};
  model->setOperandValue(activation1, activation1_init, sizeof(int32_t) * 1);
  static int32_t time_major1_init[] = {1};
  model->setOperandValue(time_major1, time_major1_init, sizeof(int32_t) * 1);
  model->addOperation(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN, {input1, weights1, recurrent_weights1, bias1, hidden_state1, activation1, time_major1}, {output1});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {input1, weights1, recurrent_weights1, bias1, hidden_state1},
    {output1});
  assert(model->isValid());
}

inline bool is_ignored_dynamic_output_shape_float16_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
