// clang-format off
// Generated file (from: quantized_lstm.mod.py). Do not edit
CreateModel(Model * model)3 void CreateModel(Model *model) {
4 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.0078125f, 128);
5 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
6 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
7 OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
8 OperandType type4(Type::TENSOR_QUANT16_SYMM, {2, 4}, 0.00048828125f, 0);
9 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 4}, 0.0078125f, 128);
10 // Phase 1, operands
11 auto input = model->addOperand(&type0);
12 auto inputToInputWeights = model->addOperand(&type1);
13 auto inputToForgetWeights = model->addOperand(&type1);
14 auto inputToCellWeights = model->addOperand(&type1);
15 auto inputToOutputWeights = model->addOperand(&type1);
16 auto recurrentToInputWeights = model->addOperand(&type2);
17 auto recurrentToForgetWeights = model->addOperand(&type2);
18 auto recurrentToCellWeights = model->addOperand(&type2);
19 auto recurrentToOutputWeights = model->addOperand(&type2);
20 auto inputGateBias = model->addOperand(&type3);
21 auto forgetGateBias = model->addOperand(&type3);
22 auto cellGateBias = model->addOperand(&type3);
23 auto outputGateBias = model->addOperand(&type3);
24 auto prevCellState = model->addOperand(&type4);
25 auto prevOutput = model->addOperand(&type5);
26 auto cellStateOut = model->addOperand(&type4);
27 auto output = model->addOperand(&type5);
28 // Phase 2, operations
29 model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput}, {cellStateOut, output});
30 // Phase 3, inputs and outputs
31 model->identifyInputsAndOutputs(
32 {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput},
33 {cellStateOut, output});
34 assert(model->isValid());
35 }
36
// Returns true if output index i should be ignored when comparing results.
// The ignore set is empty for this variant, so no output is skipped.
inline bool is_ignored(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_relaxed(Model * model)42 void CreateModel_relaxed(Model *model) {
43 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.0078125f, 128);
44 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
45 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
46 OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
47 OperandType type4(Type::TENSOR_QUANT16_SYMM, {2, 4}, 0.00048828125f, 0);
48 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 4}, 0.0078125f, 128);
49 // Phase 1, operands
50 auto input = model->addOperand(&type0);
51 auto inputToInputWeights = model->addOperand(&type1);
52 auto inputToForgetWeights = model->addOperand(&type1);
53 auto inputToCellWeights = model->addOperand(&type1);
54 auto inputToOutputWeights = model->addOperand(&type1);
55 auto recurrentToInputWeights = model->addOperand(&type2);
56 auto recurrentToForgetWeights = model->addOperand(&type2);
57 auto recurrentToCellWeights = model->addOperand(&type2);
58 auto recurrentToOutputWeights = model->addOperand(&type2);
59 auto inputGateBias = model->addOperand(&type3);
60 auto forgetGateBias = model->addOperand(&type3);
61 auto cellGateBias = model->addOperand(&type3);
62 auto outputGateBias = model->addOperand(&type3);
63 auto prevCellState = model->addOperand(&type4);
64 auto prevOutput = model->addOperand(&type5);
65 auto cellStateOut = model->addOperand(&type4);
66 auto output = model->addOperand(&type5);
67 // Phase 2, operations
68 model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput}, {cellStateOut, output});
69 // Phase 3, inputs and outputs
70 model->identifyInputsAndOutputs(
71 {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput},
72 {cellStateOut, output});
73 // Phase 4: set relaxed execution
74 model->relaxComputationFloat32toFloat16(true);
75 assert(model->isValid());
76 }
77
// Returns true if output index i should be ignored when comparing results
// of the relaxed variant. The ignore set is empty, so nothing is skipped.
inline bool is_ignored_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape(Model * model)83 void CreateModel_dynamic_output_shape(Model *model) {
84 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.0078125f, 128);
85 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
86 OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
87 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
88 OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
89 OperandType type4(Type::TENSOR_QUANT16_SYMM, {2, 4}, 0.00048828125f, 0);
90 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 4}, 0.0078125f, 128);
91 OperandType type9(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
92 // Phase 1, operands
93 auto input = model->addOperand(&type0);
94 auto inputToInputWeights = model->addOperand(&type1);
95 auto inputToForgetWeights = model->addOperand(&type1);
96 auto inputToCellWeights = model->addOperand(&type1);
97 auto inputToOutputWeights = model->addOperand(&type1);
98 auto recurrentToInputWeights = model->addOperand(&type2);
99 auto recurrentToForgetWeights = model->addOperand(&type2);
100 auto recurrentToCellWeights = model->addOperand(&type2);
101 auto recurrentToOutputWeights = model->addOperand(&type2);
102 auto inputGateBias = model->addOperand(&type3);
103 auto forgetGateBias = model->addOperand(&type3);
104 auto cellGateBias = model->addOperand(&type3);
105 auto outputGateBias = model->addOperand(&type3);
106 auto prevCellState = model->addOperand(&type4);
107 auto prevOutput = model->addOperand(&type5);
108 auto cellStateOut = model->addOperand(&type9);
109 auto output = model->addOperand(&type10);
110 // Phase 2, operations
111 model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput}, {cellStateOut, output});
112 // Phase 3, inputs and outputs
113 model->identifyInputsAndOutputs(
114 {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput},
115 {cellStateOut, output});
116 assert(model->isValid());
117 }
118
// Returns true if output index i should be ignored for the dynamic output
// shape variant. The ignore set is empty, so nothing is skipped.
inline bool is_ignored_dynamic_output_shape(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_relaxed(Model * model)124 void CreateModel_dynamic_output_shape_relaxed(Model *model) {
125 OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 0.0078125f, 128);
126 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
127 OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
128 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
129 OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
130 OperandType type4(Type::TENSOR_QUANT16_SYMM, {2, 4}, 0.00048828125f, 0);
131 OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 4}, 0.0078125f, 128);
132 OperandType type9(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
133 // Phase 1, operands
134 auto input = model->addOperand(&type0);
135 auto inputToInputWeights = model->addOperand(&type1);
136 auto inputToForgetWeights = model->addOperand(&type1);
137 auto inputToCellWeights = model->addOperand(&type1);
138 auto inputToOutputWeights = model->addOperand(&type1);
139 auto recurrentToInputWeights = model->addOperand(&type2);
140 auto recurrentToForgetWeights = model->addOperand(&type2);
141 auto recurrentToCellWeights = model->addOperand(&type2);
142 auto recurrentToOutputWeights = model->addOperand(&type2);
143 auto inputGateBias = model->addOperand(&type3);
144 auto forgetGateBias = model->addOperand(&type3);
145 auto cellGateBias = model->addOperand(&type3);
146 auto outputGateBias = model->addOperand(&type3);
147 auto prevCellState = model->addOperand(&type4);
148 auto prevOutput = model->addOperand(&type5);
149 auto cellStateOut = model->addOperand(&type9);
150 auto output = model->addOperand(&type10);
151 // Phase 2, operations
152 model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput}, {cellStateOut, output});
153 // Phase 3, inputs and outputs
154 model->identifyInputsAndOutputs(
155 {input, inputToInputWeights, inputToForgetWeights, inputToCellWeights, inputToOutputWeights, recurrentToInputWeights, recurrentToForgetWeights, recurrentToCellWeights, recurrentToOutputWeights, inputGateBias, forgetGateBias, cellGateBias, outputGateBias, prevCellState, prevOutput},
156 {cellStateOut, output});
157 // Phase 4: set relaxed execution
158 model->relaxComputationFloat32toFloat16(true);
159 assert(model->isValid());
160 }
161
// Returns true if output index i should be ignored for the dynamic output
// shape + relaxed variant. The ignore set is empty, so nothing is skipped.
inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_2(Model * model)167 void CreateModel_2(Model *model) {
168 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
169 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
170 OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
171 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.0078125f, 128);
172 OperandType type7(Type::TENSOR_QUANT16_SYMM, {1, 4}, 0.00048828125f, 0);
173 OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 4}, 0.0078125f, 128);
174 // Phase 1, operands
175 auto input1 = model->addOperand(&type6);
176 auto inputToInputWeights1 = model->addOperand(&type1);
177 auto inputToForgetWeights1 = model->addOperand(&type1);
178 auto inputToCellWeights1 = model->addOperand(&type1);
179 auto inputToOutputWeights1 = model->addOperand(&type1);
180 auto recurrentToInputWeights1 = model->addOperand(&type2);
181 auto recurrentToForgetWeights1 = model->addOperand(&type2);
182 auto recurrentToCellWeights1 = model->addOperand(&type2);
183 auto recurrentToOutputWeights1 = model->addOperand(&type2);
184 auto inputGateBias1 = model->addOperand(&type3);
185 auto forgetGateBias1 = model->addOperand(&type3);
186 auto cellGateBias1 = model->addOperand(&type3);
187 auto outputGateBias1 = model->addOperand(&type3);
188 auto prevCellState1 = model->addOperand(&type7);
189 auto prevOutput1 = model->addOperand(&type8);
190 auto cellStateOut1 = model->addOperand(&type7);
191 auto output1 = model->addOperand(&type8);
192 // Phase 2, operations
193 static uint8_t inputToInputWeights1_init[] = {146, 250, 235, 171, 10, 218, 171, 108};
194 model->setOperandValue(inputToInputWeights1, inputToInputWeights1_init, sizeof(uint8_t) * 8);
195 static uint8_t inputToForgetWeights1_init[] = {24, 50, 132, 179, 158, 110, 3, 169};
196 model->setOperandValue(inputToForgetWeights1, inputToForgetWeights1_init, sizeof(uint8_t) * 8);
197 static uint8_t inputToCellWeights1_init[] = {133, 34, 29, 49, 206, 109, 54, 183};
198 model->setOperandValue(inputToCellWeights1, inputToCellWeights1_init, sizeof(uint8_t) * 8);
199 static uint8_t inputToOutputWeights1_init[] = {195, 187, 11, 99, 109, 10, 218, 48};
200 model->setOperandValue(inputToOutputWeights1, inputToOutputWeights1_init, sizeof(uint8_t) * 8);
201 static uint8_t recurrentToInputWeights1_init[] = {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
202 model->setOperandValue(recurrentToInputWeights1, recurrentToInputWeights1_init, sizeof(uint8_t) * 16);
203 static uint8_t recurrentToForgetWeights1_init[] = {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
204 model->setOperandValue(recurrentToForgetWeights1, recurrentToForgetWeights1_init, sizeof(uint8_t) * 16);
205 static uint8_t recurrentToCellWeights1_init[] = {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
206 model->setOperandValue(recurrentToCellWeights1, recurrentToCellWeights1_init, sizeof(uint8_t) * 16);
207 static uint8_t recurrentToOutputWeights1_init[] = {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
208 model->setOperandValue(recurrentToOutputWeights1, recurrentToOutputWeights1_init, sizeof(uint8_t) * 16);
209 static int32_t inputGateBias1_init[] = {-7876, 13488, -726, 32839};
210 model->setOperandValue(inputGateBias1, inputGateBias1_init, sizeof(int32_t) * 4);
211 static int32_t forgetGateBias1_init[] = {9206, -46884, -11693, -38724};
212 model->setOperandValue(forgetGateBias1, forgetGateBias1_init, sizeof(int32_t) * 4);
213 static int32_t cellGateBias1_init[] = {39481, 48624, 48976, -21419};
214 model->setOperandValue(cellGateBias1, cellGateBias1_init, sizeof(int32_t) * 4);
215 static int32_t outputGateBias1_init[] = {-58999, -17050, -41852, -40538};
216 model->setOperandValue(outputGateBias1, outputGateBias1_init, sizeof(int32_t) * 4);
217 model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input1, inputToInputWeights1, inputToForgetWeights1, inputToCellWeights1, inputToOutputWeights1, recurrentToInputWeights1, recurrentToForgetWeights1, recurrentToCellWeights1, recurrentToOutputWeights1, inputGateBias1, forgetGateBias1, cellGateBias1, outputGateBias1, prevCellState1, prevOutput1}, {cellStateOut1, output1});
218 // Phase 3, inputs and outputs
219 model->identifyInputsAndOutputs(
220 {input1, prevCellState1, prevOutput1},
221 {cellStateOut1, output1});
222 assert(model->isValid());
223 }
224
// Returns true if output index i should be ignored for the second example.
// The ignore set is empty, so nothing is skipped.
inline bool is_ignored_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_relaxed_2(Model * model)230 void CreateModel_relaxed_2(Model *model) {
231 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
232 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
233 OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
234 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.0078125f, 128);
235 OperandType type7(Type::TENSOR_QUANT16_SYMM, {1, 4}, 0.00048828125f, 0);
236 OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 4}, 0.0078125f, 128);
237 // Phase 1, operands
238 auto input1 = model->addOperand(&type6);
239 auto inputToInputWeights1 = model->addOperand(&type1);
240 auto inputToForgetWeights1 = model->addOperand(&type1);
241 auto inputToCellWeights1 = model->addOperand(&type1);
242 auto inputToOutputWeights1 = model->addOperand(&type1);
243 auto recurrentToInputWeights1 = model->addOperand(&type2);
244 auto recurrentToForgetWeights1 = model->addOperand(&type2);
245 auto recurrentToCellWeights1 = model->addOperand(&type2);
246 auto recurrentToOutputWeights1 = model->addOperand(&type2);
247 auto inputGateBias1 = model->addOperand(&type3);
248 auto forgetGateBias1 = model->addOperand(&type3);
249 auto cellGateBias1 = model->addOperand(&type3);
250 auto outputGateBias1 = model->addOperand(&type3);
251 auto prevCellState1 = model->addOperand(&type7);
252 auto prevOutput1 = model->addOperand(&type8);
253 auto cellStateOut1 = model->addOperand(&type7);
254 auto output1 = model->addOperand(&type8);
255 // Phase 2, operations
256 static uint8_t inputToInputWeights1_init[] = {146, 250, 235, 171, 10, 218, 171, 108};
257 model->setOperandValue(inputToInputWeights1, inputToInputWeights1_init, sizeof(uint8_t) * 8);
258 static uint8_t inputToForgetWeights1_init[] = {24, 50, 132, 179, 158, 110, 3, 169};
259 model->setOperandValue(inputToForgetWeights1, inputToForgetWeights1_init, sizeof(uint8_t) * 8);
260 static uint8_t inputToCellWeights1_init[] = {133, 34, 29, 49, 206, 109, 54, 183};
261 model->setOperandValue(inputToCellWeights1, inputToCellWeights1_init, sizeof(uint8_t) * 8);
262 static uint8_t inputToOutputWeights1_init[] = {195, 187, 11, 99, 109, 10, 218, 48};
263 model->setOperandValue(inputToOutputWeights1, inputToOutputWeights1_init, sizeof(uint8_t) * 8);
264 static uint8_t recurrentToInputWeights1_init[] = {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
265 model->setOperandValue(recurrentToInputWeights1, recurrentToInputWeights1_init, sizeof(uint8_t) * 16);
266 static uint8_t recurrentToForgetWeights1_init[] = {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
267 model->setOperandValue(recurrentToForgetWeights1, recurrentToForgetWeights1_init, sizeof(uint8_t) * 16);
268 static uint8_t recurrentToCellWeights1_init[] = {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
269 model->setOperandValue(recurrentToCellWeights1, recurrentToCellWeights1_init, sizeof(uint8_t) * 16);
270 static uint8_t recurrentToOutputWeights1_init[] = {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
271 model->setOperandValue(recurrentToOutputWeights1, recurrentToOutputWeights1_init, sizeof(uint8_t) * 16);
272 static int32_t inputGateBias1_init[] = {-7876, 13488, -726, 32839};
273 model->setOperandValue(inputGateBias1, inputGateBias1_init, sizeof(int32_t) * 4);
274 static int32_t forgetGateBias1_init[] = {9206, -46884, -11693, -38724};
275 model->setOperandValue(forgetGateBias1, forgetGateBias1_init, sizeof(int32_t) * 4);
276 static int32_t cellGateBias1_init[] = {39481, 48624, 48976, -21419};
277 model->setOperandValue(cellGateBias1, cellGateBias1_init, sizeof(int32_t) * 4);
278 static int32_t outputGateBias1_init[] = {-58999, -17050, -41852, -40538};
279 model->setOperandValue(outputGateBias1, outputGateBias1_init, sizeof(int32_t) * 4);
280 model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input1, inputToInputWeights1, inputToForgetWeights1, inputToCellWeights1, inputToOutputWeights1, recurrentToInputWeights1, recurrentToForgetWeights1, recurrentToCellWeights1, recurrentToOutputWeights1, inputGateBias1, forgetGateBias1, cellGateBias1, outputGateBias1, prevCellState1, prevOutput1}, {cellStateOut1, output1});
281 // Phase 3, inputs and outputs
282 model->identifyInputsAndOutputs(
283 {input1, prevCellState1, prevOutput1},
284 {cellStateOut1, output1});
285 // Phase 4: set relaxed execution
286 model->relaxComputationFloat32toFloat16(true);
287 assert(model->isValid());
288 }
289
// Returns true if output index i should be ignored for the second relaxed
// example. The ignore set is empty, so nothing is skipped.
inline bool is_ignored_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_2(Model * model)295 void CreateModel_dynamic_output_shape_2(Model *model) {
296 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
297 OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
298 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
299 OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
300 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.0078125f, 128);
301 OperandType type7(Type::TENSOR_QUANT16_SYMM, {1, 4}, 0.00048828125f, 0);
302 OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 4}, 0.0078125f, 128);
303 OperandType type9(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
304 // Phase 1, operands
305 auto input1 = model->addOperand(&type6);
306 auto inputToInputWeights1 = model->addOperand(&type1);
307 auto inputToForgetWeights1 = model->addOperand(&type1);
308 auto inputToCellWeights1 = model->addOperand(&type1);
309 auto inputToOutputWeights1 = model->addOperand(&type1);
310 auto recurrentToInputWeights1 = model->addOperand(&type2);
311 auto recurrentToForgetWeights1 = model->addOperand(&type2);
312 auto recurrentToCellWeights1 = model->addOperand(&type2);
313 auto recurrentToOutputWeights1 = model->addOperand(&type2);
314 auto inputGateBias1 = model->addOperand(&type3);
315 auto forgetGateBias1 = model->addOperand(&type3);
316 auto cellGateBias1 = model->addOperand(&type3);
317 auto outputGateBias1 = model->addOperand(&type3);
318 auto prevCellState1 = model->addOperand(&type7);
319 auto prevOutput1 = model->addOperand(&type8);
320 auto cellStateOut1 = model->addOperand(&type9);
321 auto output1 = model->addOperand(&type10);
322 // Phase 2, operations
323 static uint8_t inputToInputWeights1_init[] = {146, 250, 235, 171, 10, 218, 171, 108};
324 model->setOperandValue(inputToInputWeights1, inputToInputWeights1_init, sizeof(uint8_t) * 8);
325 static uint8_t inputToForgetWeights1_init[] = {24, 50, 132, 179, 158, 110, 3, 169};
326 model->setOperandValue(inputToForgetWeights1, inputToForgetWeights1_init, sizeof(uint8_t) * 8);
327 static uint8_t inputToCellWeights1_init[] = {133, 34, 29, 49, 206, 109, 54, 183};
328 model->setOperandValue(inputToCellWeights1, inputToCellWeights1_init, sizeof(uint8_t) * 8);
329 static uint8_t inputToOutputWeights1_init[] = {195, 187, 11, 99, 109, 10, 218, 48};
330 model->setOperandValue(inputToOutputWeights1, inputToOutputWeights1_init, sizeof(uint8_t) * 8);
331 static uint8_t recurrentToInputWeights1_init[] = {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
332 model->setOperandValue(recurrentToInputWeights1, recurrentToInputWeights1_init, sizeof(uint8_t) * 16);
333 static uint8_t recurrentToForgetWeights1_init[] = {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
334 model->setOperandValue(recurrentToForgetWeights1, recurrentToForgetWeights1_init, sizeof(uint8_t) * 16);
335 static uint8_t recurrentToCellWeights1_init[] = {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
336 model->setOperandValue(recurrentToCellWeights1, recurrentToCellWeights1_init, sizeof(uint8_t) * 16);
337 static uint8_t recurrentToOutputWeights1_init[] = {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
338 model->setOperandValue(recurrentToOutputWeights1, recurrentToOutputWeights1_init, sizeof(uint8_t) * 16);
339 static int32_t inputGateBias1_init[] = {-7876, 13488, -726, 32839};
340 model->setOperandValue(inputGateBias1, inputGateBias1_init, sizeof(int32_t) * 4);
341 static int32_t forgetGateBias1_init[] = {9206, -46884, -11693, -38724};
342 model->setOperandValue(forgetGateBias1, forgetGateBias1_init, sizeof(int32_t) * 4);
343 static int32_t cellGateBias1_init[] = {39481, 48624, 48976, -21419};
344 model->setOperandValue(cellGateBias1, cellGateBias1_init, sizeof(int32_t) * 4);
345 static int32_t outputGateBias1_init[] = {-58999, -17050, -41852, -40538};
346 model->setOperandValue(outputGateBias1, outputGateBias1_init, sizeof(int32_t) * 4);
347 model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input1, inputToInputWeights1, inputToForgetWeights1, inputToCellWeights1, inputToOutputWeights1, recurrentToInputWeights1, recurrentToForgetWeights1, recurrentToCellWeights1, recurrentToOutputWeights1, inputGateBias1, forgetGateBias1, cellGateBias1, outputGateBias1, prevCellState1, prevOutput1}, {cellStateOut1, output1});
348 // Phase 3, inputs and outputs
349 model->identifyInputsAndOutputs(
350 {input1, prevCellState1, prevOutput1},
351 {cellStateOut1, output1});
352 assert(model->isValid());
353 }
354
// Returns true if output index i should be ignored for the second dynamic
// output shape example. The ignore set is empty, so nothing is skipped.
inline bool is_ignored_dynamic_output_shape_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}

CreateModel_dynamic_output_shape_relaxed_2(Model * model)360 void CreateModel_dynamic_output_shape_relaxed_2(Model *model) {
361 OperandType type1(Type::TENSOR_QUANT8_ASYMM, {4, 2}, 0.00408021f, 100);
362 OperandType type10(Type::TENSOR_QUANT8_ASYMM, {0, 0}, 0.0078125f, 128);
363 OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 4}, 0.00408021f, 100);
364 OperandType type3(Type::TENSOR_INT32, {4}, 3.1876640625e-05f, 0);
365 OperandType type6(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.0078125f, 128);
366 OperandType type7(Type::TENSOR_QUANT16_SYMM, {1, 4}, 0.00048828125f, 0);
367 OperandType type8(Type::TENSOR_QUANT8_ASYMM, {1, 4}, 0.0078125f, 128);
368 OperandType type9(Type::TENSOR_QUANT16_SYMM, {0, 0}, 0.00048828125f, 0);
369 // Phase 1, operands
370 auto input1 = model->addOperand(&type6);
371 auto inputToInputWeights1 = model->addOperand(&type1);
372 auto inputToForgetWeights1 = model->addOperand(&type1);
373 auto inputToCellWeights1 = model->addOperand(&type1);
374 auto inputToOutputWeights1 = model->addOperand(&type1);
375 auto recurrentToInputWeights1 = model->addOperand(&type2);
376 auto recurrentToForgetWeights1 = model->addOperand(&type2);
377 auto recurrentToCellWeights1 = model->addOperand(&type2);
378 auto recurrentToOutputWeights1 = model->addOperand(&type2);
379 auto inputGateBias1 = model->addOperand(&type3);
380 auto forgetGateBias1 = model->addOperand(&type3);
381 auto cellGateBias1 = model->addOperand(&type3);
382 auto outputGateBias1 = model->addOperand(&type3);
383 auto prevCellState1 = model->addOperand(&type7);
384 auto prevOutput1 = model->addOperand(&type8);
385 auto cellStateOut1 = model->addOperand(&type9);
386 auto output1 = model->addOperand(&type10);
387 // Phase 2, operations
388 static uint8_t inputToInputWeights1_init[] = {146, 250, 235, 171, 10, 218, 171, 108};
389 model->setOperandValue(inputToInputWeights1, inputToInputWeights1_init, sizeof(uint8_t) * 8);
390 static uint8_t inputToForgetWeights1_init[] = {24, 50, 132, 179, 158, 110, 3, 169};
391 model->setOperandValue(inputToForgetWeights1, inputToForgetWeights1_init, sizeof(uint8_t) * 8);
392 static uint8_t inputToCellWeights1_init[] = {133, 34, 29, 49, 206, 109, 54, 183};
393 model->setOperandValue(inputToCellWeights1, inputToCellWeights1_init, sizeof(uint8_t) * 8);
394 static uint8_t inputToOutputWeights1_init[] = {195, 187, 11, 99, 109, 10, 218, 48};
395 model->setOperandValue(inputToOutputWeights1, inputToOutputWeights1_init, sizeof(uint8_t) * 8);
396 static uint8_t recurrentToInputWeights1_init[] = {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26};
397 model->setOperandValue(recurrentToInputWeights1, recurrentToInputWeights1_init, sizeof(uint8_t) * 16);
398 static uint8_t recurrentToForgetWeights1_init[] = {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253};
399 model->setOperandValue(recurrentToForgetWeights1, recurrentToForgetWeights1_init, sizeof(uint8_t) * 16);
400 static uint8_t recurrentToCellWeights1_init[] = {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216};
401 model->setOperandValue(recurrentToCellWeights1, recurrentToCellWeights1_init, sizeof(uint8_t) * 16);
402 static uint8_t recurrentToOutputWeights1_init[] = {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98};
403 model->setOperandValue(recurrentToOutputWeights1, recurrentToOutputWeights1_init, sizeof(uint8_t) * 16);
404 static int32_t inputGateBias1_init[] = {-7876, 13488, -726, 32839};
405 model->setOperandValue(inputGateBias1, inputGateBias1_init, sizeof(int32_t) * 4);
406 static int32_t forgetGateBias1_init[] = {9206, -46884, -11693, -38724};
407 model->setOperandValue(forgetGateBias1, forgetGateBias1_init, sizeof(int32_t) * 4);
408 static int32_t cellGateBias1_init[] = {39481, 48624, 48976, -21419};
409 model->setOperandValue(cellGateBias1, cellGateBias1_init, sizeof(int32_t) * 4);
410 static int32_t outputGateBias1_init[] = {-58999, -17050, -41852, -40538};
411 model->setOperandValue(outputGateBias1, outputGateBias1_init, sizeof(int32_t) * 4);
412 model->addOperation(ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, {input1, inputToInputWeights1, inputToForgetWeights1, inputToCellWeights1, inputToOutputWeights1, recurrentToInputWeights1, recurrentToForgetWeights1, recurrentToCellWeights1, recurrentToOutputWeights1, inputGateBias1, forgetGateBias1, cellGateBias1, outputGateBias1, prevCellState1, prevOutput1}, {cellStateOut1, output1});
413 // Phase 3, inputs and outputs
414 model->identifyInputsAndOutputs(
415 {input1, prevCellState1, prevOutput1},
416 {cellStateOut1, output1});
417 // Phase 4: set relaxed execution
418 model->relaxComputationFloat32toFloat16(true);
419 assert(model->isValid());
420 }
421
// Returns true if output index i should be ignored for the second dynamic
// output shape + relaxed example. The ignore set is empty, so nothing is
// skipped.
inline bool is_ignored_dynamic_output_shape_relaxed_2(int i) {
  static std::set<int> ignore = {};
  return ignore.find(i) != ignore.end();
}
