// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_1x4_acc5(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

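  // Note: input_width is measured in bytes, not elements. padding_top == 2
  // matches the two rows of implicit zero padding above the image that a
  // 5x5 kernel with padding 2 ("5x5p2") implies.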
  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

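  // Weight layout: weights[0] is the bias, weights[1..25] are the 5x5 taps in
  // row-major order. Each value is broadcast into its own register up front
  // (the "loadsplat" variant), so the loops below are pure shuffles and
  // multiply-adds.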
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
  const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
  const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
  const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
  const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
  const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
  const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
  const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
  const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
  const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
  const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
  const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
  const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
  const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
  const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

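  // Rewind amount for stepping the row pointers down one row at the end of
  // each output row: input_width rounded up to a whole 4-float vector,
  // matching how far the loops below advance past the end of each row.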
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

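  // The two rows of implicit top padding are read from the caller-provided
  // zero row rather than from the input.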
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;

  size_t output_height = input_height;
  do {
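    // Implicit bottom padding: when fewer than 2 input rows remain below the
    // current output row, redirect the out-of-bounds row pointers to zeros.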
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
    }

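    // Register naming: viRxCCCC holds columns CCCC of input row R. The x0123
    // block starts as zeros; its last two lanes act as the two columns of
    // implicit left padding. The shuffles below derive the shifted windows
    // (x2345, x3456, x5678, x6789) needed by the 5 horizontal kernel taps.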
    v128_t vi0x0123 = vzero;
    v128_t vi1x0123 = vzero;
    v128_t vi2x0123 = vzero;
    v128_t vi3x0123 = vzero;
    v128_t vi4x0123 = vzero;

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;

    size_t w = input_width;
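    // Main loop: while more than 8 pixels (2 vectors) remain in the row, the
    // look-ahead load of the next block (x89AB) can be used without masking.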
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;
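      // Five independent accumulators (vo0p0..vo0p4, the "acc5" variant)
      // shorten the dependency chains of the 25 multiply-adds; they are
      // reduced to a single sum at the end of the iteration.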

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);

      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));

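      // Reduce the five partial sums, then clamp to [min, max]. This _x86_
      // variant clamps via compare + bitselect rather than f32x4.min/max,
      // presumably because it lowers to a shorter SSE sequence (x86 min/max
      // have different NaN semantics and need extra instructions).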
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;

      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;

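      // Zero the lanes of the look-ahead block that fall past the end of the
      // row, so they act as implicit right padding.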
      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);

      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

      wasm_v128_store(o0, vo0); o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
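    // Process the final partial block of 1..4 pixels. Here the x4567 block
    // itself must be masked, and the right-hand neighbors are all zeros.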
    {
      v128_t vo0p0 = vbias;

      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x4567, vk22);

      v128_t vo0p4 = wasm_f32x4_mul(vi3x4567, vk32);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x3456, vk01));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x3456, vk11));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x2345, vk00));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x2345, vk10));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x2345, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x5678, vk13));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x6789, vk04));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1x6789, vk14));

      vo0p4 = wasm_f32x4_add(vo0p4, wasm_f32x4_mul(vi2x6789, vk24));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, vk34));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p4);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

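      // Store 4, 2, or 1 results depending on how many output pixels remain;
      // after the 2-float store the vector is rotated so the remaining lane
      // moves to position 0.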
      if XNN_LIKELY(w & (4 * sizeof(float))) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

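    // Slide the window of row pointers down one input row: i0/i1 take over
    // the rows previously read through i1/i2 (rewound to the row start via
    // input_decrement), and i2..i4 follow at input_width strides.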
    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);

  } while (--output_height != 0);
}