// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 1);
  assert(padding_top <= 2);

  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

  const uint32_t padding_top_less_1 = padding_top - 1;
  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));

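  // Pointers to the five input rows needed by the 5x5 kernel. i0 always starts
  // in the top padding (padding_top >= 1) and reads from the caller-provided
  // zero buffer; i1 does too when padding_top == 2.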
  const float* i0 = zero;
  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
    i1 = zero;
  }
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);


  float* o0 = output;

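  // One output row per outer-loop iteration:
  // output_height = (padded_input_height - 5 /* kernel */) / 2 /* stride */ + 1,
  // folded below into (padded_input_height - 5 + 2) / 2.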
  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i4 = zero;
    }

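    // The two columns of left padding are realized by seeding the even/odd
    // column registers with zeros; real pixels shift in through the shuffles
    // below.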
    v128_t vi0x0246 = vzero;
    v128_t vi1x0246 = vzero;
    v128_t vi2x0246 = vzero;
    v128_t vi3x0246 = vzero;
    v128_t vi4x0246 = vzero;

    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;
    v128_t vi3x1357 = vzero;
    v128_t vi4x1357 = vzero;

    const v128_t vi0x89AB = wasm_v128_load(i0);
    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
    i0 += 8;
    const v128_t vi1x89AB = wasm_v128_load(i1);
    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
    i1 += 8;
    const v128_t vi2x89AB = wasm_v128_load(i2);
    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
    i2 += 8;
    const v128_t vi3x89AB = wasm_v128_load(i3);
    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
    i3 += 8;
    const v128_t vi4x89AB = wasm_v128_load(i4);
    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
    i4 += 8;

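    // De-interleave the 8 freshly loaded pixels of each row into even
    // (8,A,C,E) and odd (9,B,D,F) columns for the stride-2 convolution.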
    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);

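    // Main loop: consume 8 input pixels per row and emit 4 output pixels per
    // iteration, spreading the 25 multiply-adds over 4 accumulators (acc4) to
    // shorten dependency chains.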
    size_t w = input_width;
    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));

      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

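      // Splice the previous block's even columns (0,2,4,6) with the current
      // ones (8,A,C,E) to form the columns two to the left: (6,8,A,C).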
      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
      vi0x0246 = vi0x8ACE;
      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
      vi1x0246 = vi1x8ACE;
      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
      vi2x0246 = vi2x8ACE;
      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
      vi3x0246 = vi3x8ACE;
      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
      vi4x0246 = vi4x8ACE;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;

      const v128_t vi0xGHIJ = wasm_v128_load(i0);
      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1xGHIJ = wasm_v128_load(i1);
      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2xGHIJ = wasm_v128_load(i2);
      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3xGHIJ = wasm_v128_load(i3);
      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4xGHIJ = wasm_v128_load(i4);
      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
      i4 += 8;

      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

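      // Splice the current even columns (8,A,C,E) with the next block's
      // (G,I,K,M) to form the columns two to the right: (A,C,E,G).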
      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
      vi0x8ACE = vi0xGIKM;
      vi0x9BDF = vi0xHJLN;
      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
      vi1x8ACE = vi1xGIKM;
      vi1x9BDF = vi1xHJLN;
      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
      vi2x8ACE = vi2xGIKM;
      vi2x9BDF = vi2xHJLN;
      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
      vi3x8ACE = vi3xGIKM;
      vi3x9BDF = vi3xHJLN;
      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
      vi4x8ACE = vi4xGIKM;
      vi4x9BDF = vi4xHJLN;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 1-8 pixels to process.
    assert(w <= 8 * sizeof(float));
    assert(w >= 1 * sizeof(float));
    {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

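      // Zero the lanes that lie beyond the end of the row (right padding),
      // using the precomputed even/odd masks from params.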
      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);

      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));

      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

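      // Past the end of the row everything is padding, so shift zeros in when
      // forming the (A,C,E,G) columns.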
      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);

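      // Outputs still to store: ceil(remaining_pixels / 2), with w in bytes.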
      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
      if XNN_LIKELY(w_tmp >= 4) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w_tmp & 2) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w_tmp & 1) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

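    // Slide the row window down by two rows (vertical stride 2): the old
    // i2..i4 become the new i0..i2, rewound by the bytes consumed this pass.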
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);


    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}
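
// Illustrative invocation (a sketch under assumed shapes, not part of the
// generated kernel): for a single 17x17 channel with padding_top == 2, a
// caller might do roughly
//
//   float zero[24] = {0};  // one zero row, padded up to a multiple of 8 floats
//   xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc4(
//       /*input_height=*/17, /*input_width=*/17 * sizeof(float),
//       input, weights, zero, output, /*padding_top=*/2, &params);
//
// where weights holds the bias followed by the 25 filter taps, and params
// supplies the even/odd row-end masks and the output min/max clamps.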