// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_2x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 1);
  assert(padding_top <= 2);

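  // Load the even/odd column masks that zero out-of-bounds pixels in the
  // final partial block, and the output clamping bounds.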
  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

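  // weights[0] is the shared bias; weights[1..25] are the 5x5 kernel taps in
  // row-major order (vkRC = row R, column C), each splatted across all lanes.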
  const v128_t vbias = wasm_v32x4_load_splat(weights);
  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

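  // The main loop reads each row in blocks of 8 floats, so every row pointer
  // ends up advanced by input_width rounded up to a whole block;
  // input_decrement rewinds that overshoot when the pointers are rotated to
  // the next group of rows.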
  const uint32_t padding_top_less_1 = padding_top - 1;
  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));

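  // Two output rows of a 5x5 stride-2 kernel consume 7 consecutive input
  // rows. i0 always starts at the zero (padding) row; i1 is redirected to the
  // zero row as well when padding_top == 2.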
  const float* i0 = zero;
  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
    i1 = zero;
  }
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);

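  // Output row stride in bytes: with horizontal stride 2 each output row
  // holds ceil(input_width / 2) pixels.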
  const size_t output_width = round_down_po2((input_width + (4 /* padding */ - 5 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));

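  // Pointers to the two output rows produced per outer-loop iteration.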
  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);

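  // output_height = (padded_input_height - 5) / 2 + 1 for a 5x5 kernel with
  // vertical stride 2.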
  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
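    // Redirect input rows that fall past the bottom edge to the zero row; if
    // the second output row does not exist, alias o1 to o0 (o0 is stored
    // last, so the duplicate stores are harmless).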
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i4 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 8) {
      i5 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 9) {
      i6 = zero;
    }

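    // In the register naming scheme the first loaded block holds columns 8-F,
    // so columns 0-7 are the 8 virtual pixels to the left of the row: they
    // cover the left padding and start out all zero.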
    v128_t vi0x0246 = vzero;
    v128_t vi1x0246 = vzero;
    v128_t vi2x0246 = vzero;
    v128_t vi3x0246 = vzero;
    v128_t vi4x0246 = vzero;
    v128_t vi5x0246 = vzero;
    v128_t vi6x0246 = vzero;

    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;
    v128_t vi3x1357 = vzero;
    v128_t vi4x1357 = vzero;
    v128_t vi5x1357 = vzero;
    v128_t vi6x1357 = vzero;

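    // Load the first 8 pixels (columns 8-F) of each of the 7 input rows.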
    const v128_t vi0x89AB = wasm_v128_load(i0);
    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
    i0 += 8;
    const v128_t vi1x89AB = wasm_v128_load(i1);
    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
    i1 += 8;
    const v128_t vi2x89AB = wasm_v128_load(i2);
    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
    i2 += 8;
    const v128_t vi3x89AB = wasm_v128_load(i3);
    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
    i3 += 8;
    const v128_t vi4x89AB = wasm_v128_load(i4);
    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
    i4 += 8;
    const v128_t vi5x89AB = wasm_v128_load(i5);
    const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
    i5 += 8;
    const v128_t vi6x89AB = wasm_v128_load(i6);
    const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
    i6 += 8;

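    // Deinterleave each block into even (8ACE) and odd (9BDF) columns: with
    // horizontal stride 2, the even columns are the window centers (kernel
    // column 2) and the odd columns sit at center + 1 (kernel column 3).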
    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
    v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
    v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
    v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
    v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);

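    // Main loop: while more than 8 input pixels remain in the row, turn 8
    // input pixels per row into 4 output pixels per output row.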
    size_t w = input_width;
    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

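      // acc2: each output row accumulates into two partial sums (voNp0 and
      // voNp1) to shorten the add dependency chain; they are merged at the end.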
      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));

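      // Splice the last even column of the previous block onto the current
      // even columns to form columns 6,8,A,C (center - 2, kernel column 0).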
      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
      vi0x0246 = vi0x8ACE;
      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
      vi1x0246 = vi1x8ACE;
      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
      vi2x0246 = vi2x8ACE;
      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
      vi3x0246 = vi3x8ACE;
      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
      vi4x0246 = vi4x8ACE;
      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
      vi5x0246 = vi5x8ACE;
      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);
      vi6x0246 = vi6x8ACE;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));

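      // Likewise form the odd columns 7,9,B,D (center - 1, kernel column 1).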
      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;
      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      vi5x1357 = vi5x9BDF;
      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
      vi6x1357 = vi6x9BDF;

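      // Load the next 8 pixels (columns G-N) of each row: they supply the
      // right-hand taps here and become the next iteration's input block.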
      const v128_t vi0xGHIJ = wasm_v128_load(i0);
      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1xGHIJ = wasm_v128_load(i1);
      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2xGHIJ = wasm_v128_load(i2);
      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3xGHIJ = wasm_v128_load(i3);
      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4xGHIJ = wasm_v128_load(i4);
      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
      i4 += 8;
      const v128_t vi5xGHIJ = wasm_v128_load(i5);
      const v128_t vi5xKLMN = wasm_v128_load(i5 + 4);
      i5 += 8;
      const v128_t vi6xGHIJ = wasm_v128_load(i6);
      const v128_t vi6xKLMN = wasm_v128_load(i6 + 4);
      i6 += 8;

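      // Deinterleave the new block into even (GIKM) and odd (HJLN) columns.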
      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);
      const v128_t vi5xGIKM = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 0, 2, 4, 6);
      const v128_t vi5xHJLN = wasm_v32x4_shuffle(vi5xGHIJ, vi5xKLMN, 1, 3, 5, 7);
      const v128_t vi6xGIKM = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 0, 2, 4, 6);
      const v128_t vi6xHJLN = wasm_v32x4_shuffle(vi6xGHIJ, vi6xKLMN, 1, 3, 5, 7);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));

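      // Splice the first even column of the next block onto the current even
      // columns to form A,C,E,G (center + 2, kernel column 4), then rotate the
      // saved even/odd blocks for the next iteration.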
      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
      vi0x8ACE = vi0xGIKM;
      vi0x9BDF = vi0xHJLN;
      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
      vi1x8ACE = vi1xGIKM;
      vi1x9BDF = vi1xHJLN;
      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
      vi2x8ACE = vi2xGIKM;
      vi2x9BDF = vi2xHJLN;
      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
      vi3x8ACE = vi3xGIKM;
      vi3x9BDF = vi3xHJLN;
      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
      vi4x8ACE = vi4xGIKM;
      vi4x9BDF = vi4xHJLN;
      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vi5xGIKM, 1, 2, 3, 4);
      vi5x8ACE = vi5xGIKM;
      vi5x9BDF = vi5xHJLN;
      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vi6xGIKM, 1, 2, 3, 4);
      vi6x8ACE = vi6xGIKM;
      vi6x9BDF = vi6xHJLN;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));

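      // Merge the two partial sums, then clamp to [min, max] with compare +
      // bitselect, which this x86-tuned variant favors over f32x4 min/max.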
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));

      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 1-8 pixels to process.
    assert(w <= 8 * sizeof(float));
    assert(w >= 1 * sizeof(float));
    {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

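      // Mask off the columns that lie beyond the end of the row so they
      // contribute nothing to the accumulators.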
      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);
      vi5x8ACE = wasm_v128_and(vmask_even, vi5x8ACE);
      vi6x8ACE = wasm_v128_and(vmask_even, vi6x8ACE);

      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);
      vi5x9BDF = wasm_v128_and(vmask_odd, vi5x9BDF);
      vi6x9BDF = wasm_v128_and(vmask_odd, vi6x9BDF);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);
      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk02);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk22));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x8ACE, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x8ACE, vk42));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, vk03));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x9BDF, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x9BDF, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x9BDF, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x9BDF, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x9BDF, vk43));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x9BDF, vk43));

      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
      const v128_t vi5x68AC = wasm_v32x4_shuffle(vi5x0246, vi5x8ACE, 3, 4, 5, 6);
      const v128_t vi6x68AC = wasm_v32x4_shuffle(vi6x0246, vi6x8ACE, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x68AC, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x68AC, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x68AC, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x68AC, vk20));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x68AC, vk30));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x68AC, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6x68AC, vk40));

      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      const v128_t vi5x79BD = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      const v128_t vi6x79BD = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x79BD, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x79BD, vk11));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, vk21));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x79BD, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x79BD, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi6x79BD, vk41));

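      // There is no next block: splice in zeroes for the center + 2 columns.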
      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi5xACEG = wasm_v32x4_shuffle(vi5x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi6xACEG = wasm_v32x4_shuffle(vi6x8ACE, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0xACEG, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2xACEG, vk04));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1xACEG, vk14));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3xACEG, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4xACEG, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5xACEG, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4xACEG, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi6xACEG, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));

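      // Pixels left in each output row: one per two remaining input pixels,
      // rounded up. Store 4, 2, and/or 1 values accordingly.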
      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
      if XNN_LIKELY(w_tmp >= 4) {
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w_tmp & 2) {
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
        }
        if (w_tmp & 1) {
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

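    // Advance by four input rows (two output rows at vertical stride 2): the
    // old i4..i6 become the new i0..i2, rewinding the 8-float block overshoot;
    // doz() (decrement-or-zero) saturates the height counters at zero.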
    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
    i2 = (const float*) ((uintptr_t) i6 - input_decrement);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);

    o0 = o1;
    o1 = (float*) ((uintptr_t) o0 + output_width);

    output_height = doz(output_height, 2);
    padded_input_height = doz(padded_input_height, 4);
  } while (output_height != 0);
}