// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

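// Depthwise 2-D convolution for CHW layout: 5x5 kernel, stride 2, padding 2
// on the left/right (1 or 2 rows of top padding arrive via the padding_top
// argument). "loadsplat" broadcasts the bias and all 25 filter taps into SIMD
// registers up front; "1x4_acc4" means each step produces 1 output row and 4
// output pixels, spread over 4 accumulators to shorten dependency chains.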
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_x86_loadsplat_1x4_acc4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 1);
  assert(padding_top <= 2);
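
  // Note: input_width is measured in bytes, not pixels. The even/odd masks
  // below zero out past-the-end lanes in the final partial block; vmin/vmax
  // bound the fused output clamp.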
  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

  const v128_t vbias = wasm_v32x4_load_splat(weights);
  const v128_t vk00 = wasm_v32x4_load_splat(weights + 1);
  const v128_t vk01 = wasm_v32x4_load_splat(weights + 2);
  const v128_t vk02 = wasm_v32x4_load_splat(weights + 3);
  const v128_t vk03 = wasm_v32x4_load_splat(weights + 4);
  const v128_t vk04 = wasm_v32x4_load_splat(weights + 5);
  const v128_t vk10 = wasm_v32x4_load_splat(weights + 6);
  const v128_t vk11 = wasm_v32x4_load_splat(weights + 7);
  const v128_t vk12 = wasm_v32x4_load_splat(weights + 8);
  const v128_t vk13 = wasm_v32x4_load_splat(weights + 9);
  const v128_t vk14 = wasm_v32x4_load_splat(weights + 10);
  const v128_t vk20 = wasm_v32x4_load_splat(weights + 11);
  const v128_t vk21 = wasm_v32x4_load_splat(weights + 12);
  const v128_t vk22 = wasm_v32x4_load_splat(weights + 13);
  const v128_t vk23 = wasm_v32x4_load_splat(weights + 14);
  const v128_t vk24 = wasm_v32x4_load_splat(weights + 15);
  const v128_t vk30 = wasm_v32x4_load_splat(weights + 16);
  const v128_t vk31 = wasm_v32x4_load_splat(weights + 17);
  const v128_t vk32 = wasm_v32x4_load_splat(weights + 18);
  const v128_t vk33 = wasm_v32x4_load_splat(weights + 19);
  const v128_t vk34 = wasm_v32x4_load_splat(weights + 20);
  const v128_t vk40 = wasm_v32x4_load_splat(weights + 21);
  const v128_t vk41 = wasm_v32x4_load_splat(weights + 22);
  const v128_t vk42 = wasm_v32x4_load_splat(weights + 23);
  const v128_t vk43 = wasm_v32x4_load_splat(weights + 24);
  const v128_t vk44 = wasm_v32x4_load_splat(weights + 25);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

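  // i0..i4 are the five input rows feeding the current output row; rows that
  // fall into the top padding are redirected to the caller-provided zero row.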
  const uint32_t padding_top_less_1 = padding_top - 1;
  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));

  const float* i0 = zero;
  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
    i1 = zero;
  }
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);


  float* o0 = output;

  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
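  // Each pass of the loop below produces one output row and advances the
  // input by two rows (stride 2); rows that fall into the bottom padding are
  // redirected to the zero row.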
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i4 = zero;
    }

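    // vi*x0246 / vi*x1357 hold the even/odd input columns of the previous
    // 8-pixel block; starting them at zero supplies the left padding.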
    v128_t vi0x0246 = vzero;
    v128_t vi1x0246 = vzero;
    v128_t vi2x0246 = vzero;
    v128_t vi3x0246 = vzero;
    v128_t vi4x0246 = vzero;

    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;
    v128_t vi3x1357 = vzero;
    v128_t vi4x1357 = vzero;

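    // Load the first 8 pixels of each row and deinterleave them into even
    // (x8ACE) and odd (x9BDF) columns; with stride 2, the even columns line
    // up with the kernel's center taps.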
    const v128_t vi0x89AB = wasm_v128_load(i0);
    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
    i0 += 8;
    const v128_t vi1x89AB = wasm_v128_load(i1);
    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
    i1 += 8;
    const v128_t vi2x89AB = wasm_v128_load(i2);
    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
    i2 += 8;
    const v128_t vi3x89AB = wasm_v128_load(i3);
    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
    i3 += 8;
    const v128_t vi4x89AB = wasm_v128_load(i4);
    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
    i4 += 8;

    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);

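    // Main loop: each iteration consumes 8 input pixels per row and emits 4
    // output pixels. x8ACE pairs with kernel column 2 (the center), x9BDF
    // with kernel column 3.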
    size_t w = input_width;
    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = vbias;

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));

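      // x68AC = even columns two to the left of the centers (kernel column
      // 0), spliced from the previous block's tail; then roll x0246 forward.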
      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
      vi0x0246 = vi0x8ACE;
      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
      vi1x0246 = vi1x8ACE;
      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
      vi2x0246 = vi2x8ACE;
      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
      vi3x0246 = vi3x8ACE;
      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
      vi4x0246 = vi4x8ACE;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));

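      // x79BD = odd columns one to the left of the centers (kernel column 1),
      // spliced the same way from the previous block's odd tail.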
      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;

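      // Load the next 8 pixels of each row and deinterleave them into even
      // (xGIKM) and odd (xHJLN) halves for the next iteration.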
      const v128_t vi0xGHIJ = wasm_v128_load(i0);
      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1xGHIJ = wasm_v128_load(i1);
      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2xGHIJ = wasm_v128_load(i2);
      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3xGHIJ = wasm_v128_load(i3);
      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4xGHIJ = wasm_v128_load(i4);
      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
      i4 += 8;

      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));

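      // xACEG = even columns two to the right of the centers (kernel column
      // 4), borrowing the first even column of the just-loaded block; then
      // roll the block registers forward.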
      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
      vi0x8ACE = vi0xGIKM;
      vi0x9BDF = vi0xHJLN;
      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
      vi1x8ACE = vi1xGIKM;
      vi1x9BDF = vi1xHJLN;
      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
      vi2x8ACE = vi2xGIKM;
      vi2x9BDF = vi2xHJLN;
      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
      vi3x8ACE = vi3xGIKM;
      vi3x9BDF = vi3xHJLN;
      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
      vi4x8ACE = vi4xGIKM;
      vi4x9BDF = vi4xHJLN;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));

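      // Reduce the four partial accumulators and clamp to [vmin, vmax]; this
      // x86-tuned variant clamps via compare + bitselect rather than
      // f32x4.min/max.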
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 1-8 pixels to process.
    assert(w <= 8 * sizeof(float));
    assert(w >= 1 * sizeof(float));
    {
      v128_t vo0p0 = vbias;

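      // Zero out lanes past the end of the row so out-of-bounds pixels do not
      // contribute to the accumulators.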
      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);

      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk02);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk12);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk22);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x8ACE, vk32));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x8ACE, vk42));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi0x9BDF, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, vk23));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x9BDF, vk33));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi4x9BDF, vk43));

      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, vk10));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, vk20));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi3x68AC, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x68AC, vk40));

      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x79BD, vk01));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x79BD, vk11));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x79BD, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x79BD, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x79BD, vk41));

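      // At the right edge there is no next block to borrow from: shift in
      // zeros for the column-4 taps instead.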
      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0xACEG, vk04));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi1xACEG, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, vk34));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

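      // w_tmp is the number of output pixels left: ceil(remaining input
      // pixels / 2). Store 4 at once when possible, otherwise 2 and/or 1.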
      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
      if XNN_LIKELY(w_tmp >= 4) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w_tmp & 2) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w_tmp & 1) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

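    // Advance to the next output row: i0 rebases onto the row i2 pointed at
    // (two rows down, for stride 2); input_decrement rewinds the pointers
    // past the end of the row they over-ran.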
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);


    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}