// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_4x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

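  // Load the even/odd column masks (used to zero out-of-bounds pixels in the
  // remainder block) and splat the output clamping bounds.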
  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

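  // The packed weights hold the bias followed by the nine 3x3 filter taps;
  // splat each one into its own vector (the "loadsplat" variant).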
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

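  // input_decrement rewinds a row pointer from its end-of-row position back to
  // the start of that row (pointers advance by input_width rounded down to the
  // 8-float block size). output_width is the byte length of one output row:
  // ceil(input_width_in_pixels / 2) pixels for stride 2 with padding 1.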
  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));

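  // Nine input row pointers cover the 3-tap vertical windows of 4 stride-2
  // output rows. With top padding, i0 reads from the zero page; the
  // ((-padding_top) & input_width) bias keeps i1 at the first real input row.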
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);

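  // Four output row pointers, one per output row produced per outer iteration.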
  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);
  float* o2 = (float*) ((uintptr_t) o1 + output_width);
  float* o3 = (float*) ((uintptr_t) o2 + output_width);

  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
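    // Redirect input rows that fall below the bottom of the padded image to
    // the zero page, and alias the corresponding output pointers so stores to
    // out-of-range rows land on a row that a later (valid) store overwrites.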
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 5) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i4 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i5 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 8) {
      i6 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 9) {
      i7 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 10) {
      i8 = zero;
    }

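    // vi*x1357 carry the odd-indexed pixels of the previous block; their last
    // lane supplies the left neighbor (column 7) of the next block's first
    // output pixel. They start at zero, which doubles as the left padding.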
    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;
    v128_t vi3x1357 = vzero;
    v128_t vi4x1357 = vzero;
    v128_t vi5x1357 = vzero;
    v128_t vi6x1357 = vzero;
    v128_t vi7x1357 = vzero;
    v128_t vi8x1357 = vzero;

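    // Main loop: each iteration consumes 8 input pixels per row and produces
    // 4 output pixels per row.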
    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;
      v128_t vo3p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      i4 += 8;
      const v128_t vi5x89AB = wasm_v128_load(i5);
      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
      i5 += 8;
      const v128_t vi6x89AB = wasm_v128_load(i6);
      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
      i6 += 8;
      const v128_t vi7x89AB = wasm_v128_load(i7);
      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
      i7 += 8;
      const v128_t vi8x89AB = wasm_v128_load(i8);
      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
      i8 += 8;

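      // De-interleave each row into even (x8ACE) and odd (x9BDF) columns;
      // with stride 2, the even columns are the kernel centers.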
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
      const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
      const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
      const v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
      const v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);

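      // Center column of the 3x3 kernel (taps k01, k11, k21). Output row n
      // reads input rows 2n..2n+2, so adjacent accumulators share rows.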
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, vk21));

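      // Shift the previous block's last odd pixel into lane 0 to form the
      // left-neighbor column (x7BDF), then save the current odd pixels for
      // the next iteration.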
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;
      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      vi5x1357 = vi5x9BDF;
      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
      vi6x1357 = vi6x9BDF;
      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
      vi7x1357 = vi7x9BDF;
      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
      vi8x1357 = vi8x9BDF;

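      // Left column taps (k00, k10, k20) consume x7BDF; right column taps
      // (k02, k12, k22) consume x9BDF.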
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, vk22));

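      // Clamp to [min, max] with comparison + bitselect, which lowers to
      // blend instructions on x86 (hence the _x86_ variant name).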
      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));

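      // Store in reverse row order so that, when output pointers alias near
      // the bottom edge, the valid row is written last.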
      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;
      v128_t vo3p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      const v128_t vi5x89AB = wasm_v128_load(i5);
      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
      const v128_t vi6x89AB = wasm_v128_load(i6);
      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
      const v128_t vi7x89AB = wasm_v128_load(i7);
      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
      const v128_t vi8x89AB = wasm_v128_load(i8);
      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);

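      // Same de-interleave as the main loop, but mask out columns that lie
      // past the end of the row so they contribute zero.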
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
      const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
      const v128_t vi7x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));
      const v128_t vi8x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6));
      const v128_t vi8x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, vk21));

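      // Form the left-neighbor column as before; the x1357 state need not be
      // updated since this is the last block of the row.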
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, vk22));

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));

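      // 1-7 input pixels remain, yielding ceil(n/2) outputs. Biasing w by one
      // float makes its bits select the 4-, 2-, and 1-element stores below.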
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o3, vo3); o3 += 4;
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

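    // Advance to the next group of 8 input rows: i8 has moved one row group
    // ahead, so rewinding it by input_decrement gives the new i0.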
    i0 = (const float*) ((uintptr_t) i8 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);
    i7 = (const float*) ((uintptr_t) i6 + input_width);
    i8 = (const float*) ((uintptr_t) i7 + input_width);

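    // The next group's first output row starts where the last written row
    // (o3) ended.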
    o0 = o3;
    o1 = (float*) ((uintptr_t) o0 + output_width);
    o2 = (float*) ((uintptr_t) o1 + output_width);
    o3 = (float*) ((uintptr_t) o2 + output_width);

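    // doz() is a saturating (difference-or-zero) subtraction: each iteration
    // retires 4 output rows, i.e. 8 padded input rows.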
    output_height = doz(output_height, 4);
    padded_input_height = doz(padded_input_height, 8);
  } while (output_height != 0);
}