// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

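// Depthwise 2D convolution for the CHW layout: 3x3 kernel, stride 2, 1-pixel
// implicit padding, WebAssembly SIMD, with kernel taps splatted on demand
// from pre-loaded weight vectors. Each iteration of the main loop computes a
// 4x4 tile of outputs: 4 output rows (o0-o3), 4 output pixels per row.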
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);
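  // Note: input_width is measured in bytes (hence the sizeof(float) check
  // above and the byte-based pointer arithmetic below); input_height is in
  // rows.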

  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

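  // Weight layout: weights[0] is the bias, weights[1..9] are the 3x3 kernel
  // taps in row-major order. Individual taps are splatted where needed with
  // wasm_v32x4_shuffle.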
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));
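  // output_width is also in bytes: ceil(input pixels / 2) pixels per row.
  // input_decrement is how far each row pointer advances through one pass of
  // the inner loop (input_width rounded down to whole 8-pixel blocks); it is
  // used at the bottom of the outer loop to rebase the pointers.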

  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);
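  // With stride 2 and a 3-row kernel, output row n reads input rows 2n-1, 2n
  // and 2n+1, so the four output rows of a tile consume the nine input rows
  // i0-i8. When padding_top == 1, i0 is redirected to the zero row.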

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);
  float* o2 = (float*) ((uintptr_t) o1 + output_width);
  float* o3 = (float*) ((uintptr_t) o2 + output_width);

  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
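  // Standard convolution arithmetic: (padded height - kernel size) / stride
  // + 1, with the "+ 1" folded into the numerator as "+ 2 (subsampling)".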
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 5) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i4 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i5 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 8) {
      i6 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 9) {
      i7 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 10) {
      i8 = zero;
    }
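    // Input rows past the (padded) bottom of the image read from the zero
    // row; output rows past the last valid row alias the previous row. The
    // stores below are issued o3 first and o0 last, so an aliased row's
    // results are always overwritten by the valid row's data.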

    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;
    v128_t vi3x1357 = vzero;
    v128_t vi4x1357 = vzero;
    v128_t vi5x1357 = vzero;
    v128_t vi6x1357 = vzero;
    v128_t vi7x1357 = vzero;
    v128_t vi8x1357 = vzero;
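    // vi*x1357 carry the odd-indexed pixels of the previous 8-pixel block,
    // i.e. the left neighbors of the current block's first output. Starting
    // them at zero implements the 1-pixel left padding.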

    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
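      // Each accumulator starts from the bias (lane 0 of vw0123) splatted
      // across all four output pixels.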

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      i4 += 8;
      const v128_t vi5x89AB = wasm_v128_load(i5);
      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
      i5 += 8;
      const v128_t vi6x89AB = wasm_v128_load(i6);
      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
      i6 += 8;
      const v128_t vi7x89AB = wasm_v128_load(i7);
      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
      i7 += 8;
      const v128_t vi8x89AB = wasm_v128_load(i8);
      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
      i8 += 8;

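      // De-interleave each row's 8 pixels: 8ACE holds the even-indexed
      // pixels (the centers of the 4 outputs, given stride 2), 9BDF holds
      // the odd-indexed pixels (their right neighbors).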
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
      const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
      const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
      const v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
      const v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);

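      // Apply the 3x3 kernel one column at a time: the center column
      // (k01, k11, k21) to the even pixels here, then the left column
      // (k00, k10, k20) to 7BDF and the right column (k02, k12, k22) to
      // 9BDF below.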
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;
      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      vi5x1357 = vi5x9BDF;
      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
      vi6x1357 = vi6x9BDF;
      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
      vi7x1357 = vi7x9BDF;
      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
      vi8x1357 = vi8x9BDF;
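      // 7BDF = the left neighbors: the previous block's last odd pixel
      // (lane 3 of vi*x1357) shifted in ahead of pixels 9, B and D. vi*x1357
      // then carries this block's odd pixels into the next iteration.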

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));


      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
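      // Clamp to [min, max] with compare + bitselect rather than
      // wasm_f32x4_{min,max}; this is the "x86" flavor of the kernel, where
      // the min/max instructions tend to lower to expensive NaN-handling
      // sequences.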

      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      const v128_t vi5x89AB = wasm_v128_load(i5);
      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
      const v128_t vi6x89AB = wasm_v128_load(i6);
      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
      const v128_t vi7x89AB = wasm_v128_load(i7);
      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
      const v128_t vi8x89AB = wasm_v128_load(i8);
      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);

      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
      const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
      const v128_t vi7x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));
      const v128_t vi8x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6));
      const v128_t vi8x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7));
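      // vmask_even/vmask_odd zero the lanes that fall past the end of the
      // row, so the partial block contributes only valid pixels.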

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));


      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));

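      // With r leftover input pixels (1-7), ceil(r / 2) output pixels are
      // valid. Bumping w by one float lets the bit tests below select a 4-,
      // 2- and/or 1-pixel store; after the 2-pixel store the vector is
      // rotated so the next valid pixel lands in lane 0.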
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o3, vo3); o3 += 4;
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

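    // Rebase the input pointers for the next group of 4 output rows: i8 has
    // advanced by exactly input_decrement, so i8 - input_decrement is the
    // start of input row 8 of this group, which is row 0 of the next one.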
    i0 = (const float*) ((uintptr_t) i8 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);
    i7 = (const float*) ((uintptr_t) i6 + input_width);
    i8 = (const float*) ((uintptr_t) i7 + input_width);

    o0 = o3;
    o1 = (float*) ((uintptr_t) o0 + output_width);
    o2 = (float*) ((uintptr_t) o1 + output_width);
    o3 = (float*) ((uintptr_t) o2 + output_width);

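    // doz(a, b) (difference-or-zero, from <xnnpack/math.h>) computes
    // max(a - b, 0) without underflowing the unsigned operands.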
    output_height = doz(output_height, 4);
    padded_input_height = doz(padded_input_height, 8);
  } while (output_height != 0);
}