// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>



void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_2x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

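  // Load the partial-vector tail mask and the output clamping bounds.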
  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

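  // weights[0] is the bias; weights[1..25] are the 5x5 filter taps, row-major.
  // The last vector splats the final two taps into both 64-bit lanes.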
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

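  // Rows are consumed in whole 4-float vectors, so when rebasing the row
  // pointers step back by input_width rounded up to a 16-byte multiple.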
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

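  // padding_top == 2: the first two context rows read from the zero buffer.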
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);

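  // Two output rows are produced per iteration of the outer loop.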
  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);

  size_t output_height = input_height;
  do {
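    // Substitute the zero row for input rows past the bottom edge; when only
    // one output row remains, o1 aliases o0 so the extra row's stores are
    // overwritten by o0's.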
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i5 = zero;
    }

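    // The two columns of left padding start out as zeroes.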
    v128_t vi0x0123 = vzero;
    v128_t vi1x0123 = vzero;
    v128_t vi2x0123 = vzero;
    v128_t vi3x0123 = vzero;
    v128_t vi4x0123 = vzero;
    v128_t vi5x0123 = vzero;

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;

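    // Main loop: 4 output columns for both rows per iteration. It runs while
    // more than 8 columns remain, so loading 4 more stays in bounds and a
    // 5..8 column block is always left for the masked tail.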
    size_t w = input_width;
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
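      // Start both row accumulators from the bias (weights[0]).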
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

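      // Columns 3..6 (window shifted one left): multiply by the column-1 taps.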
      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

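      // Columns 2..5 (shifted two left): column-0 taps; the x0123 buffers
      // rotate forward as they are consumed.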
      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      vi5x0123 = vi5x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

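      // Columns 5..8 (shifted one right): column-3 taps.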
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

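      // Columns 6..9 (shifted two right): column-4 taps; the x4567 buffers
      // rotate forward as they are consumed.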
      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
      vi5x4567 = vi5x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));

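      // Clamp the accumulators to the [min, max] output range.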
      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);

      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

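      // Load the next 4 columns and mask out lanes past the end of the row.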
      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;

      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      vi5x0123 = vi5x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
      vi5x4567 = vi5x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));


      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);

      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;

      w -= 4 * sizeof(float);
    }
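    // Process the last 1..4 columns, already held in the x4567 buffers.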
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

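      // Zero the lanes past the end of the row.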
      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
      vi5x4567 = wasm_v128_and(vmask, vi5x4567);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

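      // Shift right, filling the vacated lanes with zeroes: no input columns
      // remain past this block.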
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));


      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);

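      // Store 4, 2, and/or 1 elements depending on the remaining width.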
      if XNN_LIKELY(w & (4 * sizeof(float))) {
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

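    // Rebase the row pointers two rows down, undoing the rounded-up advance.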
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);

    o0 = o1;
    o1 = (float*) ((uintptr_t) o0 + input_width);

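    // doz() is a saturating (difference-or-zero) subtraction.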
    output_height = doz(output_height, 2);
  } while (output_height != 0);
}