// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>


#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>



void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_5x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

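  // Weight layout: weights[0] is the shared bias, weights[1..9] are the 3x3
  // filter taps in row-major order (k00..k22); each value is splatted across a vector.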
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

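  // With padding_top == 1, the top-padding row is read from the caller-provided
  // zero buffer; i1..i6 walk consecutive input rows (input_width is in bytes).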
  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);
  float* o2 = (float*) ((uintptr_t) o1 + input_width);
  float* o3 = (float*) ((uintptr_t) o2 + input_width);
  float* o4 = (float*) ((uintptr_t) o3 + input_width);

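  // Each outer iteration computes up to 5 output rows. When fewer rows remain,
  // the surplus input rows are redirected to the zero buffer and the surplus
  // output pointers alias the last valid row, whose store is issued last and wins.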
  size_t output_height = input_height;
  do {
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i3 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i4 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i5 = zero;
      o4 = o3;
    }
    if XNN_UNPREDICTABLE(output_height < 6) {
      i6 = zero;
    }

    v128_t vi0x0123 = vzero;
    v128_t vi1x0123 = vzero;
    v128_t vi2x0123 = vzero;
    v128_t vi3x0123 = vzero;
    v128_t vi4x0123 = vzero;
    v128_t vi5x0123 = vzero;
    v128_t vi6x0123 = vzero;

    v128_t vi0x4567 = wasm_v128_load(i0);
    i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1);
    i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2);
    i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3);
    i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4);
    i4 += 4;
    v128_t vi5x4567 = wasm_v128_load(i5);
    i5 += 4;
    v128_t vi6x4567 = wasm_v128_load(i6);
    i6 += 4;

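    // Main loop: produce 4 output pixels per row per iteration. Each row keeps a
    // sliding window of three vectors (x0123 | x4567 | x89AB) so the left, center,
    // and right neighbors needed by the 3-wide filter are all available.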
    size_t w = input_width;
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
      const v128_t vi0x89AB = wasm_v128_load(i0);
      i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3);
      i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4);
      i4 += 4;
      const v128_t vi5x89AB = wasm_v128_load(i5);
      i5 += 4;
      const v128_t vi6x89AB = wasm_v128_load(i6);
      i6 += 4;

      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));

      vi0x0123 = vi0x4567;
      vi1x0123 = vi1x4567;
      vi2x0123 = vi2x4567;
      vi3x0123 = vi3x4567;
      vi4x0123 = vi4x4567;
      vi5x0123 = vi5x4567;
      vi6x0123 = vi6x4567;

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;
      vi3x4567 = vi3x89AB;
      vi4x4567 = vi4x89AB;
      vi5x4567 = vi5x89AB;
      vi6x4567 = vi6x89AB;


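      // Clamp the accumulators to the [min, max] output range.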
      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));

      wasm_v128_store(o4, vo4);
      o4 += 4;
      wasm_v128_store(o3, vo3);
      o3 += 4;
      wasm_v128_store(o2, vo2);
      o2 += 4;
      wasm_v128_store(o1, vo1);
      o1 += 4;
      wasm_v128_store(o0, vo0);
      o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
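      // Zero the lanes beyond the end of the row so they contribute nothing to the sums.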
      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
      vi6x4567 = wasm_v128_and(vmask, vi6x4567);

      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
      v128_t vo4p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi4x4567, vk01));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, vk11));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, vk21));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, vk00));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, vk10));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, vk20));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, vk02));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, vk12));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, vk22));


      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      v128_t vo2 = wasm_v128_bitselect(vmin, vo2p0, wasm_f32x4_lt(vo2p0, vmin));
      v128_t vo3 = wasm_v128_bitselect(vmin, vo3p0, wasm_f32x4_lt(vo3p0, vmin));
      v128_t vo4 = wasm_v128_bitselect(vmin, vo4p0, wasm_f32x4_lt(vo4p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));
      vo2 = wasm_v128_bitselect(vo2, vmax, wasm_f32x4_le(vo2, vmax));
      vo3 = wasm_v128_bitselect(vo3, vmax, wasm_f32x4_le(vo3, vmax));
      vo4 = wasm_v128_bitselect(vo4, vmax, wasm_f32x4_le(vo4, vmax));

      if XNN_LIKELY(w == 4 * sizeof(float)) {
        wasm_v128_store(o4, vo4);
        o4 += 4;
        wasm_v128_store(o3, vo3);
        o3 += 4;
        wasm_v128_store(o2, vo2);
        o2 += 4;
        wasm_v128_store(o1, vo1);
        o1 += 4;
        wasm_v128_store(o0, vo0);
        o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0);
          o4 += 2;
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0);
          o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0);
          o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0);
          o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0);
          o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o4 = wasm_f32x4_extract_lane(vo4, 0);
          o4 += 1;
          *o3 = wasm_f32x4_extract_lane(vo3, 0);
          o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0);
          o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0);
          o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0);
          o0 += 1;
        }
      }
    }

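    // Step the input window down by 5 rows: i5/i6 were advanced past the end of
    // their rows, so rewinding them by input_decrement yields the first two input
    // rows of the next block; the remaining rows follow at input_width strides.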
    i0 = (const float*) ((uintptr_t) i5 - input_decrement);
    i1 = (const float*) ((uintptr_t) i6 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);

    o0 = o4;
    o1 = (float*) ((uintptr_t) o0 + input_width);
    o2 = (float*) ((uintptr_t) o1 + input_width);
    o3 = (float*) ((uintptr_t) o2 + input_width);
    o4 = (float*) ((uintptr_t) o3 + input_width);

    output_height = doz(output_height, 5);
  } while (output_height != 0);
}