// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5s2p2-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


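// 5x5 depthwise convolution, stride 2, padding 2, on CHW (channels-first) f32
// data: 1 output row x 4 output pixels per iteration, with 3 partial
// accumulators ("acc3"). The "arm" suffix selects the clamping variant built on
// wasm_f32x4_min/max, which lowers well on ARM targets.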
void xnn_f32_dwconv2d_chw_ukernel_5x5s2p2__wasmsimd_arm_splat_1x4_acc3(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 1);
  assert(padding_top <= 2);

  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

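  // weights[0] is the bias; weights[1..25] hold the 25 filter taps in row-major
  // order. vwOP splats the last two taps, k(4,3) and k(4,4), into both halves.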
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

  const uint32_t padding_top_less_1 = padding_top - 1;
  const size_t input_decrement = round_up_po2(input_width, 8 * sizeof(float));

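  // Rows inside the implicit top padding read from the zero row: i0 always
  // does, and i1 does as well when padding_top == 2.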
  const float* i0 = zero;
  const float* i1 = (const float*) ((uintptr_t) input - ((-padding_top_less_1) & input_width));
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  if XNN_UNPREDICTABLE(padding_top_less_1 != 0) {
    i1 = zero;
  }
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;

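  // output_height is (padded_input_height - 5 /* kernel */) / 2 /* stride */ + 1,
  // folded into a single expression.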
  size_t padded_input_height = input_height + (padding_top_less_1 + 1) + 2 /* padding bottom */;
  size_t output_height = (padded_input_height - 5 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
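    // Rows past the bottom edge of the padded input read from the zero row.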
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i4 = zero;
    }

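    // vi*x0246/vi*x1357 hold the even/odd columns of the previous 8-pixel block;
    // starting them at zero supplies the two columns of implicit left padding.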
    v128_t vi0x0246 = vzero;
    v128_t vi1x0246 = vzero;
    v128_t vi2x0246 = vzero;
    v128_t vi3x0246 = vzero;
    v128_t vi4x0246 = vzero;

    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;
    v128_t vi3x1357 = vzero;
    v128_t vi4x1357 = vzero;

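    // Prime the pipeline: load the first 8 pixels of each of the 5 input rows.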
    const v128_t vi0x89AB = wasm_v128_load(i0);
    const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
    i0 += 8;
    const v128_t vi1x89AB = wasm_v128_load(i1);
    const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
    i1 += 8;
    const v128_t vi2x89AB = wasm_v128_load(i2);
    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
    i2 += 8;
    const v128_t vi3x89AB = wasm_v128_load(i3);
    const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
    i3 += 8;
    const v128_t vi4x89AB = wasm_v128_load(i4);
    const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
    i4 += 8;

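    // De-interleave each row into even columns (x8ACE, the output centers for
    // stride 2) and odd columns (x9BDF).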
    v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
    v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
    v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
    v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
    v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
    v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
    v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
    v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
    v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
    v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);

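    // Main loop: each iteration consumes 8 input pixels and stores 4 output
    // pixels. vo0p0 starts from the bias; products are spread across the three
    // accumulators vo0p0..vo0p2 to shorten dependency chains.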
    size_t w = input_width;
    for (; w > 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

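      // Columns two left of the centers (kernel column 0), spliced from the
      // previous block's last even column and the current even columns.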
      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
      vi0x0246 = vi0x8ACE;
      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
      vi1x0246 = vi1x8ACE;
      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
      vi2x0246 = vi2x8ACE;
      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
      vi3x0246 = vi3x8ACE;
      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);
      vi4x0246 = vi4x8ACE;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

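      // Columns one left of the centers (kernel column 1).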
      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;

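      // Load and de-interleave the next 8 pixels of each row.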
      const v128_t vi0xGHIJ = wasm_v128_load(i0);
      const v128_t vi0xKLMN = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1xGHIJ = wasm_v128_load(i1);
      const v128_t vi1xKLMN = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2xGHIJ = wasm_v128_load(i2);
      const v128_t vi2xKLMN = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3xGHIJ = wasm_v128_load(i3);
      const v128_t vi3xKLMN = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4xGHIJ = wasm_v128_load(i4);
      const v128_t vi4xKLMN = wasm_v128_load(i4 + 4);
      i4 += 8;

      const v128_t vi0xGIKM = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 0, 2, 4, 6);
      const v128_t vi0xHJLN = wasm_v32x4_shuffle(vi0xGHIJ, vi0xKLMN, 1, 3, 5, 7);
      const v128_t vi1xGIKM = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 0, 2, 4, 6);
      const v128_t vi1xHJLN = wasm_v32x4_shuffle(vi1xGHIJ, vi1xKLMN, 1, 3, 5, 7);
      const v128_t vi2xGIKM = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 0, 2, 4, 6);
      const v128_t vi2xHJLN = wasm_v32x4_shuffle(vi2xGHIJ, vi2xKLMN, 1, 3, 5, 7);
      const v128_t vi3xGIKM = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 0, 2, 4, 6);
      const v128_t vi3xHJLN = wasm_v32x4_shuffle(vi3xGHIJ, vi3xKLMN, 1, 3, 5, 7);
      const v128_t vi4xGIKM = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 0, 2, 4, 6);
      const v128_t vi4xHJLN = wasm_v32x4_shuffle(vi4xGHIJ, vi4xKLMN, 1, 3, 5, 7);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

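      // Columns two right of the centers (kernel column 4), spliced from the
      // current even columns and the next block's first even column.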
      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vi0xGIKM, 1, 2, 3, 4);
      vi0x8ACE = vi0xGIKM;
      vi0x9BDF = vi0xHJLN;
      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vi1xGIKM, 1, 2, 3, 4);
      vi1x8ACE = vi1xGIKM;
      vi1x9BDF = vi1xHJLN;
      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vi2xGIKM, 1, 2, 3, 4);
      vi2x8ACE = vi2xGIKM;
      vi2x9BDF = vi2xHJLN;
      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vi3xGIKM, 1, 2, 3, 4);
      vi3x8ACE = vi3xGIKM;
      vi3x9BDF = vi3xHJLN;
      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vi4xGIKM, 1, 2, 3, 4);
      vi4x8ACE = vi4xGIKM;
      vi4x9BDF = vi4xHJLN;

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 1-8 pixels to process.
    assert(w <= 8 * sizeof(float));
    assert(w >= 1 * sizeof(float));
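    // Mask out input columns at or beyond the end of the row so they contribute
    // zero, then repeat the main-loop arithmetic once for the tail.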
    {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      vi0x8ACE = wasm_v128_and(vmask_even, vi0x8ACE);
      vi1x8ACE = wasm_v128_and(vmask_even, vi1x8ACE);
      vi2x8ACE = wasm_v128_and(vmask_even, vi2x8ACE);
      vi3x8ACE = wasm_v128_and(vmask_even, vi3x8ACE);
      vi4x8ACE = wasm_v128_and(vmask_even, vi4x8ACE);

      vi0x9BDF = wasm_v128_and(vmask_odd, vi0x9BDF);
      vi1x9BDF = wasm_v128_and(vmask_odd, vi1x9BDF);
      vi2x9BDF = wasm_v128_and(vmask_odd, vi2x9BDF);
      vi3x9BDF = wasm_v128_and(vmask_odd, vi3x9BDF);
      vi4x9BDF = wasm_v128_and(vmask_odd, vi4x9BDF);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3));

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

      const v128_t vi0x68AC = wasm_v32x4_shuffle(vi0x0246, vi0x8ACE, 3, 4, 5, 6);
      const v128_t vi1x68AC = wasm_v32x4_shuffle(vi1x0246, vi1x8ACE, 3, 4, 5, 6);
      const v128_t vi2x68AC = wasm_v32x4_shuffle(vi2x0246, vi2x8ACE, 3, 4, 5, 6);
      const v128_t vi3x68AC = wasm_v32x4_shuffle(vi3x0246, vi3x8ACE, 3, 4, 5, 6);
      const v128_t vi4x68AC = wasm_v32x4_shuffle(vi4x0246, vi4x8ACE, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x68AC, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x68AC, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x68AC, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x68AC, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x68AC, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

      const v128_t vi0x79BD = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x79BD = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x79BD = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x79BD = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x79BD = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x79BD, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x79BD, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x79BD, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x79BD, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x79BD, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

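      // Right neighbors past the row end come from vzero (implicit right padding).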
      const v128_t vi0xACEG = wasm_v32x4_shuffle(vi0x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi1xACEG = wasm_v32x4_shuffle(vi1x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi2xACEG = wasm_v32x4_shuffle(vi2x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi3xACEG = wasm_v32x4_shuffle(vi3x8ACE, vzero, 1, 2, 3, 4);
      const v128_t vi4xACEG = wasm_v32x4_shuffle(vi4x8ACE, vzero, 1, 2, 3, 4);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0xACEG, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1xACEG, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2xACEG, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3xACEG, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4xACEG, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);

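      // w_tmp counts the valid output pixels in this block: one output per two
      // remaining input pixels, rounded up (1 to 4).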
      size_t w_tmp = (w + 1 * sizeof(float)) / (2 * sizeof(float));
      if XNN_LIKELY(w_tmp >= 4) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w_tmp & 2) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w_tmp & 1) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

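    // Advance two input rows (stride 2): the new i0 is the start of the old
    // i2's row. input_decrement undoes the 8-pixel-aligned distance walked
    // along each row.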
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i3 - input_decrement);
    i2 = (const float*) ((uintptr_t) i4 - input_decrement);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);

    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}