// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3(size_t input_height,size_t input_width,const float * input,const float * weights,const float * zero,float * output,uint32_t padding_top,const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS (1)])19 void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_2x4_acc3(
20 size_t input_height,
21 size_t input_width,
22 const float* input,
23 const float* weights,
24 const float* zero,
25 float* output,
26 uint32_t padding_top,
27 const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
28 {
29 assert(input_height != 0);
30 assert(input_width != 0);
31 assert(input_width % sizeof(float) == 0);
32 assert(padding_top == 2);
33
34 const v128_t vmask = wasm_v128_load(params->scalar.mask);
35 const v128_t vmax = wasm_v32x4_load_splat(¶ms->scalar.max);
36 const v128_t vmin = wasm_v32x4_load_splat(¶ms->scalar.min);
37
38 const v128_t vw0123 = wasm_v128_load(weights);
39 const v128_t vw4567 = wasm_v128_load(weights + 4);
40 const v128_t vw89AB = wasm_v128_load(weights + 8);
41 const v128_t vwCDEF = wasm_v128_load(weights + 12);
42 const v128_t vwGHIJ = wasm_v128_load(weights + 16);
43 const v128_t vwKLMN = wasm_v128_load(weights + 20);
44 const v128_t vwOP = wasm_v64x2_load_splat(weights + 24);
45 const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
46 const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
47 const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
48 const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
49 const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
50 const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
51 const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
52 const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
53 const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
54 const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
55 const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
56 const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
57 const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
58 const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
59 const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
60 const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
61 const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
62 const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
63 const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
64 const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
65 const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
66 const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
67 const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
68 const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
69 const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
70 const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);
71
72 const v128_t vzero = wasm_f32x4_splat(0.0f);
73
74 const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));
75
76 const float* i0 = zero;
77 const float* i1 = zero;
78 const float* i2 = input;
79 const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
80 const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
81 const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
82
83 float* o0 = output;
84 float* o1 = (float*) ((uintptr_t) o0 + input_width);
85
86 size_t output_height = input_height;
87 do {
88 if XNN_UNPREDICTABLE(output_height < 2) {
89 i3 = zero;
90 o1 = o0;
91 }
92 if XNN_UNPREDICTABLE(output_height < 3) {
93 i4 = zero;
94 }
95 if XNN_UNPREDICTABLE(output_height < 4) {
96 i5 = zero;
97 }
98
99 v128_t vi0x0123 = vzero;
100 v128_t vi1x0123 = vzero;
101 v128_t vi2x0123 = vzero;
102 v128_t vi3x0123 = vzero;
103 v128_t vi4x0123 = vzero;
104 v128_t vi5x0123 = vzero;
105
106 v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
107 v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
108 v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
109 v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
110 v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
111 v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
112
113 size_t w = input_width;
114 for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
115 v128_t vo0p0 = vbias;
116 v128_t vo1p0 = vbias;
117
118 const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
119 const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
120 const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
121 const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
122 const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
123 const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
124
125 v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
126 v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
127
128 v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
129 v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
130
131 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
132 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
133
134 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
135 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
136
137 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
138 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
139
140 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
141 const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
142 const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
143 const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
144 const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
145 const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
146
147 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
148 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
149
150 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
151 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
152
153 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
154 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
155
156 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
157 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
158
159 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
160 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
161
162 const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
163 vi0x0123 = vi0x4567;
164 const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
165 vi1x0123 = vi1x4567;
166 const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
167 vi2x0123 = vi2x4567;
168 const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
169 vi3x0123 = vi3x4567;
170 const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
171 vi4x0123 = vi4x4567;
172 const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
173 vi5x0123 = vi5x4567;
174
175 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
176 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
177
178 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
179 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
180
181 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
182 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
183
184 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
185 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
186
187 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
188 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
189
190 const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
191 const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
192 const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
193 const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
194 const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
195 const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
196
197 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
198 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
199
200 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
201 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
202
203 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
204 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
205
206 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
207 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
208
209 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
210 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
211
212 const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
213 vi0x4567 = vi0x89AB;
214 const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
215 vi1x4567 = vi1x89AB;
216 const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
217 vi2x4567 = vi2x89AB;
218 const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
219 vi3x4567 = vi3x89AB;
220 const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
221 vi4x4567 = vi4x89AB;
222 const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
223 vi5x4567 = vi5x89AB;
224
225 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
226 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
227
228 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
229 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
230
231 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
232 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
233
234 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
235 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
236
237 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
238 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
239
240 vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
241 vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
242 vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
243 vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
244
245 v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
246 v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
247 vo0 = wasm_f32x4_min(vo0, vmax);
248 vo1 = wasm_f32x4_min(vo1, vmax);
249
250 wasm_v128_store(o1, vo1); o1 += 4;
251 wasm_v128_store(o0, vo0); o0 += 4;
252 }
253 // Always process the last block of 5..8 pixels.
254 if XNN_LIKELY(w > 4 * sizeof(float)) {
255 v128_t vo0p0 = vbias;
256 v128_t vo1p0 = vbias;
257
258 v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
259 v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
260 v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
261 v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
262 v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
263 v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
264
265 vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
266 vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
267 vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
268 vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
269 vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
270 vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
271
272 v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
273 v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
274
275 v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
276 v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
277
278 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
279 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
280
281 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
282 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
283
284 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
285 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
286
287 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
288 const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
289 const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
290 const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
291 const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
292 const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
293
294 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
295 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
296
297 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
298 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
299
300 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
301 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
302
303 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
304 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
305
306 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
307 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
308
309 const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
310 vi0x0123 = vi0x4567;
311 const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
312 vi1x0123 = vi1x4567;
313 const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
314 vi2x0123 = vi2x4567;
315 const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
316 vi3x0123 = vi3x4567;
317 const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
318 vi4x0123 = vi4x4567;
319 const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
320 vi5x0123 = vi5x4567;
321
322 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
323 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
324
325 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
326 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
327
328 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
329 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
330
331 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
332 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
333
334 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
335 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
336
337 const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
338 const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
339 const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
340 const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
341 const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
342 const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
343
344 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
345 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
346
347 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
348 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
349
350 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
351 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
352
353 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
354 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
355
356 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
357 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
358
359 const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
360 vi0x4567 = vi0x89AB;
361 const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
362 vi1x4567 = vi1x89AB;
363 const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
364 vi2x4567 = vi2x89AB;
365 const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
366 vi3x4567 = vi3x89AB;
367 const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
368 vi4x4567 = vi4x89AB;
369 const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
370 vi5x4567 = vi5x89AB;
371
372 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
373 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
374
375 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
376 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
377
378 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
379 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
380
381 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
382 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
383
384 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
385 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
386
387 vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
388 vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
389 vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
390 vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
391
392 v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
393 v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
394 vo0 = wasm_f32x4_min(vo0, vmax);
395 vo1 = wasm_f32x4_min(vo1, vmax);
396
397 wasm_v128_store(o1, vo1); o1 += 4;
398 wasm_v128_store(o0, vo0); o0 += 4;
399
400 w -= 4 * sizeof(float);
401 }
402 assert(w >= 1 * sizeof(float));
403 assert(w <= 4 * sizeof(float));
404 {
405 v128_t vo0p0 = vbias;
406 v128_t vo1p0 = vbias;
407
408 vi0x4567 = wasm_v128_and(vmask, vi0x4567);
409 vi1x4567 = wasm_v128_and(vmask, vi1x4567);
410 vi2x4567 = wasm_v128_and(vmask, vi2x4567);
411 vi3x4567 = wasm_v128_and(vmask, vi3x4567);
412 vi4x4567 = wasm_v128_and(vmask, vi4x4567);
413 vi5x4567 = wasm_v128_and(vmask, vi5x4567);
414
415 v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
416 v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
417
418 v128_t vo0p2 = wasm_f32x4_mul(vi1x4567, vk12);
419 v128_t vo1p2 = wasm_f32x4_mul(vi2x4567, vk12);
420
421 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x4567, vk22));
422 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x4567, vk22));
423
424 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x4567, vk32));
425 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x4567, vk32));
426
427 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
428 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
429
430 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
431 const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
432 const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
433 const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
434 const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
435 const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
436
437 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
438 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
439
440 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x3456, vk11));
441 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x3456, vk11));
442
443 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk21));
444 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk21));
445
446 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x3456, vk31));
447 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x3456, vk31));
448
449 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x3456, vk41));
450 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x3456, vk41));
451
452 const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
453 const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
454 const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
455 const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
456 const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
457 const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
458
459 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
460 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
461
462 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
463 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
464
465 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x2345, vk20));
466 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi3x2345, vk20));
467
468 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, vk30));
469 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x2345, vk30));
470
471 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x2345, vk40));
472 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x2345, vk40));
473
474 const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
475 const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
476 const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
477 const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
478 const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
479 const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
480
481 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x5678, vk03));
482 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi1x5678, vk03));
483
484 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
485 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
486
487 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
488 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
489
490 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi3x5678, vk33));
491 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi4x5678, vk33));
492
493 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, vk43));
494 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x5678, vk43));
495
496 const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
497 const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
498 const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
499 const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
500 const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
501 const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
502
503 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x6789, vk04));
504 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x6789, vk04));
505
506 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x6789, vk14));
507 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi2x6789, vk14));
508
509 vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
510 vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
511
512 vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
513 vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
514
515 vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi4x6789, vk44));
516 vo1p2 = wasm_f32x4_add(vo1p2, wasm_f32x4_mul(vi5x6789, vk44));
517
518 vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
519 vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
520 vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);
521 vo1p0 = wasm_f32x4_add(vo1p0, vo1p2);
522
523 v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
524 v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
525 vo0 = wasm_f32x4_min(vo0, vmax);
526 vo1 = wasm_f32x4_min(vo1, vmax);
527
528 if XNN_LIKELY(w & (4 * sizeof(float))) {
529 wasm_v128_store(o1, vo1); o1 += 4;
530 wasm_v128_store(o0, vo0); o0 += 4;
531 } else {
532 if (w & (2 * sizeof(float))) {
533 *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
534 *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;
535
536 vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
537 vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
538 }
539 if (w & (1 * sizeof(float))) {
540 *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
541 *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
542 }
543 }
544 }
545
546 i0 = (const float*) ((uintptr_t) i2 - input_decrement);
547 i1 = (const float*) ((uintptr_t) i3 - input_decrement);
548 i2 = (const float*) ((uintptr_t) i1 + input_width);
549 i3 = (const float*) ((uintptr_t) i2 + input_width);
550 i4 = (const float*) ((uintptr_t) i3 + input_width);
551 i5 = (const float*) ((uintptr_t) i4 + input_width);
552
553 o0 = o1;
554 o1 = (float*) ((uintptr_t) o0 + input_width);
555
556 output_height = doz(output_height, 2);
557 } while (output_height != 0);
558 }
559