// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-loadsplat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>


#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


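// 3x3 depthwise convolution over a single CHW-layout channel with stride 1 and one row of top
// padding. "loadsplat" pre-splats the bias and all 9 filter taps into vectors up front; "4x4"
// means each inner-loop iteration computes a tile of 4 output rows by 4 output pixels.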
void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_4x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

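  // params->scalar holds the lane mask for the final partial block and the output clamping bounds.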
  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

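  // weights layout: [bias, k00, k01, k02, k10, k11, k12, k20, k21, k22]; splat each tap across all lanes.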
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

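  // The inner loop advances each row pointer by input_width rounded up to a whole 4-pixel block,
  // so subtracting input_decrement rewinds a pointer to the start of its row.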
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

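  // padding_top == 1: the row above the image reads from the zero buffer; i1..i5 cover input rows 0..4.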
  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);

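  // Four output rows are produced per pass over the width.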
  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);
  float* o2 = (float*) ((uintptr_t) o1 + input_width);
  float* o3 = (float*) ((uintptr_t) o2 + input_width);

  size_t output_height = input_height;
  do {
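    // Near the bottom of the image, alias out-of-range input rows to the zero buffer and collapse
    // the extra output pointers onto valid ones so their stores stay in bounds.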
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i3 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i4 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i5 = zero;
    }

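    // Implicit left padding: the lane shifted in from the "previous" block starts out as zero.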
    v128_t vi0x0123 = vzero;
    v128_t vi1x0123 = vzero;
    v128_t vi2x0123 = vzero;
    v128_t vi3x0123 = vzero;
    v128_t vi4x0123 = vzero;
    v128_t vi5x0123 = vzero;

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;

    size_t w = input_width;
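    // Main loop: while more than one full 4-pixel block remains, compute a 4x4 tile of outputs per iteration.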
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;

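      // Center filter column (k01, k11, k21): output row n accumulates bias plus input rows n..n+2.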
      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));

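      // Left filter column (k00, k10, k20): shift the window one pixel left, pulling in the last pixel of the previous block.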
      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));

      vi0x0123 = vi0x4567;
      vi1x0123 = vi1x4567;
      vi2x0123 = vi2x4567;
      vi3x0123 = vi3x4567;
      vi4x0123 = vi4x4567;
      vi5x0123 = vi5x4567;

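      // Right filter column (k02, k12, k22): shift one pixel right, pulling in the first pixel of the next block.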
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;
      vi3x4567 = vi3x89AB;
      vi4x4567 = vi4x89AB;
      vi5x4567 = vi5x89AB;

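      // Clamp the accumulators to the output range [min, max] and store 4 pixels to each output row.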
      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);
      vo3 = wasm_f32x4_min(vo3, vmax);

      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
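      // Zero the lanes past the end of the row so they contribute nothing to the dot products.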
      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
      vi5x4567 = wasm_v128_and(vmask, vi5x4567);

      v128_t vo0p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi0x4567, vk01));
      v128_t vo1p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi1x4567, vk01));
      v128_t vo2p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi2x4567, vk01));
      v128_t vo3p0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vi3x4567, vk01));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk11));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk21));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk21));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, vk00));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk10));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk10));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, vk20));

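      // No next block remains: shift in explicit zeros for the pixel to the right of the row end.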
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, vk02));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, vk02));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk12));
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, vk22));

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);
      vo3 = wasm_f32x4_min(vo3, vmax);

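      // Store the tail: a full vector when exactly 4 pixels remain; otherwise 2 pixels via the low
      // 64-bit lane (rotating the upper half down afterwards), then a final single pixel if needed.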
      if XNN_LIKELY(w == 4 * sizeof(float)) {
        wasm_v128_store(o3, vo3); o3 += 4;
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

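    // Advance to the next group of 4 output rows: rewinding i4/i5 by input_decrement lands on the
    // rows that become i0/i1, i.e. the rows overlapped by the next group's 3x3 windows.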
    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);

    o0 = o3;
    o1 = (float*) ((uintptr_t) o0 + input_width);
    o2 = (float*) ((uintptr_t) o1 + input_width);
    o3 = (float*) ((uintptr_t) o2 + input_width);

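    // doz() is saturating ("difference or zero") subtraction, so short images count down to 0 without wrapping.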
    output_height = doz(output_height, 4);
  } while (output_height != 0);
}