// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

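// Depthwise 3x3 convolution with stride 2 and padding 1 over f32 data in CHW
// layout, specialized for WebAssembly SIMD. Each filter tap is broadcast to
// all four lanes ("loadsplat"); the kernel produces one output row per
// outer-loop iteration and four output pixels per inner-loop iteration,
// spread across four partial accumulators ("1x4_acc4").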
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

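  // Weight layout: weights[0] is the shared bias, weights[1..9] are the 3x3
  // filter taps in row-major order. Each value is broadcast to all four lanes.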
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

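  // Bytes by which the row pointers advance inside the full-width main loop
  // (the remainder block reads without advancing); used at the end of each
  // output row to rebase i0 at the start of the row that i2 points into.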
  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));

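  // With padding_top == 1, (-padding_top) & input_width equals input_width, so
  // i0 initially points one row above the input and is then redirected to the
  // zero row; with padding_top == 0 it points at the first input row.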
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);

  float* o0 = output;

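  // Standard convolution output size, (padded_height - kernel) / stride + 1,
  // with one implicit row of bottom padding.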
  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }

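    // vi*x1357 carry the odd-indexed columns of the previous 8-column block.
    // They start out as zero so that the implicit left-padding column is read
    // as zero by the first iteration.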
    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;

    size_t w = input_width;
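    // Main loop: read 8 consecutive input columns from each of the 3 rows and
    // compute 4 output pixels (stride 2).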
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;

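      // De-interleave into even-indexed columns (the window centers for
      // stride 2) and odd-indexed columns (the right neighbors).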
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);

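      // Left neighbors of the four window centers: the last odd column of the
      // previous block followed by the first three odd columns of this block;
      // the current odd columns are then saved for the next iteration.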
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, vk10));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));

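      // Reduce the four partial accumulators.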
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

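      // Clamp the result to [min, max] using bitselect-based min/max.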
      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);

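      // Same de-interleave as the main loop, but with the columns past the
      // end of the row zeroed out by the even/odd masks.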
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, vk11);

      v128_t vo0p3 = wasm_f32x4_mul(vi2x8ACE, vk21);

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi1x7BDF, vk10));

      vo0p3 = wasm_f32x4_add(vo0p3, wasm_f32x4_mul(vi2x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi2x9BDF, vk22));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p2 = wasm_f32x4_add(vo0p2, vo0p3);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));

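      // With stride 2, the remaining 1-7 input pixels yield ceil(pixels / 2)
      // outputs; bumping w by one float lets the power-of-two tests below
      // select stores of 4, 2, and 1 elements.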
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

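    // Advance two input rows (stride 2): the next top row i0 is the row that
    // i2 currently points into, rewound to the row start by input_decrement.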
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);


    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}