// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


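// Depthwise 3x3 convolution with stride 2 and 1 pixel of implicit zero
// padding, operating on a single channel in CHW layout. Each pass of the
// outer loop produces one output row ("1x4" = 1 row, 4 output pixels per
// inner-loop iteration), spreading the nine multiplies across three
// accumulators ("acc3") to shorten the dependency chain.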
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

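  // The packed weights are the bias followed by the nine 3x3 filter taps in
  // row-major order: vw0123 = { bias, k00, k01, k02 },
  // vw4567 = { k10, k11, k12, k20 }, and vw89 = { k21, k22, k21, k22 }.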
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));

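  // i0..i2 are the three input rows that feed the current output row. When
  // padding_top == 1, the (-padding_top) & input_width term rewinds i0 by one
  // full row so that i1 still lands on the first real input row, and i0
  // itself is then redirected to the zero row.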
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);

  float* o0 = output;

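  // With a 3-row kernel and vertical stride 2, a padded input of height H
  // produces (H - 3 + 2) / 2 output rows.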
  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }

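    // vi*x1357 hold the odd-indexed pixels of the previous block of 8; they
    // start at zero to provide the implicit left padding of the first block.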
    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;

    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;

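      // Split each block of 8 pixels into even (8ACE) and odd (9BDF) columns:
      // with horizontal stride 2, the even columns are the kernel centers and
      // the odd columns are their right neighbors.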
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

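      // Form the left-neighbor columns (7BDF) by shifting the last odd pixel
      // of the previous block in front of the current odd pixels, then save
      // the current odd pixels for the next iteration.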
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

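      // Reduce the three partial accumulators ("acc3") into one and clamp.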
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);

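      // Zero out the pixels that lie beyond the end of the row so they do
      // not contribute to the last outputs.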
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));

      v128_t vo0p2 = wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p2 = wasm_f32x4_add(vo0p2, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p2);

      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);

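      // w is the number of remaining input bytes (1-7 pixels); the number of
      // remaining outputs is (pixels + 1) / 2. Biasing w by one float makes
      // the bit tests below select stores of 4, 2, and/or 1 output pixels.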
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

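    // Advance to the next output row: with stride 2 the window moves down
    // two input rows, so the row that i2 pointed to becomes the new i0.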
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);


    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}
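
// Usage sketch (illustrative only, not part of the generated kernel): the
// caller passes input_width in bytes, a zero-initialized row for the padded
// rows, 10 packed weights (bias followed by the nine filter taps in
// row-major order), and params with the row-tail masks and clamping bounds
// already set up. For a 5x5 input with padding_top == 1, the kernel writes
// (5 + 1 + 1 - 3 + 2) / 2 = 3 output rows of 3 pixels each:
//
//   float input[5 * 5];               // one CHW channel
//   float weights[10];                // bias + 3x3 filter
//   float zero[5 + XNN_EXTRA_BYTES / sizeof(float)];  // all zeros
//   float output[3 * 3];
//   union xnn_f32_chw_params params;  // masks/min/max set up by the caller
//   xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3(
//       5, 5 * sizeof(float), input, weights, zero, output, 1, &params);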