// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__neonfma_3x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

  const uint32x4_t vmask_even = vld1q_u32(params->neon.mask_even);
  const uint32x4_t vmask_odd = vld1q_u32(params->neon.mask_odd);
  const float32x4_t vmax = vld1q_dup_f32(&params->neon.max);
  const float32x4_t vmin = vld1q_dup_f32(&params->neon.min);

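  // The 10 weights are the bias followed by the nine 3x3 filter taps in
  // row-major order; they stay resident in registers for the whole kernel.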
  const float32x4_t vw0123 = vld1q_f32(weights);
  const float32x4_t vw4567 = vld1q_f32(weights + 4);
  const float32x2_t vw89 = vld1_f32(weights + 8);

  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));

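  // Seven input row pointers cover the 3x3 windows of the three stride-2
  // output rows produced per iteration; rows outside the image read from the
  // zero buffer instead.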
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);
  float* o2 = (float*) ((uintptr_t) o1 + output_width);

  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
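    // Input rows below the bottom of the padded image read from the zero
    // buffer, and output pointers of rows that do not exist are aliased to the
    // previous row; rows are stored from o2 down to o0, so the valid row's
    // values are written last.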
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 5) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i4 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i5 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 8) {
      i6 = zero;
    }

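    // vi*x1357 holds the odd-indexed columns of the previous block so the left
    // neighbor (column 2*x-1) of the next block can be formed with vext; it
    // starts at zero because of the single column of left padding.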
    float32x4_t vi0x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi1x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi2x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi3x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi4x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi5x1357 = vmovq_n_f32(0.0f);
    float32x4_t vi6x1357 = vmovq_n_f32(0.0f);

    size_t w = input_width;
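    // Main loop: each iteration consumes 8 input columns per row (vld2q_f32
    // deinterleaves them into even/odd halves) and produces 4 output columns
    // for each of the 3 output rows, starting every accumulator from the bias.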
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
      float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
      float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0); i0 += 8;
      const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1); i1 += 8;
      const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2); i2 += 8;
      const float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3); i3 += 8;
      const float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4); i4 += 8;
      const float32x4x2_t vi5x8ACE9BDF = vld2q_f32(i5); i5 += 8;
      const float32x4x2_t vi6x8ACE9BDF = vld2q_f32(i6); i6 += 8;

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi2x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi4x8ACE9BDF.val[0], vget_high_f32(vw0123), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi3x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi5x8ACE9BDF.val[0], vget_low_f32(vw4567), 1);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[0], vw89, 0);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi4x8ACE9BDF.val[0], vw89, 0);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi6x8ACE9BDF.val[0], vw89, 0);

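      // Shift in the previous block's last odd column to build the left
      // neighbors (columns 7, B, D, F), then save the current odd columns for
      // the next iteration.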
      const float32x4_t vi0x7BDF = vextq_f32(vi0x1357, vi0x8ACE9BDF.val[1], 3);
      vi0x1357 = vi0x8ACE9BDF.val[1];
      const float32x4_t vi1x7BDF = vextq_f32(vi1x1357, vi1x8ACE9BDF.val[1], 3);
      vi1x1357 = vi1x8ACE9BDF.val[1];
      const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x8ACE9BDF.val[1], 3);
      vi2x1357 = vi2x8ACE9BDF.val[1];
      const float32x4_t vi3x7BDF = vextq_f32(vi3x1357, vi3x8ACE9BDF.val[1], 3);
      vi3x1357 = vi3x8ACE9BDF.val[1];
      const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x8ACE9BDF.val[1], 3);
      vi4x1357 = vi4x8ACE9BDF.val[1];
      const float32x4_t vi5x7BDF = vextq_f32(vi5x1357, vi5x8ACE9BDF.val[1], 3);
      vi5x1357 = vi5x8ACE9BDF.val[1];
      const float32x4_t vi6x7BDF = vextq_f32(vi6x1357, vi6x8ACE9BDF.val[1], 3);
      vi6x1357 = vi6x8ACE9BDF.val[1];

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x7BDF, vget_low_f32(vw0123), 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi2x7BDF, vget_low_f32(vw0123), 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x7BDF, vget_low_f32(vw4567), 0);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi3x7BDF, vget_low_f32(vw4567), 0);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi5x7BDF, vget_low_f32(vw4567), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x7BDF, vget_high_f32(vw4567), 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi6x7BDF, vget_high_f32(vw4567), 1);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi2x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi4x8ACE9BDF.val[1], vget_high_f32(vw0123), 1);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi3x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi5x8ACE9BDF.val[1], vget_high_f32(vw4567), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE9BDF.val[1], vw89, 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi4x8ACE9BDF.val[1], vw89, 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi6x8ACE9BDF.val[1], vw89, 1);

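      // Clamp the accumulators to the [min, max] output range (fused activation).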
      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
      float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
      float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);

      vo0 = vminq_f32(vo0, vmax);
      vo1 = vminq_f32(vo1, vmax);
      vo2 = vminq_f32(vo2, vmax);

      vst1q_f32(o2, vo2); o2 += 4;
      vst1q_f32(o1, vo1); o1 += 4;
      vst1q_f32(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      float32x4_t vo0p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
      float32x4_t vo1p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);
      float32x4_t vo2p0 = vdupq_lane_f32(vget_low_f32(vw0123), 0);

      const float32x4x2_t vi0x8ACE9BDF = vld2q_f32(i0);
      const float32x4x2_t vi1x8ACE9BDF = vld2q_f32(i1);
      const float32x4x2_t vi2x8ACE9BDF = vld2q_f32(i2);
      const float32x4x2_t vi3x8ACE9BDF = vld2q_f32(i3);
      const float32x4x2_t vi4x8ACE9BDF = vld2q_f32(i4);
      const float32x4x2_t vi5x8ACE9BDF = vld2q_f32(i5);
      const float32x4x2_t vi6x8ACE9BDF = vld2q_f32(i6);

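      // Zero out the lanes that lie past the end of the row so out-of-bounds
      // columns of the partial block do not contribute to the accumulators.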
      const float32x4_t vi0x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[0])));
      const float32x4_t vi0x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi0x8ACE9BDF.val[1])));
      const float32x4_t vi1x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[0])));
      const float32x4_t vi1x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi1x8ACE9BDF.val[1])));
      const float32x4_t vi2x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[0])));
      const float32x4_t vi2x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi2x8ACE9BDF.val[1])));
      const float32x4_t vi3x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[0])));
      const float32x4_t vi3x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi3x8ACE9BDF.val[1])));
      const float32x4_t vi4x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[0])));
      const float32x4_t vi4x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi4x8ACE9BDF.val[1])));
      const float32x4_t vi5x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi5x8ACE9BDF.val[0])));
      const float32x4_t vi5x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi5x8ACE9BDF.val[1])));
      const float32x4_t vi6x8ACE = vreinterpretq_f32_u32(vandq_u32(vmask_even, vreinterpretq_u32_f32(vi6x8ACE9BDF.val[0])));
      const float32x4_t vi6x9BDF = vreinterpretq_f32_u32(vandq_u32(vmask_odd, vreinterpretq_u32_f32(vi6x8ACE9BDF.val[1])));

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x8ACE, vget_high_f32(vw0123), 0);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi2x8ACE, vget_high_f32(vw0123), 0);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi4x8ACE, vget_high_f32(vw0123), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x8ACE, vget_low_f32(vw4567), 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi3x8ACE, vget_low_f32(vw4567), 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi5x8ACE, vget_low_f32(vw4567), 1);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x8ACE, vw89, 0);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi4x8ACE, vw89, 0);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi6x8ACE, vw89, 0);

      const float32x4_t vi0x7BDF = vextq_f32(vi0x1357, vi0x9BDF, 3);
      const float32x4_t vi1x7BDF = vextq_f32(vi1x1357, vi1x9BDF, 3);
      const float32x4_t vi2x7BDF = vextq_f32(vi2x1357, vi2x9BDF, 3);
      const float32x4_t vi3x7BDF = vextq_f32(vi3x1357, vi3x9BDF, 3);
      const float32x4_t vi4x7BDF = vextq_f32(vi4x1357, vi4x9BDF, 3);
      const float32x4_t vi5x7BDF = vextq_f32(vi5x1357, vi5x9BDF, 3);
      const float32x4_t vi6x7BDF = vextq_f32(vi6x1357, vi6x9BDF, 3);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x7BDF, vget_low_f32(vw0123), 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi2x7BDF, vget_low_f32(vw0123), 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi4x7BDF, vget_low_f32(vw0123), 1);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x7BDF, vget_low_f32(vw4567), 0);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi3x7BDF, vget_low_f32(vw4567), 0);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi5x7BDF, vget_low_f32(vw4567), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x7BDF, vget_high_f32(vw4567), 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi4x7BDF, vget_high_f32(vw4567), 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi6x7BDF, vget_high_f32(vw4567), 1);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi0x9BDF, vget_high_f32(vw0123), 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi2x9BDF, vget_high_f32(vw0123), 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi4x9BDF, vget_high_f32(vw0123), 1);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi1x9BDF, vget_high_f32(vw4567), 0);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi3x9BDF, vget_high_f32(vw4567), 0);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi5x9BDF, vget_high_f32(vw4567), 0);

      vo0p0 = vfmaq_lane_f32(vo0p0, vi2x9BDF, vw89, 1);
      vo1p0 = vfmaq_lane_f32(vo1p0, vi4x9BDF, vw89, 1);
      vo2p0 = vfmaq_lane_f32(vo2p0, vi6x9BDF, vw89, 1);


      float32x4_t vo0 = vmaxq_f32(vo0p0, vmin);
      float32x4_t vo1 = vmaxq_f32(vo1p0, vmin);
      float32x4_t vo2 = vmaxq_f32(vo2p0, vmin);

      vo0 = vminq_f32(vo0, vmax);
      vo1 = vminq_f32(vo1, vmax);
      vo2 = vminq_f32(vo2, vmax);

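      // Store the remaining 1-4 outputs per row: after bumping w by one
      // element, its bits select the 4-, 2-, and 1-element stores.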
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        vst1q_f32(o2, vo2); o2 += 4;
        vst1q_f32(o1, vo1); o1 += 4;
        vst1q_f32(o0, vo0); o0 += 4;
      } else {
        float32x2_t vo0_lo = vget_low_f32(vo0);
        float32x2_t vo1_lo = vget_low_f32(vo1);
        float32x2_t vo2_lo = vget_low_f32(vo2);
        if (w & (4 * sizeof(float))) {
          vst1_f32(o2, vo2_lo); o2 += 2;
          vst1_f32(o1, vo1_lo); o1 += 2;
          vst1_f32(o0, vo0_lo); o0 += 2;

          vo0_lo = vget_high_f32(vo0);
          vo1_lo = vget_high_f32(vo1);
          vo2_lo = vget_high_f32(vo2);
        }
        if (w & (2 * sizeof(float))) {
          vst1_lane_f32(o2, vo2_lo, 0); o2 += 1;
          vst1_lane_f32(o1, vo1_lo, 0); o1 += 1;
          vst1_lane_f32(o0, vo0_lo, 0); o0 += 1;
        }
      }
    }

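    // Advance to the next group of three output rows: their top input row is
    // the current i6, so rebase all row pointers from it after undoing the
    // main-loop advance (input_decrement); o0 continues where o2 left off.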
    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);

    o0 = o2;
    o1 = (float*) ((uintptr_t) o0 + output_width);
    o2 = (float*) ((uintptr_t) o1 + output_width);

    output_height = doz(output_height, 3);
    padded_input_height = doz(padded_input_height, 6);
  } while (output_height != 0);
}