// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/unipass-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>

void xnn_qs8_gavgpool_minmax_ukernel_7x__neon_c16_acc2(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int8_t* output,
    const union xnn_qs8_avgpool_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

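  // Set up one input pointer per row of the 7-row tile. Pointers for rows
  // past the last valid one are redirected to the zero buffer, so the loops
  // below can read all 7 rows unconditionally without affecting the sums.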
  const int8_t* i0 = input;
  const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

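  // Requantization constants precomputed in params: a 32-bit fixed-point
  // multiplier with a signed shift count (negative values shift right under
  // VRSHL), plus the output zero point and the output clamping bounds.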
  const int32x4_t vbias = vld1q_dup_s32(&params->neon.bias);
  #if XNN_ARCH_ARM64
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
  #else
    const int32x2_t vmultiplier = vld1_dup_s32(&params->neon.multiplier);
  #endif
  const int64x2_t vleft_shift = vld1q_dup_s64(&params->neon.left_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
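  // Main loop: process 16 channels per iteration.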
  while (channels >= 16) {
    const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
    const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
    const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
    const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
    const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
    const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
    const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
    const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
    const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
    const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
    const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
    const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
    const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
    const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;

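    // Sum the 7 rows into two independent accumulator pairs (the "acc2" in
    // the kernel name) to shorten dependency chains; 7 int8 values cannot
    // overflow an int16 lane.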
    int16x8_t vacc0x01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
    int16x8_t vacc0x89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
    int16x8_t vacc1x01234567 = vaddl_s8(vi2x01234567, vi3x01234567);
    int16x8_t vacc1x89ABCDEF = vaddl_s8(vi2x89ABCDEF, vi3x89ABCDEF);

    vacc0x01234567 = vaddw_s8(vacc0x01234567, vi4x01234567);
    vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi4x89ABCDEF);
    vacc1x01234567 = vaddw_s8(vacc1x01234567, vi5x01234567);
    vacc1x89ABCDEF = vaddw_s8(vacc1x89ABCDEF, vi5x89ABCDEF);
    vacc0x01234567 = vaddw_s8(vacc0x01234567, vi6x01234567);
    vacc0x89ABCDEF = vaddw_s8(vacc0x89ABCDEF, vi6x89ABCDEF);

    // Add up all accumulators to vacc0x01234567 and vacc0x89ABCDEF
    vacc0x01234567 = vaddq_s16(vacc0x01234567, vacc1x01234567);
    vacc0x89ABCDEF = vaddq_s16(vacc0x89ABCDEF, vacc1x89ABCDEF);

    int32x4_t vacc0123 = vaddw_s16(vbias, vget_low_s16(vacc0x01234567));
    int32x4_t vacc4567 = vaddw_s16(vbias, vget_high_s16(vacc0x01234567));
    int32x4_t vacc89AB = vaddw_s16(vbias, vget_low_s16(vacc0x89ABCDEF));
    int32x4_t vaccCDEF = vaddw_s16(vbias, vget_high_s16(vacc0x89ABCDEF));

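    // Sign-adjusted rounding requantization; a scalar sketch of the per-lane
    // computation below:
    //   prod    = (int64_t) acc * multiplier;   // widen to 64 bits
    //   adjprod = prod - (acc < 0 ? 1 : 0);     // bias ties away from zero
    //   result  = rounding_shift(adjprod, left_shift);
    // vcltq_s32 yields all-ones (-1) lanes where acc < 0, so the widening
    // adds below apply the adjustment.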
    const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));
    const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));
    const int32x4_t vsgnacc89AB = vreinterpretq_s32_u32(vcltq_s32(vacc89AB, vmovq_n_s32(0)));
    const int32x4_t vsgnaccCDEF = vreinterpretq_s32_u32(vcltq_s32(vaccCDEF, vmovq_n_s32(0)));

    #if XNN_ARCH_ARM64
      const int64x2_t vprod01 = vmull_s32(vget_low_s32(vacc0123), vget_low_s32(vmultiplier));
      const int64x2_t vprod23 = vmull_high_s32(vacc0123, vmultiplier);
      const int64x2_t vprod45 = vmull_s32(vget_low_s32(vacc4567), vget_low_s32(vmultiplier));
      const int64x2_t vprod67 = vmull_high_s32(vacc4567, vmultiplier);
      const int64x2_t vprod89 = vmull_s32(vget_low_s32(vacc89AB), vget_low_s32(vmultiplier));
      const int64x2_t vprodAB = vmull_high_s32(vacc89AB, vmultiplier);
      const int64x2_t vprodCD = vmull_s32(vget_low_s32(vaccCDEF), vget_low_s32(vmultiplier));
      const int64x2_t vprodEF = vmull_high_s32(vaccCDEF, vmultiplier);

      const int64x2_t vadjprod01 = vaddw_s32(vprod01, vget_low_s32(vsgnacc0123));
      const int64x2_t vadjprod23 = vaddw_high_s32(vprod23, vsgnacc0123);
      const int64x2_t vadjprod45 = vaddw_s32(vprod45, vget_low_s32(vsgnacc4567));
      const int64x2_t vadjprod67 = vaddw_high_s32(vprod67, vsgnacc4567);
      const int64x2_t vadjprod89 = vaddw_s32(vprod89, vget_low_s32(vsgnacc89AB));
      const int64x2_t vadjprodAB = vaddw_high_s32(vprodAB, vsgnacc89AB);
      const int64x2_t vadjprodCD = vaddw_s32(vprodCD, vget_low_s32(vsgnaccCDEF));
      const int64x2_t vadjprodEF = vaddw_high_s32(vprodEF, vsgnaccCDEF);
    #else
      const int64x2_t vprod01 = vmull_s32(vget_low_s32(vacc0123), vmultiplier);
      const int64x2_t vprod23 = vmull_s32(vget_high_s32(vacc0123), vmultiplier);
      const int64x2_t vprod45 = vmull_s32(vget_low_s32(vacc4567), vmultiplier);
      const int64x2_t vprod67 = vmull_s32(vget_high_s32(vacc4567), vmultiplier);
      const int64x2_t vprod89 = vmull_s32(vget_low_s32(vacc89AB), vmultiplier);
      const int64x2_t vprodAB = vmull_s32(vget_high_s32(vacc89AB), vmultiplier);
      const int64x2_t vprodCD = vmull_s32(vget_low_s32(vaccCDEF), vmultiplier);
      const int64x2_t vprodEF = vmull_s32(vget_high_s32(vaccCDEF), vmultiplier);

      const int64x2_t vadjprod01 = vaddw_s32(vprod01, vget_low_s32(vsgnacc0123));
      const int64x2_t vadjprod23 = vaddw_s32(vprod23, vget_high_s32(vsgnacc0123));
      const int64x2_t vadjprod45 = vaddw_s32(vprod45, vget_low_s32(vsgnacc4567));
      const int64x2_t vadjprod67 = vaddw_s32(vprod67, vget_high_s32(vsgnacc4567));
      const int64x2_t vadjprod89 = vaddw_s32(vprod89, vget_low_s32(vsgnacc89AB));
      const int64x2_t vadjprodAB = vaddw_s32(vprodAB, vget_high_s32(vsgnacc89AB));
      const int64x2_t vadjprodCD = vaddw_s32(vprodCD, vget_low_s32(vsgnaccCDEF));
      const int64x2_t vadjprodEF = vaddw_s32(vprodEF, vget_high_s32(vsgnaccCDEF));
    #endif

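    // VRSHL with a negative vleft_shift performs a rounding arithmetic shift
    // right; combined with the sign adjustment above this rounds to nearest,
    // ties away from zero.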
    const int64x2_t vacc01 = vrshlq_s64(vadjprod01, vleft_shift);
    const int64x2_t vacc23 = vrshlq_s64(vadjprod23, vleft_shift);
    const int64x2_t vacc45 = vrshlq_s64(vadjprod45, vleft_shift);
    const int64x2_t vacc67 = vrshlq_s64(vadjprod67, vleft_shift);
    const int64x2_t vacc89 = vrshlq_s64(vadjprod89, vleft_shift);
    const int64x2_t vaccAB = vrshlq_s64(vadjprodAB, vleft_shift);
    const int64x2_t vaccCD = vrshlq_s64(vadjprodCD, vleft_shift);
    const int64x2_t vaccEF = vrshlq_s64(vadjprodEF, vleft_shift);

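    // Narrow the 64-bit results back to 32 bits (vuzp1q_s32 keeps the even,
    // i.e. low, 32-bit lanes on AArch64; vmovn_s64 does the same elsewhere),
    // then saturate to 16 bits while adding the output zero point, and
    // finally saturate to 8 bits.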
    #if XNN_ARCH_ARM64
      vacc0123 = vuzp1q_s32(vreinterpretq_s32_s64(vacc01), vreinterpretq_s32_s64(vacc23));
      vacc4567 = vuzp1q_s32(vreinterpretq_s32_s64(vacc45), vreinterpretq_s32_s64(vacc67));
      vacc89AB = vuzp1q_s32(vreinterpretq_s32_s64(vacc89), vreinterpretq_s32_s64(vaccAB));
      vaccCDEF = vuzp1q_s32(vreinterpretq_s32_s64(vaccCD), vreinterpretq_s32_s64(vaccEF));

      const int16x8_t vacc01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567), voutput_zero_point);
      const int16x8_t vacc89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF), voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
    #else
      vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23));
      vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67));
      vacc89AB = vcombine_s32(vmovn_s64(vacc89), vmovn_s64(vaccAB));
      vaccCDEF = vcombine_s32(vmovn_s64(vaccCD), vmovn_s64(vaccEF));

      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
      const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);

      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
    #endif

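    // Clamp to the output activation range.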
    vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);

    vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);

    vst1q_s8(output, vout0123456789ABCDEF); output += 16;

    channels -= 16;
  }
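  // Remainder: handle the last 1-15 channels in groups of up to 8, with a
  // partial store for a final group narrower than 8.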
  if XNN_UNLIKELY(channels != 0) {
    do {
      const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
      const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
      const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
      const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
      const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
      const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
      const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;

      int16x8_t vacc0x01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
      int16x8_t vacc1x01234567 = vaddl_s8(vi2x01234567, vi3x01234567);

      vacc0x01234567 = vaddw_s8(vacc0x01234567, vi4x01234567);
      vacc1x01234567 = vaddw_s8(vacc1x01234567, vi5x01234567);
      vacc0x01234567 = vaddw_s8(vacc0x01234567, vi6x01234567);

      // Add up all accumulators to vacc0x01234567
      vacc0x01234567 = vaddq_s16(vacc0x01234567, vacc1x01234567);

      int32x4_t vacc0123 = vaddw_s16(vbias, vget_low_s16(vacc0x01234567));
      int32x4_t vacc4567 = vaddw_s16(vbias, vget_high_s16(vacc0x01234567));

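      // Same sign-adjusted rounding requantization as in the main loop,
      // applied to a single group of 8 channels.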
      const int32x4_t vsgnacc0123 = vreinterpretq_s32_u32(vcltq_s32(vacc0123, vmovq_n_s32(0)));
      const int32x4_t vsgnacc4567 = vreinterpretq_s32_u32(vcltq_s32(vacc4567, vmovq_n_s32(0)));

      #if XNN_ARCH_ARM64
        const int64x2_t vprod01 = vmull_s32(vget_low_s32(vacc0123), vget_low_s32(vmultiplier));
        const int64x2_t vprod23 = vmull_high_s32(vacc0123, vmultiplier);
        const int64x2_t vprod45 = vmull_s32(vget_low_s32(vacc4567), vget_low_s32(vmultiplier));
        const int64x2_t vprod67 = vmull_high_s32(vacc4567, vmultiplier);

        const int64x2_t vadjprod01 = vaddw_s32(vprod01, vget_low_s32(vsgnacc0123));
        const int64x2_t vadjprod23 = vaddw_high_s32(vprod23, vsgnacc0123);
        const int64x2_t vadjprod45 = vaddw_s32(vprod45, vget_low_s32(vsgnacc4567));
        const int64x2_t vadjprod67 = vaddw_high_s32(vprod67, vsgnacc4567);
      #else
        const int64x2_t vprod01 = vmull_s32(vget_low_s32(vacc0123), vmultiplier);
        const int64x2_t vprod23 = vmull_s32(vget_high_s32(vacc0123), vmultiplier);
        const int64x2_t vprod45 = vmull_s32(vget_low_s32(vacc4567), vmultiplier);
        const int64x2_t vprod67 = vmull_s32(vget_high_s32(vacc4567), vmultiplier);

        const int64x2_t vadjprod01 = vaddw_s32(vprod01, vget_low_s32(vsgnacc0123));
        const int64x2_t vadjprod23 = vaddw_s32(vprod23, vget_high_s32(vsgnacc0123));
        const int64x2_t vadjprod45 = vaddw_s32(vprod45, vget_low_s32(vsgnacc4567));
        const int64x2_t vadjprod67 = vaddw_s32(vprod67, vget_high_s32(vsgnacc4567));
      #endif

      const int64x2_t vacc01 = vrshlq_s64(vadjprod01, vleft_shift);
      const int64x2_t vacc23 = vrshlq_s64(vadjprod23, vleft_shift);
      const int64x2_t vacc45 = vrshlq_s64(vadjprod45, vleft_shift);
      const int64x2_t vacc67 = vrshlq_s64(vadjprod67, vleft_shift);

      #if XNN_ARCH_ARM64
        vacc0123 = vuzp1q_s32(vreinterpretq_s32_s64(vacc01), vreinterpretq_s32_s64(vacc23));
        vacc4567 = vuzp1q_s32(vreinterpretq_s32_s64(vacc45), vreinterpretq_s32_s64(vacc67));

        const int16x8_t vacc01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567), voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
      #else
        vacc0123 = vcombine_s32(vmovn_s64(vacc01), vmovn_s64(vacc23));
        vacc4567 = vcombine_s32(vmovn_s64(vacc45), vmovn_s64(vacc67));

        const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
      #endif

      vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
      vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));

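      // Store a full group of 8 channels when available; otherwise write the
      // remaining 1-7 lanes in 4-, 2-, and 1-byte chunks, rotating consumed
      // lanes out with vext_s8 between the partial stores.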
      if XNN_LIKELY(channels >= 8) {
        vst1_s8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          vst1_lane_u32(__builtin_assume_aligned(output, 1), vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16(__builtin_assume_aligned(output, 1), vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}