// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/multipass-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>

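// Multipass global average pooling over signed 8-bit inputs. Each pass sums
// up to 7 input rows per channel into a 32-bit scratch buffer; the final
// pass requantizes the accumulated sums back to int8 using SSE2. Channels
// are processed 8 at a time (c8) with two 16-bit accumulators per tile (acc2).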
void xnn_qs8_gavgpool_minmax_ukernel_7p7x__sse2_c8_acc2(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int32_t* buffer,
    int8_t* output,
    const union xnn_qs8_avgpool_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(rows > 7);
  assert(channels != 0);

  const int8_t* i0 = input;
  const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
  const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
  const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
  const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
  const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
  const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8);

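  // First pass: sum the first 7 rows of every channel into the 32-bit
  // scratch buffer, seeding each 4-element accumulator with the bias loaded
  // from params.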
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 8)) {
    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    i0 += 8;
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    i1 += 8;
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    i2 += 8;
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    i3 += 8;
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    i4 += 8;
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    i5 += 8;
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    i6 += 8;

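    // Sign-extend each int8 lane to int16: comparing zero > x with
    // _mm_cmpgt_epi8 produces the sign mask, which is interleaved in as the
    // high byte of every lane.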
    const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
    const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
    const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
    const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
    const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
    const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
    const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));

    __m128i vacc0x01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    __m128i vacc1x01234567 = _mm_add_epi16(vxi2x01234567, vxi3x01234567);

    vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vxi4x01234567);
    vacc1x01234567 = _mm_add_epi16(vacc1x01234567, vxi5x01234567);
    vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vxi6x01234567);

    // Add up all accumulators to vacc0x01234567
    vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vacc1x01234567);

    const __m128i vsgnacc0x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x01234567);
    const __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vacc0x01234567, vsgnacc0x01234567));
    const __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vacc0x01234567, vsgnacc0x01234567));

    _mm_store_si128((__m128i*) b, vacc0123);
    _mm_store_si128((__m128i*) (b + 4), vacc4567);
    b += 8;
  }

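  // Middle passes: while more than 7 rows remain, add 7 further rows into
  // the partial sums already stored in the scratch buffer.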
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 8)) {
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      i4 += 8;
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      i5 += 8;
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      i6 += 8;

      const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
      const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
      const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
      const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
      const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
      const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
      const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));

      __m128i vacc0x01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      __m128i vacc1x01234567 = _mm_add_epi16(vxi2x01234567, vxi3x01234567);

      vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vxi4x01234567);
      vacc1x01234567 = _mm_add_epi16(vacc1x01234567, vxi5x01234567);
      vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vxi6x01234567);

      // Add up all accumulators to vacc0x01234567
      vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vacc1x01234567);

      const __m128i vsgnacc0x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x01234567);
      const __m128i vacc0123 = _mm_add_epi32(_mm_unpacklo_epi16(vacc0x01234567, vsgnacc0x01234567), _mm_load_si128((const __m128i*) (b + 0)));
      const __m128i vacc4567 = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x01234567, vsgnacc0x01234567), _mm_load_si128((const __m128i*) (b + 4)));

      _mm_store_si128((__m128i*) b, vacc0123);
      _mm_store_si128((__m128i*) (b + 4), vacc4567);
      b += 8;
    }
  }

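  // Last pass: 1 to 7 rows remain. Row pointers that would read past the end
  // of the input are redirected to the zero vector, so the extra rows
  // contribute nothing to the sums.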
  i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

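  // Load the requantization constants: a fixed-point multiplier, a rounding
  // addend, and a right-shift amount that together scale the accumulated
  // sums into the output range.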
  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
  const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
  const __m128i vshift = _mm_loadl_epi64((const __m128i*) params->sse2.shift);
  while (channels >= 8) {
    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    i0 += 8;
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    i1 += 8;
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    i2 += 8;
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    i3 += 8;
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    i4 += 8;
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    i5 += 8;
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    i6 += 8;

    const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
    const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
    const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
    const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
    const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
    const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
    const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));

    __m128i vacc0x01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    __m128i vacc1x01234567 = _mm_add_epi16(vxi2x01234567, vxi3x01234567);

    vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vxi4x01234567);
    vacc1x01234567 = _mm_add_epi16(vacc1x01234567, vxi5x01234567);
    vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vxi6x01234567);

    // Add up all accumulators to vacc0x01234567
    vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vacc1x01234567);

    const __m128i vsgnacc0x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x01234567);
    const __m128i vacc0123 = _mm_add_epi32(_mm_unpacklo_epi16(vacc0x01234567, vsgnacc0x01234567), _mm_load_si128((const __m128i*) (buffer + 0)));
    const __m128i vacc4567 = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x01234567, vsgnacc0x01234567), _mm_load_si128((const __m128i*) (buffer + 4)));
    buffer += 8;

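    // SSE2 lacks a signed 32x32->64-bit multiply, so requantization works in
    // sign-magnitude form: take absolute values, multiply with the unsigned
    // _mm_mul_epu32, round and shift, then restore the signs at the end.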
    const __m128i vsgnacc0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
    const __m128i vsgnacc4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);

    const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vsgnacc0123), vsgnacc0123);
    const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vsgnacc4567), vsgnacc4567);

    const __m128i vabsacc13 = _mm_shuffle_epi32(vabsacc0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vabsacc57 = _mm_shuffle_epi32(vabsacc4567, _MM_SHUFFLE(3, 3, 1, 1));

    const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
    const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
    const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
    const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);

    const __m128i vabsout02 = _mm_srl_epi64(_mm_add_epi64(vabsprod02, vrounding), vshift);
    const __m128i vabsout13 = _mm_srl_epi64(_mm_add_epi64(vabsprod13, vrounding), vshift);
    const __m128i vabsout46 = _mm_srl_epi64(_mm_add_epi64(vabsprod46, vrounding), vshift);
    const __m128i vabsout57 = _mm_srl_epi64(_mm_add_epi64(vabsprod57, vrounding), vshift);

    const __m128i vabsout0213 = _mm_castps_si128(
        _mm_shuffle_ps(_mm_castsi128_ps(vabsout02), _mm_castsi128_ps(vabsout13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vabsout4657 = _mm_castps_si128(
        _mm_shuffle_ps(_mm_castsi128_ps(vabsout46), _mm_castsi128_ps(vabsout57), _MM_SHUFFLE(2, 0, 2, 0)));

    const __m128i vabsout0123 = _mm_shuffle_epi32(vabsout0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vabsout4567 = _mm_shuffle_epi32(vabsout4657, _MM_SHUFFLE(3, 1, 2, 0));

    const __m128i vout0123 = _mm_sub_epi32(_mm_xor_si128(vabsout0123, vsgnacc0123), vsgnacc0123);
    const __m128i vout4567 = _mm_sub_epi32(_mm_xor_si128(vabsout4567, vsgnacc4567), vsgnacc4567);

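    // Convert to the output quantization: add the output zero point, clamp
    // to [output_min, output_max] while still in 16 bits, and pack to int8.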
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vout0123, vout4567), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
    const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
    vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);

    __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
    output += 8;

    channels -= 8;
  }
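  // Remainder: up to 7 trailing channels. A full 8-channel tile is computed
  // (the loads assume each row is readable up to round_up_po2(channels, 8)
  // bytes, matching the input_increment computation), and only the valid
  // lanes are stored, 4/2/1 bytes at a time.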
  if XNN_UNLIKELY(channels != 0) {
    {
      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      i4 += 8;
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      i5 += 8;
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      i6 += 8;

      const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
      const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
      const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
      const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
      const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
      const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
      const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));

      __m128i vacc0x01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      __m128i vacc1x01234567 = _mm_add_epi16(vxi2x01234567, vxi3x01234567);

      vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vxi4x01234567);
      vacc1x01234567 = _mm_add_epi16(vacc1x01234567, vxi5x01234567);
      vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vxi6x01234567);

      // Add up all accumulators to vacc0x01234567
      vacc0x01234567 = _mm_add_epi16(vacc0x01234567, vacc1x01234567);

      const __m128i vsgnacc0x01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x01234567);
      const __m128i vacc0123 = _mm_add_epi32(_mm_unpacklo_epi16(vacc0x01234567, vsgnacc0x01234567), _mm_load_si128((const __m128i*) buffer));
      const __m128i vacc4567 = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x01234567, vsgnacc0x01234567), _mm_load_si128((const __m128i*) (buffer + 4)));
      buffer += 8;

      const __m128i vsgnacc0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
      const __m128i vsgnacc4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);

      const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vsgnacc0123), vsgnacc0123);
      const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vsgnacc4567), vsgnacc4567);

      const __m128i vabsacc13 = _mm_shuffle_epi32(vabsacc0123, _MM_SHUFFLE(3, 3, 1, 1));
      const __m128i vabsacc57 = _mm_shuffle_epi32(vabsacc4567, _MM_SHUFFLE(3, 3, 1, 1));

      const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
      const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
      const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
      const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);

      const __m128i vabsout02 = _mm_srl_epi64(_mm_add_epi64(vabsprod02, vrounding), vshift);
      const __m128i vabsout13 = _mm_srl_epi64(_mm_add_epi64(vabsprod13, vrounding), vshift);
      const __m128i vabsout46 = _mm_srl_epi64(_mm_add_epi64(vabsprod46, vrounding), vshift);
      const __m128i vabsout57 = _mm_srl_epi64(_mm_add_epi64(vabsprod57, vrounding), vshift);

      const __m128i vabsout0213 = _mm_castps_si128(
          _mm_shuffle_ps(_mm_castsi128_ps(vabsout02), _mm_castsi128_ps(vabsout13), _MM_SHUFFLE(2, 0, 2, 0)));
      const __m128i vabsout4657 = _mm_castps_si128(
          _mm_shuffle_ps(_mm_castsi128_ps(vabsout46), _mm_castsi128_ps(vabsout57), _MM_SHUFFLE(2, 0, 2, 0)));

      const __m128i vabsout0123 = _mm_shuffle_epi32(vabsout0213, _MM_SHUFFLE(3, 1, 2, 0));
      const __m128i vabsout4567 = _mm_shuffle_epi32(vabsout4657, _MM_SHUFFLE(3, 1, 2, 0));

      const __m128i vout0123 = _mm_sub_epi32(_mm_xor_si128(vabsout0123, vsgnacc0123), vsgnacc0123);
      const __m128i vout4567 = _mm_sub_epi32(_mm_xor_si128(vabsout4567, vsgnacc4567), vsgnacc4567);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vout0123, vout4567), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
      const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
      vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

      if (channels & 4) {
        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
        output += 4;
      }
      if (channels & 2) {
        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
        vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
        output += 2;
      }
      if (channels & 1) {
        *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
      }
    }
  }
}