// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SSE_HEADER = {2: "emmintrin.h", 3: "tmmintrin.h", 4: "smmintrin.h"}[SSE]
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert KERNEL_TILE >= 2
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/dwconv.h>


$ISA = {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_qs8_dwconv_minmax_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__${ISA}_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(channels != 0);
  assert(output_width != 0);

  do {
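    // Load the ${KERNEL_TILE} input row pointers for this output pixel. Rows that
    // point at the shared zero buffer are left as-is; all others are adjusted by
    // input_offset.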
    $for K in range(KERNEL_TILE):
      const int8_t* i${K} = input[${K}];
      assert(i${K} != NULL);
      if XNN_UNPREDICTABLE(i${K} != zero) {
        i${K} = (const int8_t*) ((uintptr_t) i${K} + input_offset);
      }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
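      // Initialize accumulators with the ${CHANNEL_TILE} per-channel int32 bias values
      // that lead each block of packed weights.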
      __m128i vacc${ABC[0:4]} = _mm_loadu_si128((const __m128i*) w);
      $for C in range(4, CHANNEL_TILE, 4):
        __m128i vacc${ABC[C:C+4]} = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + ${C} * sizeof(int32_t)));

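      // Multiply-accumulate: for each of the ${KERNEL_TILE} kernel taps, widen int8
      // inputs and weights to int16, form 32-bit products from the _mm_mullo_epi16 /
      // _mm_mulhi_epi16 halves, and accumulate.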
      $for K in range(KERNEL_TILE):

        $for C in range(0, CHANNEL_TILE, 8):
          $if C == 0:
            const __m128i vi${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${K});
          $else:
            const __m128i vi${K}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) (i${K} + ${C}));
          $if SSE >= 4:
            const __m128i vxi${K}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(vi${K}x${ABC[C:C+8]});
          const __m128i vk${K}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE + C} * sizeof(int8_t)));
          $if SSE >= 4:
            const __m128i vxk${K}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(vk${K}x${ABC[C:C+8]});
        i${K} += ${CHANNEL_TILE};

        $if SSE < 4:
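          // No _mm_cvtepi8_epi16 before SSE4.1: sign-extend by unpacking each byte
          // with its computed sign mask (0xFF for negative lanes, 0x00 otherwise).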
          $for C in range(0, CHANNEL_TILE, 8):
            const __m128i vxi${K}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vi${K}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${K}x${ABC[C:C+8]}));
            const __m128i vxk${K}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vk${K}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vk${K}x${ABC[C:C+8]}));

        $for C in range(0, CHANNEL_TILE, 8):
          const __m128i vp${K}x${ABC[C:C+8]}lo = _mm_mullo_epi16(vxi${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]});
          const __m128i vp${K}x${ABC[C:C+8]}hi = _mm_mulhi_epi16(vxi${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]});

        $for C in range(0, CHANNEL_TILE, 8):
          vacc${ABC[C:C+4]} = _mm_add_epi32(vacc${ABC[C:C+4]}, _mm_unpacklo_epi16(vp${K}x${ABC[C:C+8]}lo, vp${K}x${ABC[C:C+8]}hi));
          vacc${ABC[C+4:C+8]} = _mm_add_epi32(vacc${ABC[C+4:C+8]}, _mm_unpackhi_epi16(vp${K}x${ABC[C:C+8]}lo, vp${K}x${ABC[C:C+8]}hi));

      w = (const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(int8_t));

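      // Requantize the int32 accumulators: multiply by the Q31 fixed-point multiplier
      // with rounding, then arithmetically shift right (gemmlowp-style requantization).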
      const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
      const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

      $if SSE >= 4:
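        // SSE4.1: _mm_mul_epi32 multiplies only the even 32-bit lanes, so compute the
        // odd lanes from a shuffled copy and re-interleave the halves with a blend.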
        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vacc${ABC[C+1:C+4:2]} = _mm_shuffle_epi32(vacc${ABC[C:C+4]}, _MM_SHUFFLE(3, 3, 1, 1));
          const __m128i vprod${ABC[C:C+4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[C:C+4]}, vmultiplier), vrounding);
          const __m128i vprod${ABC[C+1:C+4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[C+1:C+4:2]}, vmultiplier), vrounding);

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4:2]} = _mm_srli_epi64(vprod${ABC[C:C+4:2]}, 31);
          const __m128i vq31prod${ABC[C+1:C+4:2]} = _mm_add_epi64(vprod${ABC[C+1:C+4:2]}, vprod${ABC[C+1:C+4:2]});

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4]} = _mm_blend_epi16(vq31prod${ABC[C:C+4:2]}, vq31prod${ABC[C+1:C+4:2]}, 0xCC);
      $else:
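        // SSE2/SSSE3 lack a signed 32x32->64 multiply: take absolute values, multiply
        // with _mm_mul_epu32, then restore the sign of each 64-bit product.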
        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vnmask${ABC[C:C+4]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[C:C+4]});

        $for C in range(0, CHANNEL_TILE, 4):
          $if SSE >= 3:
            const __m128i vabsacc${ABC[C:C+4]} = _mm_abs_epi32(vacc${ABC[C:C+4]});
          $else:
            const __m128i vabsacc${ABC[C:C+4]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[C:C+4]}, vnmask${ABC[C:C+4]}), vnmask${ABC[C:C+4]});

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vabsacc${ABC[C+1:C+4:2]} = _mm_shuffle_epi32(vabsacc${ABC[C:C+4]}, _MM_SHUFFLE(3, 3, 1, 1));
          const __m128i vabsprod${ABC[C:C+4:2]} = _mm_mul_epu32(vabsacc${ABC[C:C+4]}, vmultiplier);
          const __m128i vabsprod${ABC[C+1:C+4:2]} = _mm_mul_epu32(vabsacc${ABC[C+1:C+4:2]}, vmultiplier);

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vnmask${ABC[C:C+4:2]} = _mm_shuffle_epi32(vnmask${ABC[C:C+4]}, _MM_SHUFFLE(2, 2, 0, 0));
          const __m128i vnmask${ABC[C+1:C+4:2]} = _mm_shuffle_epi32(vnmask${ABC[C:C+4]}, _MM_SHUFFLE(3, 3, 1, 1));

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vprod${ABC[C:C+4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[C:C+4:2]}, vnmask${ABC[C:C+4:2]}), vnmask${ABC[C:C+4:2]});
          const __m128i vprod${ABC[C+1:C+4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[C+1:C+4:2]}, vnmask${ABC[C+1:C+4:2]}), vnmask${ABC[C+1:C+4:2]});

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[C:C+4:2]}, vrounding), 31);
          const __m128i vq31prod${ABC[C+1:C+4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[C+1:C+4:2]}, vrounding), 31);

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4:2]}${ABC[C+1:C+4:2]} = _mm_castps_si128(_mm_shuffle_ps(
              _mm_castsi128_ps(vq31prod${ABC[C:C+4:2]}), _mm_castsi128_ps(vq31prod${ABC[C+1:C+4:2]}), _MM_SHUFFLE(2, 0, 2, 0)));

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4]} = _mm_shuffle_epi32(vq31prod${ABC[C:C+4:2]}${ABC[C+1:C+4:2]}, _MM_SHUFFLE(3, 1, 2, 0));

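      // Shift right by the per-layer shift with rounding to nearest: the masked
      // remainder is compared against the threshold to decide whether to round up.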
      const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vrem${ABC[C:C+4]} =
          _mm_add_epi32(_mm_and_si128(vq31prod${ABC[C:C+4]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[C:C+4]}));

      const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
      const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
      $for C in range(0, CHANNEL_TILE, 4):
        vacc${ABC[C:C+4]} =
          _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[C:C+4]}, vshift), _mm_cmpgt_epi32(vrem${ABC[C:C+4]}, vremainder_threshold));

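      // Convert to int8: pack to int16 with saturation, add the output zero point,
      // clamp to [output_min, output_max], then pack to int8.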
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
      $for C in range(0, CHANNEL_TILE, 8):
        __m128i vout${ABC[C:C+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[C:C+4]}, vacc${ABC[C+4:C+8]}), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
      const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
      $for C in range(0, CHANNEL_TILE, 8):
        vout${ABC[C:C+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[C:C+8]}, voutput_min), voutput_max);

      $for C in range(0, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          __m128i vout${ABC[C:C+16]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C+8:C+16]});
        $else:
          __m128i vout${ABC[C:C+8]}${ABC[C:C+8]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C:C+8]});

      $if CHANNEL_TILE > 8:
        _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
      $else:
        _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
      $for C in range(16, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          _mm_storeu_si128((__m128i*) (output + ${C}), vout${ABC[C:C+16]});
        $else:
          _mm_storel_epi64((__m128i*) (output + ${C}), vout${ABC[C:C+8]}${ABC[C:C+8]});
      output += ${CHANNEL_TILE};
    }
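    // Remainder path: fewer than ${CHANNEL_TILE} channels left, processed 8 at a time.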
    if XNN_UNLIKELY(c != 0) {
      $if CHANNEL_TILE > 8:
        const int8_t* k = (const int8_t*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t));
      ${"do " if CHANNEL_TILE > 8 else ""}{
        __m128i vacc${ABC[0:4]} = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc${ABC[4:8]} = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));

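        // Same multiply-accumulate and requantization as the main loop, applied to a
        // single group of 8 channels.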
        $for K in range(KERNEL_TILE):

          const __m128i vi${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${K});
          $if SSE >= 4:
            const __m128i vxi${K}x${ABC[0:8]} = _mm_cvtepi8_epi16(vi${K}x${ABC[0:8]});
          $if CHANNEL_TILE > 8:
            $if K == 0:
              const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) k);
            $else:
              const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) (k + ${K * CHANNEL_TILE}));
          $else:
            const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE} * sizeof(int8_t)));
          $if SSE >= 4:
            const __m128i vxk${K}x${ABC[0:8]} = _mm_cvtepi8_epi16(vk${K}x${ABC[0:8]});
          $if CHANNEL_TILE > 8:
            i${K} += 8;

          $if SSE < 4:
            const __m128i vxi${K}x${ABC[0:8]} = _mm_unpacklo_epi8(vi${K}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${K}x${ABC[0:8]}));
            const __m128i vxk${K}x${ABC[0:8]} = _mm_unpacklo_epi8(vk${K}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vk${K}x${ABC[0:8]}));

          const __m128i vp${K}x${ABC[0:8]}lo = _mm_mullo_epi16(vxi${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]});
          const __m128i vp${K}x${ABC[0:8]}hi = _mm_mulhi_epi16(vxi${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]});

          vacc${ABC[0:4]} = _mm_add_epi32(vacc${ABC[0:4]}, _mm_unpacklo_epi16(vp${K}x${ABC[0:8]}lo, vp${K}x${ABC[0:8]}hi));
          vacc${ABC[4:8]} = _mm_add_epi32(vacc${ABC[4:8]}, _mm_unpackhi_epi16(vp${K}x${ABC[0:8]}lo, vp${K}x${ABC[0:8]}hi));

        $if CHANNEL_TILE > 8:
          w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
          k += 8;

        const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
        const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

        $if SSE >= 4:
          const __m128i vacc${ABC[1:4:2]} = _mm_shuffle_epi32(vacc${ABC[0:4]}, _MM_SHUFFLE(3, 3, 1, 1));
          const __m128i vacc${ABC[5:8:2]} = _mm_shuffle_epi32(vacc${ABC[4:8]}, _MM_SHUFFLE(3, 3, 1, 1));

          const __m128i vprod${ABC[0:4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[0:4]}, vmultiplier), vrounding);
          const __m128i vprod${ABC[4:8:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[4:8]}, vmultiplier), vrounding);

          const __m128i vprod${ABC[1:4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[1:4:2]}, vmultiplier), vrounding);
          const __m128i vprod${ABC[5:8:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[5:8:2]}, vmultiplier), vrounding);

          const __m128i vq31prod${ABC[0:4:2]} = _mm_srli_epi64(vprod${ABC[0:4:2]}, 31);
          const __m128i vq31prod${ABC[1:4:2]} = _mm_add_epi64(vprod${ABC[1:4:2]}, vprod${ABC[1:4:2]});
          const __m128i vq31prod${ABC[4:8:2]} = _mm_srli_epi64(vprod${ABC[4:8:2]}, 31);
          const __m128i vq31prod${ABC[5:8:2]} = _mm_add_epi64(vprod${ABC[5:8:2]}, vprod${ABC[5:8:2]});

          const __m128i vq31prod${ABC[0:4]} = _mm_blend_epi16(vq31prod${ABC[0:4:2]}, vq31prod${ABC[1:4:2]}, 0xCC);
          const __m128i vq31prod${ABC[4:8]} = _mm_blend_epi16(vq31prod${ABC[4:8:2]}, vq31prod${ABC[5:8:2]}, 0xCC);
        $else:
          const __m128i vnmask${ABC[0:4]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[0:4]});
          const __m128i vnmask${ABC[4:8]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[4:8]});

          $if SSE >= 3:
            const __m128i vabsacc${ABC[0:4]} = _mm_abs_epi32(vacc${ABC[0:4]});
            const __m128i vabsacc${ABC[4:8]} = _mm_abs_epi32(vacc${ABC[4:8]});
          $else:
            const __m128i vabsacc${ABC[0:4]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[0:4]}, vnmask${ABC[0:4]}), vnmask${ABC[0:4]});
            const __m128i vabsacc${ABC[4:8]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[4:8]}, vnmask${ABC[4:8]}), vnmask${ABC[4:8]});

          const __m128i vabsacc${ABC[1:4:2]} = _mm_shuffle_epi32(vabsacc${ABC[0:4]}, _MM_SHUFFLE(3, 3, 1, 1));
          const __m128i vabsacc${ABC[5:8:2]} = _mm_shuffle_epi32(vabsacc${ABC[4:8]}, _MM_SHUFFLE(3, 3, 1, 1));

          const __m128i vabsprod${ABC[0:4:2]} = _mm_mul_epu32(vabsacc${ABC[0:4]}, vmultiplier);
          const __m128i vabsprod${ABC[1:4:2]} = _mm_mul_epu32(vabsacc${ABC[1:4:2]}, vmultiplier);
          const __m128i vabsprod${ABC[4:8:2]} = _mm_mul_epu32(vabsacc${ABC[4:8]}, vmultiplier);
          const __m128i vabsprod${ABC[5:8:2]} = _mm_mul_epu32(vabsacc${ABC[5:8:2]}, vmultiplier);

          const __m128i vnmask${ABC[0:4:2]} = _mm_shuffle_epi32(vnmask${ABC[0:4]}, _MM_SHUFFLE(2, 2, 0, 0));
          const __m128i vnmask${ABC[1:4:2]} = _mm_shuffle_epi32(vnmask${ABC[0:4]}, _MM_SHUFFLE(3, 3, 1, 1));
          const __m128i vnmask${ABC[4:8:2]} = _mm_shuffle_epi32(vnmask${ABC[4:8]}, _MM_SHUFFLE(2, 2, 0, 0));
          const __m128i vnmask${ABC[5:8:2]} = _mm_shuffle_epi32(vnmask${ABC[4:8]}, _MM_SHUFFLE(3, 3, 1, 1));

          const __m128i vprod${ABC[0:4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[0:4:2]}, vnmask${ABC[0:4:2]}), vnmask${ABC[0:4:2]});
          const __m128i vprod${ABC[1:4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[1:4:2]}, vnmask${ABC[1:4:2]}), vnmask${ABC[1:4:2]});
          const __m128i vprod${ABC[4:8:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[4:8:2]}, vnmask${ABC[4:8:2]}), vnmask${ABC[4:8:2]});
          const __m128i vprod${ABC[5:8:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[5:8:2]}, vnmask${ABC[5:8:2]}), vnmask${ABC[5:8:2]});

          const __m128i vq31prod${ABC[0:4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[0:4:2]}, vrounding), 31);
          const __m128i vq31prod${ABC[1:4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[1:4:2]}, vrounding), 31);
          const __m128i vq31prod${ABC[4:8:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[4:8:2]}, vrounding), 31);
          const __m128i vq31prod${ABC[5:8:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[5:8:2]}, vrounding), 31);

          const __m128i vq31prod${ABC[0:4:2]}${ABC[1:4:2]} = _mm_castps_si128(_mm_shuffle_ps(
              _mm_castsi128_ps(vq31prod${ABC[0:4:2]}), _mm_castsi128_ps(vq31prod${ABC[1:4:2]}), _MM_SHUFFLE(2, 0, 2, 0)));
          const __m128i vq31prod${ABC[4:8:2]}${ABC[5:8:2]} = _mm_castps_si128(_mm_shuffle_ps(
              _mm_castsi128_ps(vq31prod${ABC[4:8:2]}), _mm_castsi128_ps(vq31prod${ABC[5:8:2]}), _MM_SHUFFLE(2, 0, 2, 0)));

          const __m128i vq31prod${ABC[0:4]} = _mm_shuffle_epi32(vq31prod${ABC[0:4:2]}${ABC[1:4:2]}, _MM_SHUFFLE(3, 1, 2, 0));
          const __m128i vq31prod${ABC[4:8]} = _mm_shuffle_epi32(vq31prod${ABC[4:8:2]}${ABC[5:8:2]}, _MM_SHUFFLE(3, 1, 2, 0));

        const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
        const __m128i vrem${ABC[0:4]} =
          _mm_add_epi32(_mm_and_si128(vq31prod${ABC[0:4]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[0:4]}));
        const __m128i vrem${ABC[4:8]} =
          _mm_add_epi32(_mm_and_si128(vq31prod${ABC[4:8]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[4:8]}));

        const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
        const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
        vacc${ABC[0:4]} =
          _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[0:4]}, vshift), _mm_cmpgt_epi32(vrem${ABC[0:4]}, vremainder_threshold));
        vacc${ABC[4:8]} =
          _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[4:8]}, vshift), _mm_cmpgt_epi32(vrem${ABC[4:8]}, vremainder_threshold));

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
        __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);

        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
        const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
        vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, voutput_min), voutput_max);

        __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});

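        // Store the remaining outputs: 8 at once while possible, then 4, 2, and 1 bytes.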
        $if CHANNEL_TILE > 8:
          if XNN_LIKELY(c >= 8) {
            _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
            output += 8;
            c -= 8;
          } else {
            if (c & 4) {
              *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
              vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
              output += 4;
            }
            if (c & 2) {
              *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
              vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
              output += 2;
            }
            if (c & 1) {
              $if SSE >= 4:
                *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
              $else:
                *output = (int8_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
              output += 1;
            }
            c = 0;
          }
        $else:
          if (c & 4) {
            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (c & 2) {
            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (c & 1) {
            $if SSE >= 4:
              *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
            $else:
              *output = (int8_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
            output += 1;
          }
      }${" while (c != 0);" if CHANNEL_TILE > 8 else ""}
    }

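    // Advance the output pointer to the start of the next output pixel.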
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}