// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <immintrin.h>

#include <xnnpack/dwconv.h>

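// Up to 8 channels per iteration ("up8"), 9 kernel taps ("x9"): QU8 depthwise
// convolution with min/max output clamping, implemented with SSE2 intrinsics.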
void xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  const __m128i vkernel_zero_point = _mm_load_si128((const __m128i*) params->sse2.kernel_zero_point);
  const __m128i vzero = _mm_setzero_si128();

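  // Each iteration of the outer loop produces one output pixel. Row pointers
  // that do not point at the shared zero buffer are rebased by input_offset.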
  do {
    const uint8_t* i0 = input[0];
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }

    input = (const uint8_t**) ((uintptr_t) input + input_stride);

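    // Process the channels of this pixel 8 at a time. The packed weights stream
    // w interleaves 8 int32 biases (32 bytes) with 9 groups of 8 uint8 kernel taps.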
    size_t c = channels;
    const void* w = weights;
    for (; c >= 8; c -= 8) {
      __m128i vacc_lo = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc_hi = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16));

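      // For each of the 9 taps: load 8 input and 8 kernel bytes, zero-extend to
      // 16 bits, subtract the kernel zero point, and widen the 16x16 products to
      // 32 bits via mullo/mulhi + unpack before accumulating.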
      const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8;
      const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
      const __m128i vk0 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32));
      const __m128i vxk0 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point);
      const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0);
      const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod0_odd, vprod0_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod0_odd, vprod0_even));

      const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8;
      const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
      const __m128i vk1 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40));
      const __m128i vxk1 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1, vzero), vkernel_zero_point);
      const __m128i vprod1_odd = _mm_mullo_epi16(vxi1, vxk1);
      const __m128i vprod1_even = _mm_mulhi_epi16(vxi1, vxk1);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod1_odd, vprod1_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod1_odd, vprod1_even));

      const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8;
      const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
      const __m128i vk2 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 48));
      const __m128i vxk2 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2, vzero), vkernel_zero_point);
      const __m128i vprod2_odd = _mm_mullo_epi16(vxi2, vxk2);
      const __m128i vprod2_even = _mm_mulhi_epi16(vxi2, vxk2);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod2_odd, vprod2_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod2_odd, vprod2_even));

      const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8;
      const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
      const __m128i vk3 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 56));
      const __m128i vxk3 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3, vzero), vkernel_zero_point);
      const __m128i vprod3_odd = _mm_mullo_epi16(vxi3, vxk3);
      const __m128i vprod3_even = _mm_mulhi_epi16(vxi3, vxk3);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod3_odd, vprod3_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod3_odd, vprod3_even));

      const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8;
      const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
      const __m128i vk4 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 64));
      const __m128i vxk4 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4, vzero), vkernel_zero_point);
      const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4);
      const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod4_odd, vprod4_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod4_odd, vprod4_even));

      const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8;
      const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
      const __m128i vk5 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 72));
      const __m128i vxk5 = _mm_sub_epi16(_mm_unpacklo_epi8(vk5, vzero), vkernel_zero_point);
      const __m128i vprod5_odd = _mm_mullo_epi16(vxi5, vxk5);
      const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod5_odd, vprod5_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod5_odd, vprod5_even));

      const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8;
      const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
      const __m128i vk6 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 80));
      const __m128i vxk6 = _mm_sub_epi16(_mm_unpacklo_epi8(vk6, vzero), vkernel_zero_point);
      const __m128i vprod6_odd = _mm_mullo_epi16(vxi6, vxk6);
      const __m128i vprod6_even = _mm_mulhi_epi16(vxi6, vxk6);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod6_odd, vprod6_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod6_odd, vprod6_even));

      const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7); i7 += 8;
      const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
      const __m128i vk7 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 88));
      const __m128i vxk7 = _mm_sub_epi16(_mm_unpacklo_epi8(vk7, vzero), vkernel_zero_point);
      const __m128i vprod7_odd = _mm_mullo_epi16(vxi7, vxk7);
      const __m128i vprod7_even = _mm_mulhi_epi16(vxi7, vxk7);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod7_odd, vprod7_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod7_odd, vprod7_even));

      const __m128i vi8 = _mm_loadl_epi64((const __m128i*) i8); i8 += 8;
      const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);
      const __m128i vk8 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 96));
      const __m128i vxk8 = _mm_sub_epi16(_mm_unpacklo_epi8(vk8, vzero), vkernel_zero_point);
      const __m128i vprod8_odd = _mm_mullo_epi16(vxi8, vxk8);
      const __m128i vprod8_even = _mm_mulhi_epi16(vxi8, vxk8);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod8_odd, vprod8_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod8_odd, vprod8_even));

      w = (void*) ((uintptr_t) w + 104);

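      // Requantize: multiply by the Q31 multiplier with rounding (doubling high
      // multiply). SSE2 has no 32-bit high multiply, so the even/odd lanes are
      // multiplied as 64-bit products of absolute values, with the sign reapplied
      // before the rounding shift.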
      const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
      const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

      const __m128i vnmask_lo0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo);
      const __m128i vnmask_hi0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi);

      const __m128i vabsacc_lo0123 = _mm_sub_epi32(_mm_xor_si128(vacc_lo, vnmask_lo0123), vnmask_lo0123);
      const __m128i vabsacc_hi0123 = _mm_sub_epi32(_mm_xor_si128(vacc_hi, vnmask_hi0123), vnmask_hi0123);

      const __m128i vabsacc_lo1032 = _mm_shuffle_epi32(vabsacc_lo0123, _MM_SHUFFLE(2, 3, 0, 1));
      const __m128i vabsacc_hi1032 = _mm_shuffle_epi32(vabsacc_hi0123, _MM_SHUFFLE(2, 3, 0, 1));

      const __m128i vabsprod_lo02 = _mm_mul_epu32(vabsacc_lo0123, vmultiplier);
      const __m128i vabsprod_hi02 = _mm_mul_epu32(vabsacc_hi0123, vmultiplier);

      const __m128i vnmask_lo02 = _mm_shuffle_epi32(vnmask_lo0123, _MM_SHUFFLE(2, 2, 0, 0));
      const __m128i vnmask_hi02 = _mm_shuffle_epi32(vnmask_hi0123, _MM_SHUFFLE(2, 2, 0, 0));

      const __m128i vprod_lo02 = _mm_sub_epi64(_mm_xor_si128(vabsprod_lo02, vnmask_lo02), vnmask_lo02);
      const __m128i vprod_hi02 = _mm_sub_epi64(_mm_xor_si128(vabsprod_hi02, vnmask_hi02), vnmask_hi02);

      const __m128i vq31prod_lo02 = _mm_srli_epi64(_mm_add_epi64(vprod_lo02, vrounding), 31);
      const __m128i vq31prod_hi02 = _mm_srli_epi64(_mm_add_epi64(vprod_hi02, vrounding), 31);

      const __m128i vabsprod_lo13 = _mm_mul_epu32(vabsacc_lo1032, vmultiplier);
      const __m128i vabsprod_hi13 = _mm_mul_epu32(vabsacc_hi1032, vmultiplier);

      const __m128i vnmask_lo13 = _mm_shuffle_epi32(vnmask_lo0123, _MM_SHUFFLE(3, 3, 1, 1));
      const __m128i vnmask_hi13 = _mm_shuffle_epi32(vnmask_hi0123, _MM_SHUFFLE(3, 3, 1, 1));

      const __m128i vprod_lo13 = _mm_sub_epi64(_mm_xor_si128(vabsprod_lo13, vnmask_lo13), vnmask_lo13);
      const __m128i vprod_hi13 = _mm_sub_epi64(_mm_xor_si128(vabsprod_hi13, vnmask_hi13), vnmask_hi13);

      const __m128i vq31prod_lo13 = _mm_srli_epi64(_mm_add_epi64(vprod_lo13, vrounding), 31);
      const __m128i vq31prod_hi13 = _mm_srli_epi64(_mm_add_epi64(vprod_hi13, vrounding), 31);

      const __m128i vq31prod_lo0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod_lo02), _mm_castsi128_ps(vq31prod_lo13), _MM_SHUFFLE(2, 0, 2, 0)));
      const __m128i vq31prod_hi0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod_hi02), _mm_castsi128_ps(vq31prod_hi13), _MM_SHUFFLE(2, 0, 2, 0)));

      const __m128i vq31prod_lo0123 = _mm_shuffle_epi32(vq31prod_lo0213, _MM_SHUFFLE(3, 1, 2, 0));
      const __m128i vq31prod_hi0123 = _mm_shuffle_epi32(vq31prod_hi0213, _MM_SHUFFLE(3, 1, 2, 0));

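      // Rounding right shift: add 1 to the shifted value whenever the discarded
      // remainder exceeds the threshold, then add the output zero point, saturate
      // to 8 bits, and clamp to [output_min, output_max].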
      const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);

      const __m128i vrem_lo0123 =
        _mm_add_epi32(_mm_and_si128(vq31prod_lo0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod_lo0123));
      const __m128i vrem_hi0123 =
        _mm_add_epi32(_mm_and_si128(vq31prod_hi0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod_hi0123));

      const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
      const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);

      const __m128i vout_lo = _mm_sub_epi32(_mm_sra_epi32(vq31prod_lo0123, vshift), _mm_cmpgt_epi32(vrem_lo0123, vremainder_threshold));
      const __m128i vout_hi = _mm_sub_epi32(_mm_sra_epi32(vq31prod_hi0123, vshift), _mm_cmpgt_epi32(vrem_hi0123, vremainder_threshold));

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
      __m128i vout = _mm_adds_epi16(_mm_packs_epi32(vout_lo, vout_hi), voutput_zero_point);
      vout = _mm_packus_epi16(vout, vout);
      vout = _mm_min_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_max));
      vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_min));

      _mm_storel_epi64((__m128i*) output, vout); output += 8;
    }
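    // Remainder: 1-7 leftover channels go through the same computation; only the
    // final store differs, writing 4, 2, and 1 bytes as needed.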
    if (c != 0) {
      __m128i vacc_lo = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc_hi = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16));

      const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8;
      const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
      const __m128i vk0 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32));
      const __m128i vxk0 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point);
      const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0);
      const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod0_odd, vprod0_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod0_odd, vprod0_even));

      const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8;
      const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
      const __m128i vk1 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40));
      const __m128i vxk1 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1, vzero), vkernel_zero_point);
      const __m128i vprod1_odd = _mm_mullo_epi16(vxi1, vxk1);
      const __m128i vprod1_even = _mm_mulhi_epi16(vxi1, vxk1);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod1_odd, vprod1_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod1_odd, vprod1_even));

      const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8;
      const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
      const __m128i vk2 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 48));
      const __m128i vxk2 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2, vzero), vkernel_zero_point);
      const __m128i vprod2_odd = _mm_mullo_epi16(vxi2, vxk2);
      const __m128i vprod2_even = _mm_mulhi_epi16(vxi2, vxk2);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod2_odd, vprod2_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod2_odd, vprod2_even));

      const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8;
      const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
      const __m128i vk3 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 56));
      const __m128i vxk3 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3, vzero), vkernel_zero_point);
      const __m128i vprod3_odd = _mm_mullo_epi16(vxi3, vxk3);
      const __m128i vprod3_even = _mm_mulhi_epi16(vxi3, vxk3);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod3_odd, vprod3_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod3_odd, vprod3_even));

      const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8;
      const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
      const __m128i vk4 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 64));
      const __m128i vxk4 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4, vzero), vkernel_zero_point);
      const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4);
      const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod4_odd, vprod4_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod4_odd, vprod4_even));

      const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8;
      const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
      const __m128i vk5 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 72));
      const __m128i vxk5 = _mm_sub_epi16(_mm_unpacklo_epi8(vk5, vzero), vkernel_zero_point);
      const __m128i vprod5_odd = _mm_mullo_epi16(vxi5, vxk5);
      const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod5_odd, vprod5_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod5_odd, vprod5_even));

      const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8;
      const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
      const __m128i vk6 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 80));
      const __m128i vxk6 = _mm_sub_epi16(_mm_unpacklo_epi8(vk6, vzero), vkernel_zero_point);
      const __m128i vprod6_odd = _mm_mullo_epi16(vxi6, vxk6);
      const __m128i vprod6_even = _mm_mulhi_epi16(vxi6, vxk6);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod6_odd, vprod6_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod6_odd, vprod6_even));

      const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7); i7 += 8;
      const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
      const __m128i vk7 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 88));
      const __m128i vxk7 = _mm_sub_epi16(_mm_unpacklo_epi8(vk7, vzero), vkernel_zero_point);
      const __m128i vprod7_odd = _mm_mullo_epi16(vxi7, vxk7);
      const __m128i vprod7_even = _mm_mulhi_epi16(vxi7, vxk7);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod7_odd, vprod7_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod7_odd, vprod7_even));

      const __m128i vi8 = _mm_loadl_epi64((const __m128i*) i8); i8 += 8;
      const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);
      const __m128i vk8 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 96));
      const __m128i vxk8 = _mm_sub_epi16(_mm_unpacklo_epi8(vk8, vzero), vkernel_zero_point);
      const __m128i vprod8_odd = _mm_mullo_epi16(vxi8, vxk8);
      const __m128i vprod8_even = _mm_mulhi_epi16(vxi8, vxk8);
      vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vprod8_odd, vprod8_even));
      vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vprod8_odd, vprod8_even));

      const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
      const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

      const __m128i vnmask_lo0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo);
      const __m128i vnmask_hi0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi);

      const __m128i vabsacc_lo0123 = _mm_sub_epi32(_mm_xor_si128(vacc_lo, vnmask_lo0123), vnmask_lo0123);
      const __m128i vabsacc_hi0123 = _mm_sub_epi32(_mm_xor_si128(vacc_hi, vnmask_hi0123), vnmask_hi0123);

      const __m128i vabsacc_lo1032 = _mm_shuffle_epi32(vabsacc_lo0123, _MM_SHUFFLE(2, 3, 0, 1));
      const __m128i vabsacc_hi1032 = _mm_shuffle_epi32(vabsacc_hi0123, _MM_SHUFFLE(2, 3, 0, 1));

      const __m128i vabsprod_lo02 = _mm_mul_epu32(vabsacc_lo0123, vmultiplier);
      const __m128i vabsprod_hi02 = _mm_mul_epu32(vabsacc_hi0123, vmultiplier);

      const __m128i vnmask_lo02 = _mm_shuffle_epi32(vnmask_lo0123, _MM_SHUFFLE(2, 2, 0, 0));
      const __m128i vnmask_hi02 = _mm_shuffle_epi32(vnmask_hi0123, _MM_SHUFFLE(2, 2, 0, 0));

      const __m128i vprod_lo02 = _mm_sub_epi64(_mm_xor_si128(vabsprod_lo02, vnmask_lo02), vnmask_lo02);
      const __m128i vprod_hi02 = _mm_sub_epi64(_mm_xor_si128(vabsprod_hi02, vnmask_hi02), vnmask_hi02);

      const __m128i vq31prod_lo02 = _mm_srli_epi64(_mm_add_epi64(vprod_lo02, vrounding), 31);
      const __m128i vq31prod_hi02 = _mm_srli_epi64(_mm_add_epi64(vprod_hi02, vrounding), 31);

      const __m128i vabsprod_lo13 = _mm_mul_epu32(vabsacc_lo1032, vmultiplier);
      const __m128i vabsprod_hi13 = _mm_mul_epu32(vabsacc_hi1032, vmultiplier);

      const __m128i vnmask_lo13 = _mm_shuffle_epi32(vnmask_lo0123, _MM_SHUFFLE(3, 3, 1, 1));
      const __m128i vnmask_hi13 = _mm_shuffle_epi32(vnmask_hi0123, _MM_SHUFFLE(3, 3, 1, 1));

      const __m128i vprod_lo13 = _mm_sub_epi64(_mm_xor_si128(vabsprod_lo13, vnmask_lo13), vnmask_lo13);
      const __m128i vprod_hi13 = _mm_sub_epi64(_mm_xor_si128(vabsprod_hi13, vnmask_hi13), vnmask_hi13);

      const __m128i vq31prod_lo13 = _mm_srli_epi64(_mm_add_epi64(vprod_lo13, vrounding), 31);
      const __m128i vq31prod_hi13 = _mm_srli_epi64(_mm_add_epi64(vprod_hi13, vrounding), 31);

      const __m128i vq31prod_lo0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod_lo02), _mm_castsi128_ps(vq31prod_lo13), _MM_SHUFFLE(2, 0, 2, 0)));
      const __m128i vq31prod_hi0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod_hi02), _mm_castsi128_ps(vq31prod_hi13), _MM_SHUFFLE(2, 0, 2, 0)));

      const __m128i vq31prod_lo0123 = _mm_shuffle_epi32(vq31prod_lo0213, _MM_SHUFFLE(3, 1, 2, 0));
      const __m128i vq31prod_hi0123 = _mm_shuffle_epi32(vq31prod_hi0213, _MM_SHUFFLE(3, 1, 2, 0));

      const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);

      const __m128i vrem_lo0123 =
        _mm_add_epi32(_mm_and_si128(vq31prod_lo0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod_lo0123));
      const __m128i vrem_hi0123 =
        _mm_add_epi32(_mm_and_si128(vq31prod_hi0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod_hi0123));

      const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
      const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);

      const __m128i vout_lo = _mm_sub_epi32(_mm_sra_epi32(vq31prod_lo0123, vshift), _mm_cmpgt_epi32(vrem_lo0123, vremainder_threshold));
      const __m128i vout_hi = _mm_sub_epi32(_mm_sra_epi32(vq31prod_hi0123, vshift), _mm_cmpgt_epi32(vrem_hi0123, vremainder_threshold));

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
      __m128i vout = _mm_adds_epi16(_mm_packs_epi32(vout_lo, vout_hi), voutput_zero_point);
      vout = _mm_packus_epi16(vout, vout);
      vout = _mm_min_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_max));
      vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_min));

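      // Store the low c result bytes, shifting consumed lanes out of vout after
      // each partial store.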
      if (c & 4) {
        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout);
        output += 4;
        vout = _mm_srli_epi64(vout, 32);
      }
      if (c & 2) {
        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout, 0);
        output += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (c & 1) {
        *((uint8_t*) output) = (uint8_t) _mm_cvtsi128_si32(vout);
        output += 1;
      }
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}