Lines Matching refs:__m128i

26 const __m128i vkernel_zero_point = _mm_load_si128((const __m128i*) params->sse2.kernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
27 const __m128i vzero = _mm_setzero_si128(); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
72 __m128i vacc_lo = _mm_loadu_si128((const __m128i*) w); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
73 __m128i vacc_hi = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
75 const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
76 const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
77 const __m128i vk0 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
78 const __m128i vxk0 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
79 const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
80 const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
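Only lines containing __m128i are listed, so each tap's two accumulate statements (the gap at source lines 81-83, presumably the vacc_lo/vacc_hi updates) are absent. A minimal sketch of one complete tap under that assumption, restoring the accumulation with the standard mullo/mulhi widening idiom (the wrapper function and its signature are illustrative, not from the source):

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

/* Hedged sketch: one tap of the widening multiply-accumulate, with the
   accumulation steps (which match no `__m128i` query) restored on the
   assumption that they follow the usual XNNPACK pattern. */
static void dwconv_tap0(const uint8_t* i0, const void* w,
                        __m128i vkernel_zero_point,
                        __m128i* vacc_lo, __m128i* vacc_hi) {
  const __m128i vzero = _mm_setzero_si128();
  const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0);           /* 8 input bytes */
  const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);                 /* zero-extend to 8 x uint16 */
  const __m128i vk0 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32));
  const __m128i vxk0 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point);
  const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0);             /* low 16 bits of each product */
  const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0);            /* high 16 bits (signed) */
  /* Interleaving the low/high halves reconstructs the exact 32-bit
     products, four per register, which are added into the accumulators: */
  *vacc_lo = _mm_add_epi32(*vacc_lo, _mm_unpacklo_epi16(vprod0_odd, vprod0_even));
  *vacc_hi = _mm_add_epi32(*vacc_hi, _mm_unpackhi_epi16(vprod0_odd, vprod0_even));
}

Since vxi0 is non-negative and vxk0 fits in int16, the signed high multiply is exact, so the interleave yields the true 32-bit products.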
84 const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
85 const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
86 const __m128i vk1 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
87 const __m128i vxk1 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
88 const __m128i vprod1_odd = _mm_mullo_epi16(vxi1, vxk1); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
89 const __m128i vprod1_even = _mm_mulhi_epi16(vxi1, vxk1); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
93 const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
94 const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
95 const __m128i vk2 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 48)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
96 const __m128i vxk2 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
97 const __m128i vprod2_odd = _mm_mullo_epi16(vxi2, vxk2); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
98 const __m128i vprod2_even = _mm_mulhi_epi16(vxi2, vxk2); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
102 const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
103 const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
104 const __m128i vk3 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 56)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
105 const __m128i vxk3 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
106 const __m128i vprod3_odd = _mm_mullo_epi16(vxi3, vxk3); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
107 const __m128i vprod3_even = _mm_mulhi_epi16(vxi3, vxk3); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
111 const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
112 const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
113 const __m128i vk4 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 64)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
114 const __m128i vxk4 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
115 const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
116 const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
120 const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
121 const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
122 const __m128i vk5 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 72)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
123 const __m128i vxk5 = _mm_sub_epi16(_mm_unpacklo_epi8(vk5, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
124 const __m128i vprod5_odd = _mm_mullo_epi16(vxi5, vxk5); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
125 const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
129 const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
130 const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
131 const __m128i vk6 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 80)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
132 const __m128i vxk6 = _mm_sub_epi16(_mm_unpacklo_epi8(vk6, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
133 const __m128i vprod6_odd = _mm_mullo_epi16(vxi6, vxk6); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
134 const __m128i vprod6_even = _mm_mulhi_epi16(vxi6, vxk6); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
138 const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7); i7 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
139 const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
140 const __m128i vk7 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 88)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
141 const __m128i vxk7 = _mm_sub_epi16(_mm_unpacklo_epi8(vk7, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
142 const __m128i vprod7_odd = _mm_mullo_epi16(vxi7, vxk7); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
143 const __m128i vprod7_even = _mm_mulhi_epi16(vxi7, vxk7); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
147 const __m128i vi8 = _mm_loadl_epi64((const __m128i*) i8); i8 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
148 const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
149 const __m128i vk8 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 96)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
150 const __m128i vxk8 = _mm_sub_epi16(_mm_unpacklo_epi8(vk8, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
151 const __m128i vprod8_odd = _mm_mullo_epi16(vxi8, vxk8); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
152 const __m128i vprod8_even = _mm_mulhi_epi16(vxi8, vxk8); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
158 const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
159 const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
161 const __m128i vnmask_lo0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
162 const __m128i vnmask_hi0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
164 const __m128i vabsacc_lo0123 = _mm_sub_epi32(_mm_xor_si128(vacc_lo, vnmask_lo0123), vnmask_lo0123); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
165 const __m128i vabsacc_hi0123 = _mm_sub_epi32(_mm_xor_si128(vacc_hi, vnmask_hi0123), vnmask_hi0123); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
167 const __m128i vabsacc_lo1032 = _mm_shuffle_epi32(vabsacc_lo0123, _MM_SHUFFLE(2, 3, 0, 1)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
168 const __m128i vabsacc_hi1032 = _mm_shuffle_epi32(vabsacc_hi0123, _MM_SHUFFLE(2, 3, 0, 1)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
170 const __m128i vabsprod_lo02 = _mm_mul_epu32(vabsacc_lo0123, vmultiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
171 const __m128i vabsprod_hi02 = _mm_mul_epu32(vabsacc_hi0123, vmultiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
173 const __m128i vnmask_lo02 = _mm_shuffle_epi32(vnmask_lo0123, _MM_SHUFFLE(2, 2, 0, 0)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
174 const __m128i vnmask_hi02 = _mm_shuffle_epi32(vnmask_hi0123, _MM_SHUFFLE(2, 2, 0, 0)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
176 const __m128i vprod_lo02 = _mm_sub_epi64(_mm_xor_si128(vabsprod_lo02, vnmask_lo02), vnmask_lo02); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
177 const __m128i vprod_hi02 = _mm_sub_epi64(_mm_xor_si128(vabsprod_hi02, vnmask_hi02), vnmask_hi02); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
179 const __m128i vq31prod_lo02 = _mm_srli_epi64(_mm_add_epi64(vprod_lo02, vrounding), 31); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
180 const __m128i vq31prod_hi02 = _mm_srli_epi64(_mm_add_epi64(vprod_hi02, vrounding), 31); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
182 const __m128i vabsprod_lo13 = _mm_mul_epu32(vabsacc_lo1032, vmultiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
183 const __m128i vabsprod_hi13 = _mm_mul_epu32(vabsacc_hi1032, vmultiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
185 const __m128i vnmask_lo13 = _mm_shuffle_epi32(vnmask_lo0123, _MM_SHUFFLE(3, 3, 1, 1)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
186 const __m128i vnmask_hi13 = _mm_shuffle_epi32(vnmask_hi0123, _MM_SHUFFLE(3, 3, 1, 1)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
188 const __m128i vprod_lo13 = _mm_sub_epi64(_mm_xor_si128(vabsprod_lo13, vnmask_lo13), vnmask_lo13); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
189 const __m128i vprod_hi13 = _mm_sub_epi64(_mm_xor_si128(vabsprod_hi13, vnmask_hi13), vnmask_hi13); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
191 const __m128i vq31prod_lo13 = _mm_srli_epi64(_mm_add_epi64(vprod_lo13, vrounding), 31); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
192 const __m128i vq31prod_hi13 = _mm_srli_epi64(_mm_add_epi64(vprod_hi13, vrounding), 31); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
194 const __m128i vq31prod_lo0213 = _mm_castps_si128(_mm_shuffle_ps( in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
196 const __m128i vq31prod_hi0213 = _mm_castps_si128(_mm_shuffle_ps( in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
199 const __m128i vq31prod_lo0123 = _mm_shuffle_epi32(vq31prod_lo0213, _MM_SHUFFLE(3, 1, 2, 0)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
200 const __m128i vq31prod_hi0123 = _mm_shuffle_epi32(vq31prod_hi0213, _MM_SHUFFLE(3, 1, 2, 0)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
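The vq31prod_*0213 statements at source lines 194 and 196 continue onto lines 195 and 197-198, which contain no __m128i and are therefore omitted; they recombine the even (02) and odd (13) lane results with _mm_shuffle_ps before the _MM_SHUFFLE(3, 1, 2, 0) reorder restores lane order. The surrounding vnmask/vabsacc/vabsprod/vprod sequence exists because SSE2 lacks a signed 32x32->64-bit multiply (_mm_mul_epi32 arrived with SSE4.1): the kernel multiplies absolute values with _mm_mul_epu32 and restores the sign with the branchless two's-complement identity -x == (x ^ mask) - mask. A scalar model of one 32-bit lane, assuming the usual rounding constant of 1 << 30:

#include <stdint.h>

/* Scalar model of the Q31 requantization multiply (a sketch for reading
   the listing, not the shipped code). */
static int32_t q31_mul_model(int32_t acc, uint32_t multiplier) {
  const uint32_t nmask = (uint32_t) -(int32_t) (acc < 0);         /* vnmask: all-ones if acc < 0 */
  const uint64_t absacc = ((uint32_t) acc ^ nmask) - nmask;       /* vabsacc: |acc| */
  const uint64_t absprod = absacc * (uint64_t) multiplier;        /* vabsprod: _mm_mul_epu32 */
  const uint64_t nmask64 = acc < 0 ? UINT64_MAX : 0;
  const uint64_t prod = (absprod ^ nmask64) - nmask64;            /* vprod: sign restored */
  /* vq31prod: add the rounding constant (assumed 1 << 30), logical shift
     right by 31, keep the low 32 bits, as _mm_srli_epi64 does per lane. */
  return (int32_t) (uint32_t) ((prod + (UINT64_C(1) << 30)) >> 31);
}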
202 const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
204 const __m128i vrem_lo0123 = in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
206 const __m128i vrem_hi0123 = in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
209 const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
210 const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
212 const __m128i vout_lo = _mm_sub_epi32(_mm_sra_epi32(vq31prod_lo0123, vshift), _mm_cmpgt_epi32(vrem_lo0123, vremainder_threshold)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
213 const __m128i vout_hi = _mm_sub_epi32(_mm_sra_epi32(vq31prod_hi0123, vshift), _mm_cmpgt_epi32(vrem_hi0123, vremainder_threshold)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
215 …const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
216 __m128i vout = _mm_adds_epi16(_mm_packs_epi32(vout_lo, vout_hi), voutput_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
218 vout = _mm_min_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_max)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
219 vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_min)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
221 _mm_storel_epi64((__m128i*) output, vout); output += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
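Source lines 202-221 finish the requantization: the vrem values collect the bits the shift discards (biased by the sign comparison) so the arithmetic right shift rounds to nearest, and the results are packed, offset by the output zero point, clamped, and stored. (A pack step, presumably vout = _mm_packus_epi16(vout, vout) at source line 217, declares no __m128i and is absent from the listing.) A scalar model of one lane, folding the saturating packs into the final clamp:

#include <stdint.h>

/* Scalar model of the output stage (a sketch; the saturating packs in the
   vector code are treated as plain clamps here). */
static uint8_t output_stage_model(int32_t q31prod, uint32_t shift,
                                  int32_t remainder_mask, int32_t remainder_threshold,
                                  int32_t output_zero_point, uint8_t qmin, uint8_t qmax) {
  /* vrem: bits shifted out, biased by -1 for negative inputs
     (the _mm_cmpgt_epi32(_mm_setzero_si128(), q31prod) term). */
  const int32_t remainder = (q31prod & remainder_mask) + (q31prod < 0 ? -1 : 0);
  /* vout: arithmetic shift, bumped by 1 when the remainder exceeds the
     threshold (subtracting the all-ones compare mask adds 1). */
  int32_t out = (q31prod >> shift) + (remainder > remainder_threshold ? 1 : 0);
  out += output_zero_point;                 /* _mm_adds_epi16(packs, voutput_zero_point) */
  if (out > qmax) out = qmax;               /* _mm_min_epu8 against output_max */
  if (out < qmin) out = qmin;               /* _mm_max_epu8 against output_min */
  return (uint8_t) out;
}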
224 __m128i vacc_lo = _mm_loadu_si128((const __m128i*) w); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
225 __m128i vacc_hi = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
227 const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
228 const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
229 const __m128i vk0 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
230 const __m128i vxk0 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
231 const __m128i vprod0_odd = _mm_mullo_epi16(vxi0, vxk0); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
232 const __m128i vprod0_even = _mm_mulhi_epi16(vxi0, vxk0); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
236 const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
237 const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
238 const __m128i vk1 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 40)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
239 const __m128i vxk1 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
240 const __m128i vprod1_odd = _mm_mullo_epi16(vxi1, vxk1); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
241 const __m128i vprod1_even = _mm_mulhi_epi16(vxi1, vxk1); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
245 const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
246 const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
247 const __m128i vk2 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 48)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
248 const __m128i vxk2 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
249 const __m128i vprod2_odd = _mm_mullo_epi16(vxi2, vxk2); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
250 const __m128i vprod2_even = _mm_mulhi_epi16(vxi2, vxk2); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
254 const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
255 const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
256 const __m128i vk3 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 56)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
257 const __m128i vxk3 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
258 const __m128i vprod3_odd = _mm_mullo_epi16(vxi3, vxk3); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
259 const __m128i vprod3_even = _mm_mulhi_epi16(vxi3, vxk3); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
263 const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
264 const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
265 const __m128i vk4 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 64)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
266 const __m128i vxk4 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
267 const __m128i vprod4_odd = _mm_mullo_epi16(vxi4, vxk4); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
268 const __m128i vprod4_even = _mm_mulhi_epi16(vxi4, vxk4); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
272 const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
273 const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
274 const __m128i vk5 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 72)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
275 const __m128i vxk5 = _mm_sub_epi16(_mm_unpacklo_epi8(vk5, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
276 const __m128i vprod5_odd = _mm_mullo_epi16(vxi5, vxk5); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
277 const __m128i vprod5_even = _mm_mulhi_epi16(vxi5, vxk5); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
281 const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
282 const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
283 const __m128i vk6 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 80)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
284 const __m128i vxk6 = _mm_sub_epi16(_mm_unpacklo_epi8(vk6, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
285 const __m128i vprod6_odd = _mm_mullo_epi16(vxi6, vxk6); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
286 const __m128i vprod6_even = _mm_mulhi_epi16(vxi6, vxk6); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
290 const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7); i7 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
291 const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
292 const __m128i vk7 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 88)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
293 const __m128i vxk7 = _mm_sub_epi16(_mm_unpacklo_epi8(vk7, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
294 const __m128i vprod7_odd = _mm_mullo_epi16(vxi7, vxk7); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
295 const __m128i vprod7_even = _mm_mulhi_epi16(vxi7, vxk7); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
299 const __m128i vi8 = _mm_loadl_epi64((const __m128i*) i8); i8 += 8; in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
300 const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
301 const __m128i vk8 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 96)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
302 const __m128i vxk8 = _mm_sub_epi16(_mm_unpacklo_epi8(vk8, vzero), vkernel_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
303 const __m128i vprod8_odd = _mm_mullo_epi16(vxi8, vxk8); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
304 const __m128i vprod8_even = _mm_mulhi_epi16(vxi8, vxk8); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
308 const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
309 const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
311 const __m128i vnmask_lo0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_lo); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
312 const __m128i vnmask_hi0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc_hi); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
314 const __m128i vabsacc_lo0123 = _mm_sub_epi32(_mm_xor_si128(vacc_lo, vnmask_lo0123), vnmask_lo0123); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
315 const __m128i vabsacc_hi0123 = _mm_sub_epi32(_mm_xor_si128(vacc_hi, vnmask_hi0123), vnmask_hi0123); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
317 const __m128i vabsacc_lo1032 = _mm_shuffle_epi32(vabsacc_lo0123, _MM_SHUFFLE(2, 3, 0, 1)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
318 const __m128i vabsacc_hi1032 = _mm_shuffle_epi32(vabsacc_hi0123, _MM_SHUFFLE(2, 3, 0, 1)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
320 const __m128i vabsprod_lo02 = _mm_mul_epu32(vabsacc_lo0123, vmultiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
321 const __m128i vabsprod_hi02 = _mm_mul_epu32(vabsacc_hi0123, vmultiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
323 const __m128i vnmask_lo02 = _mm_shuffle_epi32(vnmask_lo0123, _MM_SHUFFLE(2, 2, 0, 0)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
324 const __m128i vnmask_hi02 = _mm_shuffle_epi32(vnmask_hi0123, _MM_SHUFFLE(2, 2, 0, 0)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
326 const __m128i vprod_lo02 = _mm_sub_epi64(_mm_xor_si128(vabsprod_lo02, vnmask_lo02), vnmask_lo02); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
327 const __m128i vprod_hi02 = _mm_sub_epi64(_mm_xor_si128(vabsprod_hi02, vnmask_hi02), vnmask_hi02); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
329 const __m128i vq31prod_lo02 = _mm_srli_epi64(_mm_add_epi64(vprod_lo02, vrounding), 31); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
330 const __m128i vq31prod_hi02 = _mm_srli_epi64(_mm_add_epi64(vprod_hi02, vrounding), 31); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
332 const __m128i vabsprod_lo13 = _mm_mul_epu32(vabsacc_lo1032, vmultiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
333 const __m128i vabsprod_hi13 = _mm_mul_epu32(vabsacc_hi1032, vmultiplier); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
335 const __m128i vnmask_lo13 = _mm_shuffle_epi32(vnmask_lo0123, _MM_SHUFFLE(3, 3, 1, 1)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
336 const __m128i vnmask_hi13 = _mm_shuffle_epi32(vnmask_hi0123, _MM_SHUFFLE(3, 3, 1, 1)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
338 const __m128i vprod_lo13 = _mm_sub_epi64(_mm_xor_si128(vabsprod_lo13, vnmask_lo13), vnmask_lo13); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
339 const __m128i vprod_hi13 = _mm_sub_epi64(_mm_xor_si128(vabsprod_hi13, vnmask_hi13), vnmask_hi13); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
341 const __m128i vq31prod_lo13 = _mm_srli_epi64(_mm_add_epi64(vprod_lo13, vrounding), 31); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
342 const __m128i vq31prod_hi13 = _mm_srli_epi64(_mm_add_epi64(vprod_hi13, vrounding), 31); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
344 const __m128i vq31prod_lo0213 = _mm_castps_si128(_mm_shuffle_ps( in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
346 const __m128i vq31prod_hi0213 = _mm_castps_si128(_mm_shuffle_ps( in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
349 const __m128i vq31prod_lo0123 = _mm_shuffle_epi32(vq31prod_lo0213, _MM_SHUFFLE(3, 1, 2, 0)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
350 const __m128i vq31prod_hi0123 = _mm_shuffle_epi32(vq31prod_hi0213, _MM_SHUFFLE(3, 1, 2, 0)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
352 const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
354 const __m128i vrem_lo0123 = in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
356 const __m128i vrem_hi0123 = in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
359 const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
360 const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
362 const __m128i vout_lo = _mm_sub_epi32(_mm_sra_epi32(vq31prod_lo0123, vshift), _mm_cmpgt_epi32(vrem_lo0123, vremainder_threshold)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
363 const __m128i vout_hi = _mm_sub_epi32(_mm_sra_epi32(vq31prod_hi0123, vshift), _mm_cmpgt_epi32(vrem_hi0123, vremainder_threshold)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
365 …const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
366 __m128i vout = _mm_adds_epi16(_mm_packs_epi32(vout_lo, vout_hi), voutput_zero_point); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
368 vout = _mm_min_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_max)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
369 vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_min)); in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
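This second block (source lines 224-369) repeats the same body once for a final partial group of up to 8 channels; the partial-store tail that follows declares no new __m128i values, so the matches end here. For reference, an end-to-end scalar model of one output channel that combines the sketches above (illustrative only; remainder_threshold is assumed to equal remainder_mask >> 1, and the rounding constant 1 << 30):

#include <stddef.h>
#include <stdint.h>

/* End-to-end scalar reference for one channel of the 9-tap depthwise
   convolution (parameter names mirror params->sse2.*). */
static uint8_t dwconv_up8x9_ref(const uint8_t x[9], const uint8_t k[9],
                                int32_t bias, int32_t kernel_zero_point,
                                uint32_t multiplier, uint32_t shift,
                                int32_t output_zero_point,
                                uint8_t qmin, uint8_t qmax) {
  int32_t acc = bias;                        /* vacc_lo/vacc_hi start from the bias in w */
  for (size_t t = 0; t < 9; t++) {           /* the nine vi/vk tap groups */
    acc += (int32_t) x[t] * ((int32_t) k[t] - kernel_zero_point);
  }
  /* Q31 fixed-point multiply with rounding (see q31_mul_model above): */
  const int64_t prod = (int64_t) acc * (int64_t) multiplier;
  const int32_t q31prod = (int32_t) ((prod + (INT64_C(1) << 30)) >> 31);
  /* Rounding arithmetic shift via the remainder comparison: */
  const int32_t remainder_mask = (int32_t) ((UINT32_C(1) << shift) - 1);
  const int32_t remainder = (q31prod & remainder_mask) + (q31prod < 0 ? -1 : 0);
  int32_t out = (q31prod >> shift) + (remainder > (remainder_mask >> 1) ? 1 : 0);
  out += output_zero_point;
  if (out > qmax) out = qmax;
  if (out < qmin) out = qmin;
  return (uint8_t) out;
}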