// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SSE_HEADER = {2: "emmintrin.h", 3: "tmmintrin.h", 4: "smmintrin.h"}[SSE]
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert KERNEL_TILE >= 2
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/dwconv.h>


$ISA = {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_qs8_dwconv_minmax_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__${ISA}_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(channels != 0);
  assert(output_width != 0);

  do {
    $for K in range(KERNEL_TILE):
      const int8_t* i${K} = input[${K}];
      assert(i${K} != NULL);
      if XNN_UNPREDICTABLE(i${K} != zero) {
        i${K} = (const int8_t*) ((uintptr_t) i${K} + input_offset);
      }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
      __m128i vacc${ABC[0:4]} = _mm_loadu_si128((const __m128i*) w);
      $for C in range(4, CHANNEL_TILE, 4):
        __m128i vacc${ABC[C:C+4]} = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + ${C} * sizeof(int32_t)));

      $for K in range(KERNEL_TILE):

        $for C in range(0, CHANNEL_TILE, 8):
          $if C == 0:
            const __m128i vi${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${K});
          $else:
            const __m128i vi${K}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) (i${K} + ${C}));
          $if SSE >= 4:
            const __m128i vxi${K}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(vi${K}x${ABC[C:C+8]});
          const __m128i vk${K}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE + C} * sizeof(int8_t)));
          $if SSE >= 4:
            const __m128i vxk${K}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(vk${K}x${ABC[C:C+8]});
        i${K} += ${CHANNEL_TILE};

        $if SSE < 4:
          $for C in range(0, CHANNEL_TILE, 8):
            const __m128i vxi${K}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vi${K}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${K}x${ABC[C:C+8]}));
            const __m128i vxk${K}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vk${K}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vk${K}x${ABC[C:C+8]}));

        $for C in range(0, CHANNEL_TILE, 8):
          const __m128i vp${K}x${ABC[C:C+8]}lo = _mm_mullo_epi16(vxi${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]});
          const __m128i vp${K}x${ABC[C:C+8]}hi = _mm_mulhi_epi16(vxi${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]});

        $for C in range(0, CHANNEL_TILE, 8):
          vacc${ABC[C:C+4]} = _mm_add_epi32(vacc${ABC[C:C+4]}, _mm_unpacklo_epi16(vp${K}x${ABC[C:C+8]}lo, vp${K}x${ABC[C:C+8]}hi));
          vacc${ABC[C+4:C+8]} = _mm_add_epi32(vacc${ABC[C+4:C+8]}, _mm_unpackhi_epi16(vp${K}x${ABC[C:C+8]}lo, vp${K}x${ABC[C:C+8]}hi));

      w = (const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(int8_t));
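
      // Requantize the 32-bit accumulators to 8-bit outputs: Q31 fixed-point multiplication with rounding,
      // a rounding arithmetic right shift, output zero-point addition, and clamping.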
      const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
      const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

      $if SSE >= 4:
        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vacc${ABC[C+1:C+4:2]} = _mm_shuffle_epi32(vacc${ABC[C:C+4]}, _MM_SHUFFLE(3, 3, 1, 1));

          const __m128i vprod${ABC[C:C+4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[C:C+4]}, vmultiplier), vrounding);
          const __m128i vprod${ABC[C+1:C+4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[C+1:C+4:2]}, vmultiplier), vrounding);

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4:2]} = _mm_srli_epi64(vprod${ABC[C:C+4:2]}, 31);
          const __m128i vq31prod${ABC[C+1:C+4:2]} = _mm_add_epi64(vprod${ABC[C+1:C+4:2]}, vprod${ABC[C+1:C+4:2]});

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4]} = _mm_blend_epi16(vq31prod${ABC[C:C+4:2]}, vq31prod${ABC[C+1:C+4:2]}, 0xCC);
      $else:
        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vnmask${ABC[C:C+4]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[C:C+4]});

        $for C in range(0, CHANNEL_TILE, 4):
          $if SSE >= 3:
            const __m128i vabsacc${ABC[C:C+4]} = _mm_abs_epi32(vacc${ABC[C:C+4]});
          $else:
            const __m128i vabsacc${ABC[C:C+4]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[C:C+4]}, vnmask${ABC[C:C+4]}), vnmask${ABC[C:C+4]});

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vabsacc${ABC[C+1:C+4:2]} = _mm_shuffle_epi32(vabsacc${ABC[C:C+4]}, _MM_SHUFFLE(3, 3, 1, 1));

          const __m128i vabsprod${ABC[C:C+4:2]} = _mm_mul_epu32(vabsacc${ABC[C:C+4]}, vmultiplier);
          const __m128i vabsprod${ABC[C+1:C+4:2]} = _mm_mul_epu32(vabsacc${ABC[C+1:C+4:2]}, vmultiplier);

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vnmask${ABC[C:C+4:2]} = _mm_shuffle_epi32(vnmask${ABC[C:C+4]}, _MM_SHUFFLE(2, 2, 0, 0));
          const __m128i vnmask${ABC[C+1:C+4:2]} = _mm_shuffle_epi32(vnmask${ABC[C:C+4]}, _MM_SHUFFLE(3, 3, 1, 1));

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vprod${ABC[C:C+4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[C:C+4:2]}, vnmask${ABC[C:C+4:2]}), vnmask${ABC[C:C+4:2]});
          const __m128i vprod${ABC[C+1:C+4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[C+1:C+4:2]}, vnmask${ABC[C+1:C+4:2]}), vnmask${ABC[C+1:C+4:2]});

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[C:C+4:2]}, vrounding), 31);
          const __m128i vq31prod${ABC[C+1:C+4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[C+1:C+4:2]}, vrounding), 31);

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4:2]}${ABC[C+1:C+4:2]} = _mm_castps_si128(_mm_shuffle_ps(
              _mm_castsi128_ps(vq31prod${ABC[C:C+4:2]}), _mm_castsi128_ps(vq31prod${ABC[C+1:C+4:2]}), _MM_SHUFFLE(2, 0, 2, 0)));

        $for C in range(0, CHANNEL_TILE, 4):
          const __m128i vq31prod${ABC[C:C+4]} = _mm_shuffle_epi32(vq31prod${ABC[C:C+4:2]}${ABC[C+1:C+4:2]}, _MM_SHUFFLE(3, 1, 2, 0));

      const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vrem${ABC[C:C+4]} =
          _mm_add_epi32(_mm_and_si128(vq31prod${ABC[C:C+4]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[C:C+4]}));

      const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
      const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
      $for C in range(0, CHANNEL_TILE, 4):
        vacc${ABC[C:C+4]} =
          _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[C:C+4]}, vshift), _mm_cmpgt_epi32(vrem${ABC[C:C+4]}, vremainder_threshold));

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
      $for C in range(0, CHANNEL_TILE, 8):
        __m128i vout${ABC[C:C+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[C:C+4]}, vacc${ABC[C+4:C+8]}), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
      const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
      $for C in range(0, CHANNEL_TILE, 8):
        vout${ABC[C:C+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[C:C+8]}, voutput_min), voutput_max);
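
      // Pack the clamped 16-bit values down to 8 bits and store one full channel tile.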
      $for C in range(0, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          __m128i vout${ABC[C:C+16]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C+8:C+16]});
        $else:
          __m128i vout${ABC[C:C+8]}${ABC[C:C+8]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C:C+8]});

      $if CHANNEL_TILE > 8:
        _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
      $else:
        _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
      $for C in range(16, CHANNEL_TILE, 16):
        $if C + 8 < CHANNEL_TILE:
          _mm_storeu_si128((__m128i*) (output + ${C}), vout${ABC[C:C+16]});
        $else:
          _mm_storel_epi64((__m128i*) (output + ${C}), vout${ABC[C:C+8]}${ABC[C:C+8]});
      output += ${CHANNEL_TILE};
    }
    if XNN_UNLIKELY(c != 0) {
      $if CHANNEL_TILE > 8:
        const int8_t* k = (const int8_t*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t));
      ${"do " if CHANNEL_TILE > 8 else ""}{
        __m128i vacc${ABC[0:4]} = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc${ABC[4:8]} = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));

        $for K in range(KERNEL_TILE):
          const __m128i vi${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${K});
          $if SSE >= 4:
            const __m128i vxi${K}x${ABC[0:8]} = _mm_cvtepi8_epi16(vi${K}x${ABC[0:8]});
          $if CHANNEL_TILE > 8:
            $if K == 0:
              const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) k);
            $else:
              const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) (k + ${K * CHANNEL_TILE}));
          $else:
            const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE} * sizeof(int8_t)));
          $if SSE >= 4:
            const __m128i vxk${K}x${ABC[0:8]} = _mm_cvtepi8_epi16(vk${K}x${ABC[0:8]});
          $if CHANNEL_TILE > 8:
            i${K} += 8;

          $if SSE < 4:
            const __m128i vxi${K}x${ABC[0:8]} = _mm_unpacklo_epi8(vi${K}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${K}x${ABC[0:8]}));
            const __m128i vxk${K}x${ABC[0:8]} = _mm_unpacklo_epi8(vk${K}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vk${K}x${ABC[0:8]}));

          const __m128i vp${K}x${ABC[0:8]}lo = _mm_mullo_epi16(vxi${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]});
          const __m128i vp${K}x${ABC[0:8]}hi = _mm_mulhi_epi16(vxi${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]});

          vacc${ABC[0:4]} = _mm_add_epi32(vacc${ABC[0:4]}, _mm_unpacklo_epi16(vp${K}x${ABC[0:8]}lo, vp${K}x${ABC[0:8]}hi));
          vacc${ABC[4:8]} = _mm_add_epi32(vacc${ABC[4:8]}, _mm_unpackhi_epi16(vp${K}x${ABC[0:8]}lo, vp${K}x${ABC[0:8]}hi));

        $if CHANNEL_TILE > 8:
          w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
          k += 8;
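
        // Requantize the remaining channels (handled 8 at a time) with the same Q31 scheme as the main loop.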
        const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
        const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

        $if SSE >= 4:
          const __m128i vacc${ABC[1:4:2]} = _mm_shuffle_epi32(vacc${ABC[0:4]}, _MM_SHUFFLE(3, 3, 1, 1));
          const __m128i vacc${ABC[5:8:2]} = _mm_shuffle_epi32(vacc${ABC[4:8]}, _MM_SHUFFLE(3, 3, 1, 1));

          const __m128i vprod${ABC[0:4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[0:4]}, vmultiplier), vrounding);
          const __m128i vprod${ABC[4:8:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[4:8]}, vmultiplier), vrounding);
          const __m128i vprod${ABC[1:4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[1:4:2]}, vmultiplier), vrounding);
          const __m128i vprod${ABC[5:8:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[5:8:2]}, vmultiplier), vrounding);

          const __m128i vq31prod${ABC[0:4:2]} = _mm_srli_epi64(vprod${ABC[0:4:2]}, 31);
          const __m128i vq31prod${ABC[1:4:2]} = _mm_add_epi64(vprod${ABC[1:4:2]}, vprod${ABC[1:4:2]});
          const __m128i vq31prod${ABC[4:8:2]} = _mm_srli_epi64(vprod${ABC[4:8:2]}, 31);
          const __m128i vq31prod${ABC[5:8:2]} = _mm_add_epi64(vprod${ABC[5:8:2]}, vprod${ABC[5:8:2]});

          const __m128i vq31prod${ABC[0:4]} = _mm_blend_epi16(vq31prod${ABC[0:4:2]}, vq31prod${ABC[1:4:2]}, 0xCC);
          const __m128i vq31prod${ABC[4:8]} = _mm_blend_epi16(vq31prod${ABC[4:8:2]}, vq31prod${ABC[5:8:2]}, 0xCC);
        $else:
          const __m128i vnmask${ABC[0:4]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[0:4]});
          const __m128i vnmask${ABC[4:8]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[4:8]});

          $if SSE >= 3:
            const __m128i vabsacc${ABC[0:4]} = _mm_abs_epi32(vacc${ABC[0:4]});
            const __m128i vabsacc${ABC[4:8]} = _mm_abs_epi32(vacc${ABC[4:8]});
          $else:
            const __m128i vabsacc${ABC[0:4]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[0:4]}, vnmask${ABC[0:4]}), vnmask${ABC[0:4]});
            const __m128i vabsacc${ABC[4:8]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[4:8]}, vnmask${ABC[4:8]}), vnmask${ABC[4:8]});

          const __m128i vabsacc${ABC[1:4:2]} = _mm_shuffle_epi32(vabsacc${ABC[0:4]}, _MM_SHUFFLE(3, 3, 1, 1));
          const __m128i vabsacc${ABC[5:8:2]} = _mm_shuffle_epi32(vabsacc${ABC[4:8]}, _MM_SHUFFLE(3, 3, 1, 1));

          const __m128i vabsprod${ABC[0:4:2]} = _mm_mul_epu32(vabsacc${ABC[0:4]}, vmultiplier);
          const __m128i vabsprod${ABC[1:4:2]} = _mm_mul_epu32(vabsacc${ABC[1:4:2]}, vmultiplier);
          const __m128i vabsprod${ABC[4:8:2]} = _mm_mul_epu32(vabsacc${ABC[4:8]}, vmultiplier);
          const __m128i vabsprod${ABC[5:8:2]} = _mm_mul_epu32(vabsacc${ABC[5:8:2]}, vmultiplier);

          const __m128i vnmask${ABC[0:4:2]} = _mm_shuffle_epi32(vnmask${ABC[0:4]}, _MM_SHUFFLE(2, 2, 0, 0));
          const __m128i vnmask${ABC[1:4:2]} = _mm_shuffle_epi32(vnmask${ABC[0:4]}, _MM_SHUFFLE(3, 3, 1, 1));
          const __m128i vnmask${ABC[4:8:2]} = _mm_shuffle_epi32(vnmask${ABC[4:8]}, _MM_SHUFFLE(2, 2, 0, 0));
          const __m128i vnmask${ABC[5:8:2]} = _mm_shuffle_epi32(vnmask${ABC[4:8]}, _MM_SHUFFLE(3, 3, 1, 1));

          const __m128i vprod${ABC[0:4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[0:4:2]}, vnmask${ABC[0:4:2]}), vnmask${ABC[0:4:2]});
          const __m128i vprod${ABC[1:4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[1:4:2]}, vnmask${ABC[1:4:2]}), vnmask${ABC[1:4:2]});
          const __m128i vprod${ABC[4:8:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[4:8:2]}, vnmask${ABC[4:8:2]}), vnmask${ABC[4:8:2]});
          const __m128i vprod${ABC[5:8:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[5:8:2]}, vnmask${ABC[5:8:2]}), vnmask${ABC[5:8:2]});

          const __m128i vq31prod${ABC[0:4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[0:4:2]}, vrounding), 31);
          const __m128i vq31prod${ABC[1:4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[1:4:2]}, vrounding), 31);
          const __m128i vq31prod${ABC[4:8:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[4:8:2]}, vrounding), 31);
          const __m128i vq31prod${ABC[5:8:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[5:8:2]}, vrounding), 31);

          const __m128i vq31prod${ABC[0:4:2]}${ABC[1:4:2]} = _mm_castps_si128(_mm_shuffle_ps(
              _mm_castsi128_ps(vq31prod${ABC[0:4:2]}), _mm_castsi128_ps(vq31prod${ABC[1:4:2]}), _MM_SHUFFLE(2, 0, 2, 0)));
          const __m128i vq31prod${ABC[4:8:2]}${ABC[5:8:2]} = _mm_castps_si128(_mm_shuffle_ps(
              _mm_castsi128_ps(vq31prod${ABC[4:8:2]}), _mm_castsi128_ps(vq31prod${ABC[5:8:2]}), _MM_SHUFFLE(2, 0, 2, 0)));

          const __m128i vq31prod${ABC[0:4]} = _mm_shuffle_epi32(vq31prod${ABC[0:4:2]}${ABC[1:4:2]}, _MM_SHUFFLE(3, 1, 2, 0));
          const __m128i vq31prod${ABC[4:8]} = _mm_shuffle_epi32(vq31prod${ABC[4:8:2]}${ABC[5:8:2]}, _MM_SHUFFLE(3, 1, 2, 0));
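
        // Rounding arithmetic right shift: subtracting the (vrem > threshold) comparison mask adds 1 in lanes where the discarded remainder rounds up.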
        const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
        const __m128i vrem${ABC[0:4]} =
          _mm_add_epi32(_mm_and_si128(vq31prod${ABC[0:4]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[0:4]}));
        const __m128i vrem${ABC[4:8]} =
          _mm_add_epi32(_mm_and_si128(vq31prod${ABC[4:8]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[4:8]}));

        const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
        const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
        vacc${ABC[0:4]} =
          _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[0:4]}, vshift), _mm_cmpgt_epi32(vrem${ABC[0:4]}, vremainder_threshold));
        vacc${ABC[4:8]} =
          _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[4:8]}, vshift), _mm_cmpgt_epi32(vrem${ABC[4:8]}, vremainder_threshold));

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
        __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);

        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
        const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
        vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, voutput_min), voutput_max);

        __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});

        $if CHANNEL_TILE > 8:
          if XNN_LIKELY(c >= 8) {
            _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
            output += 8;
            c -= 8;
          } else {
            if (c & 4) {
              *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
              vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
              output += 4;
            }
            if (c & 2) {
              *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
              vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
              output += 2;
            }
            if (c & 1) {
              $if SSE >= 4:
                *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
              $else:
                *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
              output += 1;
            }
            c = 0;
          }
        $else:
          if (c & 4) {
            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (c & 2) {
            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (c & 1) {
            $if SSE >= 4:
              *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
            $else:
              *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
            output += 1;
          }
      }${" while (c != 0);" if CHANNEL_TILE > 8 else ""}
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}