// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/vadd.h>


$ISA = {2: "sse2", 4: "sse41"}[SSE]
void xnn_qs8_vaddc_minmax_ukernel__${ISA}_mul16_ld64_x${BATCH_TILE}(
    size_t n,
    const int8_t* input_x,
    const int8_t* input_y,
    int8_t* output,
    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  const __m128i vx_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.x_multiplier_lo);
  const __m128i vx_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.x_multiplier_hi);
  const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
  const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);

  // input_y points to a single scalar in this VADDC (vector + constant) micro-kernel,
  // so its contribution is folded into a per-call bias and broadcast to all lanes.
  __m128i vzero_point_product = _mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y);
  vzero_point_product = _mm_shuffle_epi32(vzero_point_product, _MM_SHUFFLE(0, 0, 0, 0));
  vzero_point_product = _mm_add_epi32(vzero_point_product, _mm_load_si128((const __m128i*) params->sse2.zero_point_product));
  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
    $if SSE >= 4:
      const __m128i vx${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_x));
      $for N in range(8, BATCH_TILE, 8):
        const __m128i vx${ABC[N:N+8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_x + ${N})));
    $else:
      __m128i vx${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_x);
      $for N in range(8, BATCH_TILE, 8):
        __m128i vx${ABC[N:N+8]} = _mm_loadl_epi64((const __m128i*) (input_x + ${N}));
    input_x += ${BATCH_TILE};

    $if SSE < 4:
      $for N in range(0, BATCH_TILE, 8):
        vx${ABC[N:N+8]} = _mm_unpacklo_epi8(vx${ABC[N:N+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vx${ABC[N:N+8]}));

    // Multiply the sign-extended inputs by the 32-bit x_multiplier using 16-bit
    // multiplies: combine the unsigned low-half product with x * multiplier_hi,
    // then correct the high half of the product for negative x.
    $for N in range(0, BATCH_TILE, 8):
      __m128i vxprod${ABC[N:N+8]}hi = _mm_mulhi_epu16(vx${ABC[N:N+8]}, vx_multiplier_lo);
      const __m128i vxprod${ABC[N:N+8]}lo = _mm_mullo_epi16(vx${ABC[N:N+8]}, vx_multiplier_lo);

    $for N in range(0, BATCH_TILE, 8):
      vxprod${ABC[N:N+8]}hi = _mm_add_epi16(vxprod${ABC[N:N+8]}hi, _mm_mullo_epi16(vx${ABC[N:N+8]}, vx_multiplier_hi));

    $for N in range(0, BATCH_TILE, 8):
      vxprod${ABC[N:N+8]}hi = _mm_sub_epi16(vxprod${ABC[N:N+8]}hi, _mm_and_si128(_mm_srai_epi16(vx${ABC[N:N+8]}, 15), vx_multiplier_lo));

    $for N in range(0, BATCH_TILE, 8):
      __m128i vacc${ABC[N:N+4]} = _mm_add_epi32(vzero_point_product, _mm_unpacklo_epi16(vxprod${ABC[N:N+8]}lo, vxprod${ABC[N:N+8]}hi));
      __m128i vacc${ABC[N+4:N+8]} = _mm_add_epi32(vzero_point_product, _mm_unpackhi_epi16(vxprod${ABC[N:N+8]}lo, vxprod${ABC[N:N+8]}hi));

    $for N in range(0, BATCH_TILE, 4):
      const __m128i vrem${ABC[N:N+4]} = _mm_add_epi32(_mm_and_si128(vacc${ABC[N:N+4]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[N:N+4]}));
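
    // Where the remainder exceeds the threshold, the comparison yields all-ones
    // (-1) lanes; subtracting them adds 1, rounding the arithmetic right shift
    // below to nearest instead of toward negative infinity.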
    $for N in range(0, BATCH_TILE, 4):
      vacc${ABC[N:N+4]} = _mm_sub_epi32(_mm_sra_epi32(vacc${ABC[N:N+4]}, vshift), _mm_cmpgt_epi32(vrem${ABC[N:N+4]}, vremainder_threshold));

    // Pack to 16 bits with signed saturation, add the output zero point, and
    // clamp to the output range before the final pack to 8 bits.
    $for N in range(0, BATCH_TILE, 8):
      __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]}), voutput_zero_point);

    $for N in range(0, BATCH_TILE, 8):
      vout${ABC[N:N+8]} = _mm_max_epi16(vout${ABC[N:N+8]}, voutput_min);

    $for N in range(0, BATCH_TILE, 8):
      vout${ABC[N:N+8]} = _mm_min_epi16(vout${ABC[N:N+8]}, voutput_max);

    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        const __m128i vout${ABC[N:N+16]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
      $else:
        const __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});

    $if BATCH_TILE >= 16:
      _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
    $else:
      _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
    $for N in range(16, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        _mm_storeu_si128((__m128i*) (output + ${N}), vout${ABC[N:N+16]});
      $else:
        _mm_storel_epi64((__m128i*) (output + ${N}), vout${ABC[N:N+8]}${ABC[N:N+8]});
    output += ${BATCH_TILE};
  }
  if XNN_UNLIKELY(n != 0) {
    ${"do " if BATCH_TILE > 8 else ""}{
      $if SSE >= 4:
        const __m128i vx${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_x));
      $else:
        __m128i vx${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) input_x);
      $if BATCH_TILE > 8:
        input_x += 8;

      $if SSE < 4:
        vx${ABC[0:8]} = _mm_unpacklo_epi8(vx${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vx${ABC[0:8]}));

      __m128i vxprod${ABC[0:8]}hi = _mm_mulhi_epu16(vx${ABC[0:8]}, vx_multiplier_lo);
      const __m128i vxprod${ABC[0:8]}lo = _mm_mullo_epi16(vx${ABC[0:8]}, vx_multiplier_lo);

      vxprod${ABC[0:8]}hi = _mm_add_epi16(vxprod${ABC[0:8]}hi, _mm_mullo_epi16(vx${ABC[0:8]}, vx_multiplier_hi));

      vxprod${ABC[0:8]}hi = _mm_sub_epi16(vxprod${ABC[0:8]}hi, _mm_and_si128(_mm_srai_epi16(vx${ABC[0:8]}, 15), vx_multiplier_lo));

      __m128i vacc${ABC[0:4]} = _mm_add_epi32(vzero_point_product, _mm_unpacklo_epi16(vxprod${ABC[0:8]}lo, vxprod${ABC[0:8]}hi));
      __m128i vacc${ABC[4:8]} = _mm_add_epi32(vzero_point_product, _mm_unpackhi_epi16(vxprod${ABC[0:8]}lo, vxprod${ABC[0:8]}hi));

      const __m128i vrem${ABC[0:4]} = _mm_add_epi32(_mm_and_si128(vacc${ABC[0:4]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[0:4]}));
      const __m128i vrem${ABC[4:8]} = _mm_add_epi32(_mm_and_si128(vacc${ABC[4:8]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[4:8]}));

      vacc${ABC[0:4]} = _mm_sub_epi32(_mm_sra_epi32(vacc${ABC[0:4]}, vshift), _mm_cmpgt_epi32(vrem${ABC[0:4]}, vremainder_threshold));
      vacc${ABC[4:8]} = _mm_sub_epi32(_mm_sra_epi32(vacc${ABC[4:8]}, vshift), _mm_cmpgt_epi32(vrem${ABC[4:8]}, vremainder_threshold));

      __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);
      vout${ABC[0:8]} = _mm_max_epi16(vout${ABC[0:8]}, voutput_min);
      vout${ABC[0:8]} = _mm_min_epi16(vout${ABC[0:8]}, voutput_max);

      __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});
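
      // For tiles larger than 8, write 8 bytes at a time while at least 8
      // elements remain; finish with 4-/2-/1-byte stores, shifting the
      // consumed lanes out of the vector as the output pointer advances.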
      $if BATCH_TILE > 8:
        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
          _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
          output += 8;
          n -= 8 * sizeof(int8_t);
        } else {
          if (n & (4 * sizeof(int8_t))) {
            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (n & (2 * sizeof(int8_t))) {
            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (n & (1 * sizeof(int8_t))) {
            $if SSE >= 4:
              *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
            $else:
              *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
          }
          n = 0;
        }
      $else:
        if (n & (4 * sizeof(int8_t))) {
          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
          output += 4;
        }
        if (n & (2 * sizeof(int8_t))) {
          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
          output += 2;
        }
        if (n & (1 * sizeof(int8_t))) {
          $if SSE >= 4:
            *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
          $else:
            *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
        }
    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
  }
}