// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

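// Template for AVX2 QS8 (signed 8-bit quantized) element-wise addition with
// output clamping. The $-prefixed lines are directives expanded by XNNPACK's
// xngen code generator; BATCH_TILE sets how many elements the main loop
// handles per iteration.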
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vadd.h>


void xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x${BATCH_TILE}(
    size_t n,
    const int8_t* input_x,
    const int8_t* input_y,
    int8_t* output,
    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
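  // Load the requantization parameters; the per-vector ones are broadcast
  // into both 128-bit lanes of a 256-bit register.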
  const __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.zero_point_product));
  const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
  const __m256i vy_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.y_multiplier));
  const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
  const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
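  // The output clamping parameters are kept in 256-bit registers when more
  // than one 8-element group is packed per iteration; a 128-bit register
  // suffices when BATCH_TILE == 8.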
  $if BATCH_TILE > 8:
    const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
    const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
    const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
  $else:
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
    const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);

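  // Main loop: process BATCH_TILE int8 elements per iteration, sign-extending
  // each group of 8 inputs to 32-bit lanes of a 256-bit register.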
  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
    const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
    const __m256i vy${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
    $for N in range(8, BATCH_TILE, 8):
      const __m256i vx${ABC[N:N+8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + ${N})));
      const __m256i vy${ABC[N:N+8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + ${N})));
    input_x += ${BATCH_TILE};
    input_y += ${BATCH_TILE};

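    // acc = zero_point_product + x * x_multiplier, where zero_point_product
    // is a precomputed bias that folds in both input zero points.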
    $for N in range(0, BATCH_TILE, 8):
      __m256i vacc${ABC[N:N+8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[N:N+8]}, vx_multiplier));

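    // acc += y * y_multiplier.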
    $for N in range(0, BATCH_TILE, 8):
      vacc${ABC[N:N+8]} = _mm256_add_epi32(vacc${ABC[N:N+8]}, _mm256_mullo_epi32(vy${ABC[N:N+8]}, vy_multiplier));

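    // Remainder of the upcoming arithmetic right shift; adding (acc >> 31)
    // (-1 for negative accumulators) adjusts the threshold comparison so the
    // shift rounds to nearest with ties away from zero.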
    $for N in range(0, BATCH_TILE, 8):
      const __m256i vrem${ABC[N:N+8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[N:N+8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[N:N+8]}, 31));

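    // Shift right and round: the comparison yields all-ones (-1) in lanes
    // where the remainder exceeds the threshold, so the subtraction
    // increments exactly those lanes.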
    $for N in range(0, BATCH_TILE, 8):
      vacc${ABC[N:N+8]} = _mm256_sub_epi32(_mm256_sra_epi32(vacc${ABC[N:N+8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[N:N+8]}, vremainder_threshold));

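    // Pack pairs of 32-bit accumulators to 16 bits with signed saturation and
    // add the output zero point. _mm256_packs_epi32 interleaves the two
    // 128-bit lanes, which the vout variable naming reflects.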
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        __m256i vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_adds_epi16(_mm256_packs_epi32(vacc${ABC[N:N+8]}, vacc${ABC[N+8:N+16]}), voutput_zero_point);
      $elif BATCH_TILE > 8:
        __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extracti128_si256(vacc${ABC[N:N+8]}, 1)), _mm256_castsi256_si128(voutput_zero_point));
      $else:
        __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extracti128_si256(vacc${ABC[N:N+8]}, 1)), voutput_zero_point);

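    // Clamp to [output_min, output_max] while still in the 16-bit domain.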
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_min_epi16(_mm256_max_epi16(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, voutput_min), voutput_max);
      $elif BATCH_TILE > 8:
        vout${ABC[N:N+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[N:N+8]}, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
      $else:
        vout${ABC[N:N+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[N:N+8]}, voutput_min), voutput_max);

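    // Pack to 8 bits with signed saturation; the dword shuffle undoes the
    // lane interleave so the bytes land in sequential order.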
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        __m128i vout${ABC[N:N+16]} = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}), _mm256_extracti128_si256(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, 1)), _MM_SHUFFLE(3, 1, 2, 0));
      $else:
        __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});

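    // Store the BATCH_TILE results.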
    $if BATCH_TILE >= 16:
      _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
    $else:
      _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
    $for N in range(16, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        _mm_storeu_si128((__m128i*) (output + ${N}), vout${ABC[N:N+16]});
      $else:
        _mm_storel_epi64((__m128i*) (output + ${N}), vout${ABC[N:N+8]}${ABC[N:N+8]});
    output += ${BATCH_TILE};
  }
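  // Handle the remaining 1 to BATCH_TILE-1 elements in groups of up to 8,
  // using the same requantization sequence as the main loop.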
  if XNN_UNLIKELY(n != 0) {
    ${"do " if BATCH_TILE > 8 else ""}{
      const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
      const __m256i vy${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
      $if BATCH_TILE > 8:
        input_x += 8;
        input_y += 8;

      __m256i vacc${ABC[0:8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[0:8]}, vx_multiplier));

      vacc${ABC[0:8]} = _mm256_add_epi32(vacc${ABC[0:8]}, _mm256_mullo_epi32(vy${ABC[0:8]}, vy_multiplier));

      const __m256i vrem${ABC[0:8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[0:8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[0:8]}, 31));

      vacc${ABC[0:8]} = _mm256_sub_epi32(_mm256_sra_epi32(vacc${ABC[0:8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[0:8]}, vremainder_threshold));

      $if BATCH_TILE > 8:
        __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), _mm256_castsi256_si128(voutput_zero_point));
        vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
      $else:
        __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), voutput_zero_point);
        vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, voutput_min), voutput_max);
      __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});

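      // Store the tail: a full 8-byte group while at least 8 elements remain
      // (multi-group kernels only), then 4-, 2-, and 1-byte pieces selected
      // by the low bits of n.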
      $if BATCH_TILE > 8:
        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
          _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
          output += 8;
          n -= 8 * sizeof(int8_t);
        } else {
          if (n & (4 * sizeof(int8_t))) {
            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (n & (2 * sizeof(int8_t))) {
            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (n & (1 * sizeof(int8_t))) {
            *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
          }
          n = 0;
        }
      $else:
        if (n & (4 * sizeof(int8_t))) {
          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
          output += 4;
        }
        if (n & (2 * sizeof(int8_t))) {
          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
          output += 2;
        }
        if (n & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
        }
    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
  }
}