// Auto-generated file. Do not edit!
//   Template: src/qs8-vadd/avx2-mul32-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vadd.h>


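// AVX2 micro-kernel for element-wise addition of two QS8 (signed 8-bit
// quantized) vectors: inputs are sign-extended to 32 bits, scaled by
// per-input multipliers, requantized with a rounding right shift, clamped to
// [output_min, output_max], and stored as int8_t. The main loop handles
// 32 elements per iteration ("x32"), loading 64 bits at a time ("ld64").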
void xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32(
    size_t n,
    const int8_t* input_x,
    const int8_t* input_y,
    int8_t* output,
    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
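  // Broadcast the precomputed quantization parameters from 128-bit loads to
  // both 128-bit lanes of 256-bit registers. vshift stays in an XMM register
  // because _mm256_sra_epi32 takes its shift count from the low 64 bits.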
  const __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.zero_point_product));
  const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
  const __m256i vy_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.y_multiplier));
  const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
  const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
  const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
  const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
  const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));

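  // Main loop: each iteration loads four groups of 8 int8 elements from each
  // input, sign-extends them to 32 bits, and produces 32 int8 outputs.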
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
    const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
    const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
    const __m256i vy89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 8)));
    const __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 16)));
    const __m256i vyGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 16)));
    const __m256i vxOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 24)));
    const __m256i vyOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 24)));
    input_x += 32;
    input_y += 32;

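    // Accumulate in 32 bits: acc = zero_point_product + x * x_multiplier
    // + y * y_multiplier, where zero_point_product is precomputed on the host
    // from the input zero points and multipliers.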
    __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
    __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
    __m256i vaccGHIJKLMN = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxGHIJKLMN, vx_multiplier));
    __m256i vaccOPQRSTUV = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxOPQRSTUV, vx_multiplier));

    vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
    vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vy89ABCDEF, vy_multiplier));
    vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vyGHIJKLMN, vy_multiplier));
    vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vyOPQRSTUV, vy_multiplier));

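    // Requantize with a rounding arithmetic right shift: compute the shift
    // remainder, biased by the sign bit (srai by 31 yields -1 for negative
    // accumulators) so negative values round consistently, then add 1 to the
    // shifted value whenever the remainder exceeds the threshold (cmpgt
    // yields all-ones, i.e. -1, on true, and subtracting -1 increments).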
    const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
    const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
    const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm256_srai_epi32(vaccGHIJKLMN, 31));
    const __m256i vremOPQRSTUV = _mm256_add_epi32(_mm256_and_si256(vaccOPQRSTUV, vremainder_mask), _mm256_srai_epi32(vaccOPQRSTUV, 31));

    vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
    vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
    vaccGHIJKLMN = _mm256_sub_epi32(_mm256_sra_epi32(vaccGHIJKLMN, vshift), _mm256_cmpgt_epi32(vremGHIJKLMN, vremainder_threshold));
    vaccOPQRSTUV = _mm256_sub_epi32(_mm256_sra_epi32(vaccOPQRSTUV, vshift), _mm256_cmpgt_epi32(vremOPQRSTUV, vremainder_threshold));

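    // Pack 32-bit accumulators to 16 bits with signed saturation and add the
    // output zero point. _mm256_packs_epi32 packs within each 128-bit lane,
    // so the elements come out lane-interleaved (reflected in the names).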
    __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
    __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(vaccGHIJKLMN, vaccOPQRSTUV), voutput_zero_point);

    vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
    voutGHIJOPQRKLMNSTUV = _mm256_min_epi16(_mm256_max_epi16(voutGHIJOPQRKLMNSTUV, voutput_min), voutput_max);

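    // Pack to 8 bits and undo the lane interleaving: after _mm_packs_epi16
    // the 32-bit groups are ordered [0123, 89AB, 4567, CDEF], and the
    // _MM_SHUFFLE(3, 1, 2, 0) dword shuffle restores sequential order.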
    __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
    __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV), _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1)), _MM_SHUFFLE(3, 1, 2, 0));

    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
    output += 32;
  }
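  // Tail: handle the remaining 1-31 elements in groups of up to 8, reusing
  // the same arithmetic on a single vector of accumulators.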
  if XNN_UNLIKELY(n != 0) {
    do {
      const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
      const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
      input_x += 8;
      input_y += 8;

      __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));

      const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));

      vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));

      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
      vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

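      // A full group of 8 outputs can be stored directly; the final partial
      // group (1-7 elements) is stored piecewise below.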
      if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        n -= 8 * sizeof(int8_t);
      } else {
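        // Store the last 1-7 bytes: the set bits of n select 4-, 2-, and
        // 1-byte stores, shifting consumed bytes out of the vector as we go.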
        if (n & (4 * sizeof(int8_t))) {
          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (n & (2 * sizeof(int8_t))) {
          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (n & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
        }
        n = 0;
      }
    } while (n != 0);
  }
}
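
// Usage sketch (illustrative comment, not part of the generated kernel):
// n is a byte count (equal to the element count for int8_t), and params must
// be populated by XNNPACK's parameter-initialization code for QS8 addition
// before the call; the initializer's name and signature vary across XNNPACK
// versions, so none is shown here.
//
//   // Assumes `params` was initialized elsewhere from the input/output
//   // zero points, scales, and output min/max.
//   union xnn_qs8_add_params params = /* initialized for the two inputs */;
//   xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32(
//       batch_size * sizeof(int8_t), x, y, sum, &params);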