// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c2-neon-mull-padal-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

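// QS8 GEMM microkernel with a 2x8 output tile, consuming K in 2-element
// groups (the "c2" packed-weight layout). Products are formed with VMULL.S8
// and accumulated pairwise into 32-bit lanes with VPADAL.S16; each 2-element
// activation group is broadcast with a 16-bit lane duplicate ("dup").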
void xnn_qs8_gemm_minmax_ukernel_2x8c2__neon_mull_padal_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

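  // Round KC up to a multiple of 2 to match the C2 weight packing, then set
  // up per-row input and output pointers. When MR == 1, row 1 aliases row 0
  // so the extra row's computation reads and writes only valid memory.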
  kc = round_up_po2(kc, 2);
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 2) {
    a1 = a0;
    c1 = c0;
  }

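  // Each iteration of the outer loop produces one 2x8 output tile. The packed
  // weights begin with 8 bias values, which seed the row-0 accumulators and
  // are copied to row 1.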
  do {
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;

    size_t k = kc;

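    // Main loop: consume K in blocks of 8, i.e. 4 groups of 2 (c0..c3).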
    while (k >= 8 * sizeof(int8_t)) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

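      // Broadcast each 2-element group of the activations by reinterpreting
      // the vector as int16x4 and duplicating one 16-bit lane, so every byte
      // pair lines up with the matching pair in the packed weights. VMULL.S8
      // widens the products to 16 bits, and VPADAL.S16 adds adjacent pairs
      // into the 32-bit accumulators, completing each group's dot product.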
      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)));
      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)));
      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)));
      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3)));
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)));
      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)));
      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)));
      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3)));
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)));
      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)));
      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)));
      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3)));
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)));
      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)));
      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)));
      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3)));
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);

      k -= 8 * sizeof(int8_t);
    }

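    // Remainder: after rounding, k is 2, 4, or 6. The 8-byte loads below may
    // read past the remaining values, which relies on the buffers providing
    // readable padding past their ends (as the XNNPACK runtime arranges).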
    if XNN_UNLIKELY(k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)));
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)));
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)));
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)));
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);

      if (k > 2 * sizeof(int8_t)) {
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)));
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)));
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)));
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)));
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);

        if (k > 4 * sizeof(int8_t)) {
          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)));
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)));
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)));
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)));
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        }
      }
    }
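    // Requantization: scale the 32-bit accumulators back to the quantized
    // domain with a saturating rounding doubling multiply (VQRDMULH) followed
    // by a rounding arithmetic right shift.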
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->neon.multiplier);
    vacc0x0123 = vqrdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqrdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqrdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqrdmulhq_s32(vacc1x4567, vmultiplier);

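    // VRSHL alone rounds ties upward, which is wrong for negative values
    // under round-to-nearest, ties-away-from-zero. The VSRA step decrements
    // negative accumulators first (the mask disables the fix when the shift
    // is zero) so that the rounding shift rounds ties away from zero.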
    const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
    const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
    vacc0x0123 = vsraq_n_s32(vacc0x0123, vbicq_s32(vacc0x0123, vzero_shift_mask), 31);
    vacc0x4567 = vsraq_n_s32(vacc0x4567, vbicq_s32(vacc0x4567, vzero_shift_mask), 31);
    vacc1x0123 = vsraq_n_s32(vacc1x0123, vbicq_s32(vacc1x0123, vzero_shift_mask), 31);
    vacc1x4567 = vsraq_n_s32(vacc1x4567, vbicq_s32(vacc1x4567, vzero_shift_mask), 31);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_shift);

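    // Narrow to 16 bits with saturation, add the output zero point, then
    // narrow to 8 bits. AArch64 uses the SQXTN/SQXTN2 pairing to fill a
    // single Q register; AArch32 combines two D registers instead.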
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);

    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

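    // Store the 2x8 tile. With at least 8 columns remaining, write full
    // 8-byte rows and rewind the A pointers for the next column block;
    // otherwise write 4-, 2-, and 1-byte pieces, shifting the vector down
    // after each partial store.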
    if (nc >= 8) {
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}