// Auto-generated file. Do not edit!
// Template: src/f16-gemm/neonfp16arith-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>


void xnn_f16_gemminc_minmax_ukernel_6x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const void*restrict acc,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

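  // Set up the A (input) and C (output) row pointers for up to 6 rows;
  // when mr < 6, the pointers for the rows past the last valid one are clamped to that row.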
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride);
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const __fp16* a5 = (const __fp16*) ((uintptr_t) a4 + a_stride);
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

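  // Outer loop: each iteration produces a 6x8 tile of C, consuming 8 columns of the N dimension per pass.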
  do {
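    // Load the initial accumulators from the acc buffer instead of from bias values packed with
    // the weights: the "inc" variant continues accumulation on top of previously computed partial sums.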
    float16x8_t vacc0x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc2x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc3x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc4x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc5x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));

    size_t k = kc;
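    // Main K loop: process 4 elements of K per iteration, loading 64 bits (4 fp16 values) from each row of A.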
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;
      const float16x4_t va4 = vld1_f16(a4); a4 += 4;
      const float16x4_t va5 = vld1_f16(a5); a5 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

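      // On AArch64, the FMA selects a lane of the 64-bit A register directly (vfmaq_lane_f16);
      // otherwise the lane is broadcast into a full vector first and combined with a plain vfmaq_f16.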
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
        const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
        const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
        const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
        const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
        const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
        const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
        const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
        const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
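    // Remainder loop: handle the last 1-3 elements of K one at a time, broadcasting each A element
    // across a full vector.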
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
        const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
        const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

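    // Scale the accumulators, then clamp them to the [min, max] output range.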
    const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
    vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
    vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
    vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
    vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);
    vacc4x01234567 = vmulq_f16(vacc4x01234567, vscale);
    vacc5x01234567 = vmulq_f16(vacc5x01234567, vscale);

    const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);

    const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);

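    // Common case: store a full 8-column tile for every row, advance the C pointers to the next tile,
    // and rewind the A pointers to the start of each row for the next column block.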
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c5, vacc5x01234567);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);

      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);
      a4 = (const __fp16*) ((uintptr_t) a4 - kc);
      a5 = (const __fp16*) ((uintptr_t) a5 - kc);

      nc -= 8;
    } else {
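      // Final partial tile of 1-7 columns: store 4, then 2, then 1 values according to the bits of nc,
      // moving the remaining values into the low lanes after each partial store.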
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c5, vacc5x0123); c5 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc5x0123 = vget_high_f16(vacc5x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c5, vacc5x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}