// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>

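// An f16 GEMM microkernel computing an up-to-8x8 tile of C, starting from the
// partial accumulators passed in `acc` (the "inc" variant) and finishing with
// scale and min/max clamping. Per the XNNPACK naming scheme, "ld64" refers to
// the 64-bit (4 x fp16) loads from each row of A in the main loop.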
void xnn_f16_gemminc_minmax_ukernel_8x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const void* restrict acc,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

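  // Set up per-row pointers into A and C. When mr < 8, pointers for the
  // excess rows alias the previous row, so those rows are computed
  // redundantly and store the same values to the same addresses.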
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride);
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const __fp16* a5 = (const __fp16*) ((uintptr_t) a4 + a_stride);
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const __fp16* a6 = (const __fp16*) ((uintptr_t) a5 + a_stride);
  __fp16* c6 = (__fp16*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const __fp16* a7 = (const __fp16*) ((uintptr_t) a6 + a_stride);
  __fp16* c7 = (__fp16*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }

  do {
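    // Initialize the eight row accumulators from the `acc` buffer instead of
    // from packed bias; this is what makes this the gemminc ("GEMM with
    // incoming accumulators") variant.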
    float16x8_t vacc0x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc2x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc3x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc4x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc5x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc6x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc7x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));

    size_t k = kc;
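    // Main loop: consume 4 fp16 elements (64 bits) of each A row per
    // iteration, against four consecutive 8-wide rows of packed B.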
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;
      const float16x4_t va4 = vld1_f16(a4); a4 += 4;
      const float16x4_t va5 = vld1_f16(a5); a5 += 4;
      const float16x4_t va6 = vld1_f16(a6); a6 += 4;
      const float16x4_t va7 = vld1_f16(a7); a7 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

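      // On AArch64 the lane form of the fp16 FMA intrinsic is used directly;
      // otherwise each A lane is first broadcast with vdupq_lane_f16. The
      // same split repeats for lanes 1-3 below.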
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
        vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
        vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
        const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
        const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);
        const float16x8_t va6c0 = vdupq_lane_f16(va6, 0);
        const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c0, vb01234567c0);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c0, vb01234567c0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
        vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
        vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
        const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
        const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);
        const float16x8_t va6c1 = vdupq_lane_f16(va6, 1);
        const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c1, vb01234567c1);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c1, vb01234567c1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
        vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
        vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
        const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
        const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);
        const float16x8_t va6c2 = vdupq_lane_f16(va6, 2);
        const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c2, vb01234567c2);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c2, vb01234567c2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
        vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
        vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
        const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
        const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);
        const float16x8_t va6c3 = vdupq_lane_f16(va6, 3);
        const float16x8_t va7c3 = vdupq_lane_f16(va7, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c3, vb01234567c3);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c3, vb01234567c3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
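    // Remainder loop: process the last 1-3 k elements one at a time,
    // broadcasting each A value across a full vector.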
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
        const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
        const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;
        const float16x8_t va6 = vld1q_dup_f16(a6); a6 += 1;
        const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
        vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6, vb01234567);
        vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7, vb01234567);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

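    // Post-process: scale every accumulator by params->scale.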
    const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
    vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
    vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
    vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
    vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);
    vacc4x01234567 = vmulq_f16(vacc4x01234567, vscale);
    vacc5x01234567 = vmulq_f16(vacc5x01234567, vscale);
    vacc6x01234567 = vmulq_f16(vacc6x01234567, vscale);
    vacc7x01234567 = vmulq_f16(vacc7x01234567, vscale);

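    // Clamp to [min, max]: cap at max first, then floor at min.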
    const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
    vacc6x01234567 = vminq_f16(vacc6x01234567, vmax);
    vacc7x01234567 = vminq_f16(vacc7x01234567, vmax);

    const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    vacc6x01234567 = vmaxq_f16(vacc6x01234567, vmin);
    vacc7x01234567 = vmaxq_f16(vacc7x01234567, vmin);

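    // Full tile: store all 8 columns of each row and advance the C pointers
    // to the next column tile.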
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c5, vacc5x01234567);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);
      vst1q_f16(c6, vacc6x01234567);
      c6 = (__fp16*) ((uintptr_t) c6 + cn_stride);
      vst1q_f16(c7, vacc7x01234567);
      c7 = (__fp16*) ((uintptr_t) c7 + cn_stride);

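      // Rewind the A pointers by kc so the same rows are reused for the next
      // 8-column tile of C.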
      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);
      a4 = (const __fp16*) ((uintptr_t) a4 - kc);
      a5 = (const __fp16*) ((uintptr_t) a5 - kc);
      a6 = (const __fp16*) ((uintptr_t) a6 - kc);
      a7 = (const __fp16*) ((uintptr_t) a7 - kc);

      nc -= 8;
    } else {
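      // Partial tile (nc < 8): store the remaining columns via a 4/2/1
      // decomposition of nc, shifting already-stored lanes out of the
      // accumulators between steps.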
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      float16x4_t vacc6x0123 = vget_low_f16(vacc6x01234567);
      float16x4_t vacc7x0123 = vget_low_f16(vacc7x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c5, vacc5x0123); c5 += 4;
        vst1_f16(c6, vacc6x0123); c6 += 4;
        vst1_f16(c7, vacc7x0123); c7 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc5x0123 = vget_high_f16(vacc5x01234567);
        vacc6x0123 = vget_high_f16(vacc6x01234567);
        vacc7x0123 = vget_high_f16(vacc7x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c6, 1), vreinterpret_u32_f16(vacc6x0123), 0); c6 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c7, 1), vreinterpret_u32_f16(vacc7x0123), 0); c7 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
        vacc6x0123 = vext_f16(vacc6x0123, vacc6x0123, 2);
        vacc7x0123 = vext_f16(vacc7x0123, vacc7x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c5, vacc5x0123, 0);
        vst1_lane_f16(c6, vacc6x0123, 0);
        vst1_lane_f16(c7, vacc7x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}