// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>

void xnn_f16_gemminc_minmax_ukernel_6x16__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const void* restrict acc,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

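  // Set up per-row pointers into A and C. When mr < 6, out-of-range rows are
  // aliased to the previous valid row, so the extra computation is redundant
  // but all loads and stores stay in bounds.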
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride);
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const __fp16* a5 = (const __fp16*) ((uintptr_t) a4 + a_stride);
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
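    // GEMMINC variant: initialize the 6x16 accumulator tile from the acc
    // buffer rather than from bias values packed into w.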
    float16x8_t vacc0x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc0x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc1x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc2x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc2x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc3x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc3x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc4x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc4x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc5x01234567 = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    float16x8_t vacc5x89ABCDEF = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));

    size_t k = kc;
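    // Main loop (ld64): consume 4 fp16 elements (64 bits) of each A row per
    // iteration, against 4 groups of 16 packed weights.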
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;
      const float16x4_t va4 = vld1_f16(a4); a4 += 4;
      const float16x4_t va5 = vld1_f16(a5); a5 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

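      // AArch64 can fold the lane select into the FMA via vfmaq_lane_f16;
      // on other targets the lane is broadcast with vdupq_lane_f16 first and
      // a plain vfmaq_f16 is used.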
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
        const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
        const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c0, vb89ABCDEFc0);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c0, vb89ABCDEFc0);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c0, vb89ABCDEFc0);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c0, vb89ABCDEFc0);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c0, vb89ABCDEFc0);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c0, vb89ABCDEFc0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
        const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
        const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c1, vb89ABCDEFc1);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c1, vb89ABCDEFc1);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c1, vb89ABCDEFc1);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c1, vb89ABCDEFc1);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c1, vb89ABCDEFc1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
        const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
        const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c2, vb89ABCDEFc2);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c2, vb89ABCDEFc2);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c2, vb89ABCDEFc2);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c2, vb89ABCDEFc2);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c2, vb89ABCDEFc2);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c2, vb89ABCDEFc2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
        vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
        vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
        vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
        vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
        vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
        const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
        const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c3, vb89ABCDEF c3);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c3, vb89ABCDEFc3);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c3, vb89ABCDEFc3);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c3, vb89ABCDEFc3);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c3, vb89ABCDEFc3);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
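    // Remainder: handle kc % 4 leftover elements one fp16 at a time,
    // broadcasting each A element across a full vector.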
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
        const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
        const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
        const float16x8_t vb89ABCDEF = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
        vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
        vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
        vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
        vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4, vb89ABCDEF);
        vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5, vb89ABCDEF);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

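    // Scale the accumulators by params->scale.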
    const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
    vacc0x01234567 = vmulq_f16(vacc0x01234567, vscale);
    vacc1x01234567 = vmulq_f16(vacc1x01234567, vscale);
    vacc2x01234567 = vmulq_f16(vacc2x01234567, vscale);
    vacc3x01234567 = vmulq_f16(vacc3x01234567, vscale);
    vacc4x01234567 = vmulq_f16(vacc4x01234567, vscale);
    vacc5x01234567 = vmulq_f16(vacc5x01234567, vscale);
    vacc0x89ABCDEF = vmulq_f16(vacc0x89ABCDEF, vscale);
    vacc1x89ABCDEF = vmulq_f16(vacc1x89ABCDEF, vscale);
    vacc2x89ABCDEF = vmulq_f16(vacc2x89ABCDEF, vscale);
    vacc3x89ABCDEF = vmulq_f16(vacc3x89ABCDEF, vscale);
    vacc4x89ABCDEF = vmulq_f16(vacc4x89ABCDEF, vscale);
    vacc5x89ABCDEF = vmulq_f16(vacc5x89ABCDEF, vscale);

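    // Clamp the results to [params->min, params->max].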
    const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
    vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = vminq_f16(vacc4x89ABCDEF, vmax);
    vacc5x89ABCDEF = vminq_f16(vacc5x89ABCDEF, vmax);

    const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = vmaxq_f16(vacc4x89ABCDEF, vmin);
    vacc5x89ABCDEF = vmaxq_f16(vacc5x89ABCDEF, vmin);

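    // Common case: store a full 6x16 tile, advance C by cn_stride, and rewind
    // the A pointers for the next column tile. Otherwise fall through to the
    // nc remainder path below.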
    if XNN_LIKELY(nc >= 16) {
      vst1q_f16(c0, vacc0x01234567);
      vst1q_f16(c0 + 8, vacc0x89ABCDEF);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      vst1q_f16(c1 + 8, vacc1x89ABCDEF);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      vst1q_f16(c2 + 8, vacc2x89ABCDEF);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      vst1q_f16(c3 + 8, vacc3x89ABCDEF);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      vst1q_f16(c4 + 8, vacc4x89ABCDEF);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c5, vacc5x01234567);
      vst1q_f16(c5 + 8, vacc5x89ABCDEF);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);

      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);
      a4 = (const __fp16*) ((uintptr_t) a4 - kc);
      a5 = (const __fp16*) ((uintptr_t) a5 - kc);

      nc -= 16;
    } else {
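      // Partial tile: store the remaining nc (< 16) columns by decomposing
      // nc into its 8/4/2/1 bits, shifting the surviving lanes down after
      // each store.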
      if (nc & 8) {
        vst1q_f16(c0, vacc0x01234567); c0 += 8;
        vst1q_f16(c1, vacc1x01234567); c1 += 8;
        vst1q_f16(c2, vacc2x01234567); c2 += 8;
        vst1q_f16(c3, vacc3x01234567); c3 += 8;
        vst1q_f16(c4, vacc4x01234567); c4 += 8;
        vst1q_f16(c5, vacc5x01234567); c5 += 8;

        vacc0x01234567 = vacc0x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc4x01234567 = vacc4x89ABCDEF;
        vacc5x01234567 = vacc5x89ABCDEF;
      }
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c5, vacc5x0123); c5 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc5x0123 = vget_high_f16(vacc5x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c4, 1), vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32(__builtin_assume_aligned(c5, 1), vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c5, vacc5x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}