// Auto-generated file. Do not edit!
//   Template: src/f32-velu/neon-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>

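// Lookup table of exp2(-k / 16) for k = 0..15, stored as IEEE-754 single-precision bit patterns.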
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];

void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

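  // Load the ELU parameters: input prescale, alpha (scale of the negative branch), and beta (scale of the positive branch).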
  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);

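  // Constants of the exp(z) - 1 approximation: saturation cutoff, "magic bias" for rounding, log2(e),
  // 4-bit LUT index mask, -ln(2) split into high/low parts for Cody-Waite reduction, and polynomial coefficients.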
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

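  // Main loop: process 20 floats (five 128-bit NEON vectors) per iteration.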
  for (; n >= 20 * sizeof(float); n -= 20 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(x); x += 4;
    float32x4_t vx4567 = vld1q_f32(x); x += 4;
    float32x4_t vx89AB = vld1q_f32(x); x += 4;
    float32x4_t vxCDEF = vld1q_f32(x); x += 4;
    float32x4_t vxGHIJ = vld1q_f32(x); x += 4;

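    // z := max(x * prescale, sat_cutoff). Below the cutoff (~-17.33), exp(z) - 1 == -1 in single
    // precision, so clamping does not change the result but keeps the scale s normalized.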
    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
    const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
    const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);

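    // n := round(z / ln(2), 4 fractional bits) via the "magic bias" trick: adding 0x1.8p19 rounds
    // z * log2(e) to the nearest 1/16 and leaves the fixed-point result in the low mantissa bits.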
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);

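    // Split n: the low 4 mantissa bits (scaled by 4 into byte offsets) index the 16-entry table of
    // 2**frac(n); the bits above are shifted into the exponent field (left by 23 - 4 = 19) as 2**int(n).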
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
    const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
    const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);

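    // Gather the table entries lane by lane: each 64-bit extract holds two 32-bit byte offsets.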
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
    int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
    vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
    vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
    const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
    int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
    int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
    vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
    vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
    const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);

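    // Subtract the magic bias to recover n, and reconstruct the scale s := 2**n by adding the
    // integer exponent bits to the table value 2**frac(n) (an IEEE-754 bit-pattern addition).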
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
    float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));

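    // t := z - n * ln(2), computed with Cody-Waite range reduction: ln(2) is split into high and
    // low parts so the first multiply-add is exact.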
    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);

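    // Degree-3 polynomial approximation of exp(t) - 1 on [-ln(2)/32, ln(2)/32]:
    //   exp(t) - 1 ~ t + t * (t * (c2 + t * c3)) = t + t * p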
    float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
    float32x4_t vpCDEF = vmlaq_f32(vc2, vc3, vtCDEF);
    float32x4_t vpGHIJ = vmlaq_f32(vc2, vc3, vtGHIJ);

    vp0123 = vmulq_f32(vp0123, vt0123);
    vp4567 = vmulq_f32(vp4567, vt4567);
    vp89AB = vmulq_f32(vp89AB, vt89AB);
    vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
    vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);

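    // Reconstruct exp(z) - 1 = s * (1 + t + t * p) - 1 = (s - 1) + (t * s) + (t * s) * p.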
    vt0123 = vmulq_f32(vt0123, vs0123);
    vs0123 = vsubq_f32(vs0123, vone);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vs4567 = vsubq_f32(vs4567, vone);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vs89AB = vsubq_f32(vs89AB, vone);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
    vsCDEF = vsubq_f32(vsCDEF, vone);
    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
    vsGHIJ = vsubq_f32(vsGHIJ, vone);

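    // p := (t * s) + (t * s) * p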
    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);

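    // Negative-branch result: e := alpha * (exp(z) - 1).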
    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
    const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
    const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);

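    // Build a per-lane mask of negative inputs; the positive-branch result is x * beta.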
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    vx0123 = vmulq_f32(vx0123, vbeta);
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    vx4567 = vmulq_f32(vx4567, vbeta);
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
    vx89AB = vmulq_f32(vx89AB, vbeta);
    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
    vxCDEF = vmulq_f32(vxCDEF, vbeta);
    const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
    vxGHIJ = vmulq_f32(vxGHIJ, vbeta);

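    // Select alpha * (exp(z) - 1) for negative lanes and x * beta for non-negative lanes, then store.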
    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
    const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
    const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
    vst1q_f32(y, vyCDEF); y += 4;
    vst1q_f32(y, vyGHIJ); y += 4;
  }
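  // Remainder loop: same computation as the main loop, one 4-float vector at a time.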
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vmlaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    vst1q_f32(y, vy); y += 4;
  }
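  // Tail: 1 to 3 leftover elements. Compute a full vector, then store the live lanes piecewise.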
  if XNN_UNLIKELY(n != 0) {
    float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vmlaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}