// Auto-generated file. Do not edit!
//   Template: src/f32-velu/neon-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


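// Lookup table for exp2(-k / 16), k = 0..15. The 32-bit entries are consumed by
// adding an exponent term and reinterpreting the sum as a float, so the kernel
// can reconstruct 2**n with a single integer addition (see vs below).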
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];

void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

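  // Per-operator ELU parameters: for x >= 0 the result is beta * x, for x < 0
  // it is alpha * (exp(prescale * x) - 1).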
  const float32x4_t vprescale = vld1q_dup_f32(&params->scalar.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->scalar.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->scalar.beta);

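  // Constants of the expm1(z) approximation:
  // - vsat_cutoff = -0x1.154246p+4 ~ -25 * ln(2): below this threshold
  //   exp(z) - 1 rounds to -1.0f in single precision, so clamping z here does
  //   not change the result.
  // - vmagic_bias = 1.5 * 2**19: adding it rounds z * log2(e) to the nearest
  //   multiple of 1/16 and exposes the result as fixed-point in the low
  //   mantissa bits.
  // - vminus_ln2_hi/vminus_ln2_lo: -ln(2) split into two words for an accurate
  //   reduced argument t = z - n * ln(2).
  // - vc3, vc2: degree-3 polynomial coefficients, close to the Taylor values
  //   1/6 and 1/2.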
  const float32x4_t vsat_cutoff = vmovq_n_f32(-0x1.154246p+4f);
  const float32x4_t vmagic_bias = vmovq_n_f32(0x1.800000p19f);
  const float32x4_t vlog2e = vmovq_n_f32(0x1.715476p+0f);
  const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2_hi = vmovq_n_f32(-0x1.62E400p-1f);
  const float32x4_t vminus_ln2_lo = vmovq_n_f32(-0x1.7F7D1Cp-20f);
  const float32x4_t vc3 = vmovq_n_f32(0x1.55561Cp-3f);
  const float32x4_t vc2 = vmovq_n_f32(0x1.0001ECp-1f);
  const float32x4_t vone = vmovq_n_f32(1.0f);

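  // Main loop: process 12 elements (three 4-lane vectors) per iteration.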
  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
    float32x4_t vx0123 = vld1q_f32(x); x += 4;
    float32x4_t vx4567 = vld1q_f32(x); x += 4;
    float32x4_t vx89AB = vld1q_f32(x); x += 4;

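    // z = prescale * x, clamped from below at the saturation cutoff. Positive
    // lanes need no clamping: whatever the exp path produces for them is
    // discarded by the sign-based blend at the end of the iteration.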
    const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
    const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
    const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);

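    // n = z * log2(e), rounded to the nearest 1/16 via the magic bias, so that
    // exp(z) = 2**n * exp(t) with |t| <= ln(2) / 32.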
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);

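    // The low 4 bits of each n select one of the 16 table entries (shifted
    // left by 2 to form byte offsets into the int32 table); ven shifts the
    // bits of n so that its integer part lines up with the float exponent field.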
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
    const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
    const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
    const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);

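    // Gather the table entries: extract the 64-bit offset pairs, then load the
    // two 32-bit entries of each pair individually (dup load + lane load).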
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
    int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
    vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
    vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
    const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
    int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
    vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
    vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
    const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
    int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
    vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
    vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
    const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);

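    // Subtract the magic bias to recover n as a float, and reconstruct
    // s := 2**n by adding ven to the gathered table entries.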
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));

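    // t = z - n * ln(2), computed with a two-word (hi + lo) representation of
    // ln(2) for extra accuracy.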
    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);

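    // p = c2 * t + c3 * t**2, the nonlinear tail of the degree-3 polynomial
    // exp(t) ~ 1 + t + c2 * t**2 + c3 * t**3.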
    float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);

    vp0123 = vmulq_f32(vp0123, vt0123);
    vp4567 = vmulq_f32(vp4567, vt4567);
    vp89AB = vmulq_f32(vp89AB, vt89AB);

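    // Fold s into t and form s - 1, preparing
    // expm1(z) = s * t * (1 + p) + (s - 1).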
    vt0123 = vmulq_f32(vt0123, vs0123);
    vs0123 = vsubq_f32(vs0123, vone);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vs4567 = vsubq_f32(vs4567, vone);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vs89AB = vsubq_f32(vs89AB, vone);

    vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
    vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
    vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);

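    // e = alpha * expm1(z): the ELU result for the negative lanes.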
    const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
    const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
    const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);

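    // Blend by sign: lanes with x < 0 take e, non-negative lanes take beta * x.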
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    vx0123 = vmulq_f32(vx0123, vbeta);
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    vx4567 = vmulq_f32(vx4567, vbeta);
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
    vx89AB = vmulq_f32(vx89AB, vbeta);

    const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
    const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
    const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
  }
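  // Process the remaining full 4-element vectors with the same algorithm.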
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vmlaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

    vst1q_f32(y, vy); y += 4;
  }
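  // Tail: 1 to 3 elements remain. Compute a full vector of results, then store
  // only the valid lanes.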
  if XNN_UNLIKELY(n != 0) {
    float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
    const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);

    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
    vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);

    vn = vsubq_f32(vn, vmagic_bias);
    const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);

    float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));

    float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
    vp = vmulq_f32(vp, vt);

    vt = vmulq_f32(vt, vs);
    vs = vsubq_f32(vs, vone);
    vp = vmlaq_f32(vt, vp, vt);
    const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);

    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vx = vmulq_f32(vx, vbeta);
    const float32x4_t vy = vbslq_f32(vm, ve, vx);

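    // Store two lanes if at least two elements remain, then a single lane if n is odd.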
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}