// Auto-generated file. Do not edit!
//   Template: src/f32-velu/avx512f-rr1-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>

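// Computes the extended ELU activation
//   y := x >= 0 ? beta * x : alpha * (exp(prescale * x) - 1)
// over n bytes of floats, 32 elements per main-loop iteration, approximating
// exp with round-to-nearest range reduction (rr1) and a degree-6 polynomial (p6).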
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x32(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

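  // Broadcast the ELU parameters (prescale, alpha, beta) across all 16 lanes.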
  const __m512 vprescale = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.prescale));
  const __m512 valpha = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.alpha));
  const __m512 vbeta = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.beta));

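  // Constants for the exp approximation, as C99 hexadecimal float literals:
  // saturation cutoff, magic bias for rounding, log2(e), -ln(2), and the
  // polynomial coefficients c6..c2.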
  const __m512 vsat_cutoff = _mm512_set1_ps(-0x1.154246p+4f);
  const __m512 vmagic_bias = _mm512_set1_ps(0x1.8000FEp23f);
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
  const __m512 vc6 = _mm512_set1_ps(0x1.6b7338p-10f);
  const __m512 vc5 = _mm512_set1_ps(0x1.12278Ep-7f);
  const __m512 vc4 = _mm512_set1_ps(0x1.555716p-5f);
  const __m512 vc3 = _mm512_set1_ps(0x1.5554B0p-3f);
  const __m512 vc2 = _mm512_set1_ps(0x1.FFFFFEp-2f);

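  // Main loop: two 16-lane vectors (32 floats) per iteration.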
  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
    __m512 vx0 = _mm512_loadu_ps(x);
    __m512 vx1 = _mm512_loadu_ps(x + 16);
    x += 32;

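    // z := prescale * x, clamped at the saturation cutoff: below it the output
    // is saturated at -alpha, and clamping keeps n in range for the exponent
    // reconstruction below.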
    const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
    const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));

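    // n := round(z / ln(2)). Adding the magic bias (2^23 + 2^22 + 127) places
    // the rounded integer plus the exponent bias 127 in the low mantissa bits.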
    __m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
    __m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);

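    // s := 2^n: shift the biased exponent from the mantissa into the exponent
    // field, then subtract the magic bias to recover n as a float.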
    __m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
    vn0 = _mm512_sub_ps(vn0, vmagic_bias);
    __m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
    vn1 = _mm512_sub_ps(vn1, vmagic_bias);

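    // t := z - n * ln(2), the reduced argument.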
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);

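    // Evaluate c2 + t * (c3 + t * (c4 + t * (c5 + t * c6))) by Horner's scheme.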
    __m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
    __m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);

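    // p := p * t, so exp(t) - 1 ~= t + t * p; fold s into t for the final
    // reconstruction: t := t * s.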
    vp0 = _mm512_mul_ps(vp0, vt0);
    vt0 = _mm512_mul_ps(vt0, vs0);
    vp1 = _mm512_mul_ps(vp1, vt1);
    vt1 = _mm512_mul_ps(vt1, vs1);

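    // s := alpha * (s - 1).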
    vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
    vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);

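    // p := p * (t * s) + (t * s) ~= s * (exp(t) - 1) = exp(z) - s.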
    vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);

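    // y := alpha * p + alpha * (s - 1) ~= alpha * (exp(z) - 1) for negative x;
    // lanes with x >= 0 (mask vsign) are overwritten with beta * x instead.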
    const __m512 vzero = _mm512_setzero_ps();
    __m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
    const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
    __m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
    const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);

    vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
    vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);

    _mm512_storeu_ps(y, vy0);
    _mm512_storeu_ps(y + 16, vy1);
    y += 32;
  }
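  // Process the remaining full 16-element vector, if any.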
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    __m512 vx = _mm512_loadu_ps(x);
    x += 16;

    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);

    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
    vn = _mm512_sub_ps(vn, vmagic_bias);

    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);

    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
    vp = _mm512_fmadd_ps(vp, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_mul_ps(vp, vt);

    vt = _mm512_mul_ps(vt, vs);
    vs = _mm512_fmsub_ps(vs, valpha, valpha);
    vp = _mm512_fmadd_ps(vp, vt, vt);
    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);

    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);

    _mm512_storeu_ps(y, vy);
    y += 16;
  }
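  // Tail: 1 to 15 elements remain; compute with masked load and store.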
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);

    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);

    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
    vn = _mm512_sub_ps(vn, vmagic_bias);

    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);

    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
    vp = _mm512_fmadd_ps(vp, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_mul_ps(vp, vt);

    vt = _mm512_mul_ps(vt, vs);
    vs = _mm512_fmsub_ps(vs, valpha, valpha);
    vp = _mm512_fmadd_ps(vp, vt, vt);
    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);

    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);

    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}