1 /*
2  * Double-precision vector log(x) function.
3  *
4  * Copyright (c) 2019, Arm Limited.
5  * SPDX-License-Identifier: MIT
6  */
7 
8 #include "mathlib.h"
9 #include "v_math.h"
10 #include "v_log.h"
11 #if V_SUPPORTED
12 
13 /* Worst-case error: 1.17 + 0.5 ulp.  */
14 
/* Degree-5 polynomial coefficients for log1p(r) ~= r + r^2*P(r) on the
   reduced argument r = z/c - 1 (see comment in V_NAME(log) below).
   Coefficients are exact hex doubles; do not reformat or round.  */
static const f64_t Poly[] = {
  /* rel error: 0x1.6272e588p-56 in [ -0x1.fc1p-9 0x1.009p-8 ].  */
  -0x1.ffffffffffff7p-2,
   0x1.55555555170d4p-2,
  -0x1.0000000399c27p-2,
   0x1.999b2e90e94cap-3,
  -0x1.554e550bd501ep-3,
};
23 
/* Broadcast each polynomial coefficient to all vector lanes.  */
#define A0 v_f64 (Poly[0])
#define A1 v_f64 (Poly[1])
#define A2 v_f64 (Poly[2])
#define A3 v_f64 (Poly[3])
#define A4 v_f64 (Poly[4])
/* ln(2) as a double, used to reconstruct k*ln(2).  */
#define Ln2 v_f64 (0x1.62e42fefa39efp-1)
/* Number of lookup-table subintervals.  */
#define N (1 << V_LOG_TABLE_BITS)
/* Bit pattern subtracted from the input so that the reduced z lies in
   [OFF, 2*OFF) — approximately [1/sqrt(2), sqrt(2)) scaled; see the
   range-reduction comment in V_NAME(log).  */
#define OFF v_u64 (0x3fe6900900000000)
32 
/* One table row per lane: invc = 1/c and logc = log(c), where c is near
   the center of the subinterval containing z.  */
struct entry
{
  v_f64_t invc;
  v_f64_t logc;
};
38 
39 static inline struct entry
40 lookup (v_u64_t i)
41 {
42   struct entry e;
43 #ifdef SCALAR
44   e.invc = __v_log_data[i].invc;
45   e.logc = __v_log_data[i].logc;
46 #else
47   e.invc[0] = __v_log_data[i[0]].invc;
48   e.logc[0] = __v_log_data[i[0]].logc;
49   e.invc[1] = __v_log_data[i[1]].invc;
50   e.logc[1] = __v_log_data[i[1]].logc;
51 #endif
52   return e;
53 }
54 
/* Slow path, kept out of line so the fast path stays small: lanes
   flagged in cmp are presumably recomputed with the scalar log via
   v_call_f64, while unflagged lanes keep y (NOTE(review): exact lane
   semantics are defined by v_call_f64 elsewhere — confirm there).  */
VPCS_ATTR
__attribute__ ((noinline)) static v_f64_t
specialcase (v_f64_t x, v_f64_t y, v_u64_t cmp)
{
  return v_call_f64 (log, x, y, cmp);
}
61 
/* Vector log(x), double precision.  Fast path: table-based range
   reduction followed by a degree-5 polynomial, all in FMAs; inputs
   needing special handling are deferred to specialcase().
   Statement order matters for the stated error bound — do not reorder
   the floating-point operations.  */
VPCS_ATTR
v_f64_t
V_NAME(log) (v_f64_t x)
{
  v_f64_t z, r, r2, p, y, kd, hi;
  v_u64_t ix, iz, tmp, top, i, cmp;
  v_s64_t k;
  struct entry e;

  ix = v_as_u64_f64 (x);
  /* top = sign + biased exponent + 4 mantissa bits.  The unsigned
     wrap-around compare flags top < 0x0010 (zero, subnormal, tiny),
     top >= 0x7ff0 (inf, nan) and any negative x in a single test.  */
  top = ix >> 48;
  cmp = v_cond_u64 (top - v_u64 (0x0010) >= v_u64 (0x7ff0 - 0x0010));

  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center.  */
  tmp = ix - OFF;
  /* Table index: top V_LOG_TABLE_BITS of the mantissa after the OFF
     shift.  */
  i = (tmp >> (52 - V_LOG_TABLE_BITS)) % N;
  k = v_as_s64_u64 (tmp) >> 52; /* arithmetic shift */
  /* Clear the exponent of tmp out of ix to recover z exactly.  */
  iz = ix - (tmp & v_u64 (0xfffULL << 52));
  z = v_as_f64_u64 (iz);
  e = lookup (i);

  /* log(x) = log1p(z/c-1) + log(c) + k*Ln2.  */
  r = v_fma_f64 (z, e.invc, v_f64 (-1.0));
  kd = v_to_f64_s64 (k);

  /* hi = r + log(c) + k*Ln2.  */
  hi = v_fma_f64 (kd, Ln2, e.logc + r);
  /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi.
     Evaluated pairwise to shorten the FMA dependency chain.  */
  r2 = r * r;
  y = v_fma_f64 (A3, r, A2);
  p = v_fma_f64 (A1, r, A0);
  y = v_fma_f64 (A4, r2, y);
  y = v_fma_f64 (y, r2, p);
  y = v_fma_f64 (y, r2, hi);

  /* Take the slow path only if some lane was flagged above.  */
  if (unlikely (v_any_u64 (cmp)))
    return specialcase (x, y, cmp);
  return y;
}
103 VPCS_ALIAS
104 #endif
105