/* Definitions of some C99 math library functions, for those platforms
   that don't implement these functions already. */

#include "Python.h"
#include <float.h>
#include "_math.h"

/* The following copyright notice applies to the original
   implementations of acosh, asinh and atanh. */

/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunPro, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */

#if !defined(HAVE_ACOSH) || !defined(HAVE_ASINH)
static const double ln2 = 6.93147180559945286227E-01;
static const double two_pow_p28 = 268435456.0; /* 2**28 */
#endif
#if !defined(HAVE_ASINH) || !defined(HAVE_ATANH)
static const double two_pow_m28 = 3.7252902984619141E-09; /* 2**-28 */
#endif
#if !defined(HAVE_ATANH) && !defined(Py_NAN)
static const double zero = 0.0;
#endif


#ifndef HAVE_ACOSH
/* acosh(x)
 * Method :
 *      Based on
 *          acosh(x) = log [ x + sqrt(x*x-1) ]
 *      we have
 *          acosh(x) := log(x)+ln2, if x is large; else
 *          acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else
 *          acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
 *
 * Special cases:
 *      acosh(x) is NaN with signal if x<1.
 *      acosh(NaN) is NaN without signal.
 */
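/* Informal sketch of why the branches below stay accurate (an added gloss,
   not part of the original SunPro comment): for x >= 2**28, sqrt(x*x - 1)
   agrees with x to double precision, so log(x + sqrt(x*x - 1)) reduces to
   log(2x) = log(x) + ln2.  For 1 < x <= 2, writing t = x - 1 gives
   x + sqrt(x*x - 1) = 1 + (t + sqrt(2t + t*t)), so log1p sees the small
   quantity directly instead of losing it to the rounding of 1 + small. */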

double
_Py_acosh(double x)
{
    if (Py_IS_NAN(x)) {
        return x+x;
    }
    if (x < 1.) {                       /* x < 1; return a signaling NaN */
        errno = EDOM;
#ifdef Py_NAN
        return Py_NAN;
#else
        return (x-x)/(x-x);
#endif
    }
    else if (x >= two_pow_p28) {        /* x >= 2**28 */
        if (Py_IS_INFINITY(x)) {
            return x+x;
        }
        else {
            return log(x) + ln2;        /* acosh(huge)=log(2x) */
        }
    }
    else if (x == 1.) {
        return 0.0;                     /* acosh(1) = 0 */
    }
    else if (x > 2.) {                  /* 2 < x < 2**28 */
        double t = x * x;
        return log(2.0 * x - 1.0 / (x + sqrt(t - 1.0)));
    }
    else {                              /* 1 < x <= 2 */
        double t = x - 1.0;
        return m_log1p(t + sqrt(2.0 * t + t * t));
    }
}
#endif /* HAVE_ACOSH */


#ifndef HAVE_ASINH
/* asinh(x)
 * Method :
 *      Based on
 *          asinh(x) = sign(x) * log [ |x| + sqrt(x*x+1) ]
 *      we have
 *      asinh(x) := x if 1+x*x=1,
 *               := sign(x)*(log(x)+ln2) for large |x|, else
 *               := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1))) if |x|>2, else
 *               := sign(x)*log1p(|x| + x^2/(1 + sqrt(1+x^2)))
 */
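/* Informal note (added gloss, not part of the original SunPro comment): each
   branch below avoids cancellation.  For |x| < 2**-28 the true result rounds
   to x itself; for |x| > 2**28, sqrt(x*x + 1) agrees with |x| to double
   precision, so the log reduces to log(|x|) + ln2; and for moderate |x| the
   identity sqrt(x*x + 1) - 1 = x*x / (1 + sqrt(1 + x*x)) lets log1p absorb
   the small argument without ever forming 1 + small explicitly. */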

double
_Py_asinh(double x)
{
    double w;
    double absx = fabs(x);

    if (Py_IS_NAN(x) || Py_IS_INFINITY(x)) {
        return x+x;
    }
    if (absx < two_pow_m28) {           /* |x| < 2**-28 */
        return x;                       /* return x inexact except 0 */
    }
    if (absx > two_pow_p28) {           /* |x| > 2**28 */
        w = log(absx) + ln2;
    }
    else if (absx > 2.0) {              /* 2 < |x| < 2**28 */
        w = log(2.0 * absx + 1.0 / (sqrt(x * x + 1.0) + absx));
    }
    else {                              /* 2**-28 <= |x| <= 2 */
        double t = x*x;
        w = m_log1p(absx + t / (1.0 + sqrt(1.0 + t)));
    }
    return copysign(w, x);
}
#endif /* HAVE_ASINH */


#ifndef HAVE_ATANH
/* atanh(x)
 * Method :
 *    1. Reduce x to positive via atanh(-x) = -atanh(x)
 *    2. For x >= 0.5
 *                    1              2x                          x
 *        atanh(x) = --- * log(1 + -------) = 0.5 * log1p(2 * -------)
 *                    2             1 - x                      1 - x
 *
 *       For x < 0.5
 *       atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
 *
 * Special cases:
 *      atanh(x) is NaN if |x| >= 1 with signal;
 *      atanh(NaN) is that NaN with no signal;
 *
 */
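/* Informal note (added gloss, not part of the original SunPro comment): both
   branches below compute 0.5*log((1+x)/(1-x)) = 0.5*log1p(2x/(1-x)).  For
   x < 0.5 the argument is rewritten as 2x + 2x*x/(1-x) so that the dominant
   term 2x enters log1p exactly and rounding error is confined to the much
   smaller correction term. */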

double
_Py_atanh(double x)
{
    double absx;
    double t;

    if (Py_IS_NAN(x)) {
        return x+x;
    }
    absx = fabs(x);
    if (absx >= 1.) {                   /* |x| >= 1 */
        errno = EDOM;
#ifdef Py_NAN
        return Py_NAN;
#else
        return x / zero;
#endif
    }
    if (absx < two_pow_m28) {           /* |x| < 2**-28 */
        return x;
    }
    if (absx < 0.5) {                   /* |x| < 0.5 */
        t = absx+absx;
        t = 0.5 * m_log1p(t + t*absx / (1.0 - absx));
    }
    else {                              /* 0.5 <= |x| < 1.0 */
        t = 0.5 * m_log1p((absx + absx) / (1.0 - absx));
    }
    return copysign(t, x);
}
#endif /* HAVE_ATANH */


#ifndef HAVE_EXPM1
/* Mathematically, expm1(x) = exp(x) - 1.  The expm1 function is designed
   to avoid the significant loss of precision that arises from direct
   evaluation of the expression exp(x) - 1, for x near 0. */

double
_Py_expm1(double x)
{
    /* For abs(x) >= log(2), it's safe to evaluate exp(x) - 1 directly; this
       also works fine for infinities and nans.

       For smaller x, we can use a method due to Kahan that achieves close to
       full accuracy.
    */
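
    /* Sketch of how Kahan's method works (added gloss): let u = exp(x) as
       computed, and y = log(u).  Then (u - 1.0) * x / log(u) equals
       expm1(y) * (x / y), and because expm1(t)/t varies slowly near 0 while
       y differs from x by at most a rounding error, the product recovers
       expm1(x) to near full precision.  The u == 1.0 test covers x so tiny
       that exp(x) rounds to 1, where the correctly rounded result is x
       itself. */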

    if (fabs(x) < 0.7) {
        double u;
        u = exp(x);
        if (u == 1.0)
            return x;
        else
            return (u - 1.0) * x / log(u);
    }
    else
        return exp(x) - 1.0;
}
#endif /* HAVE_EXPM1 */


/* log1p(x) = log(1+x).  The log1p function is designed to avoid the
   significant loss of precision that arises from direct evaluation when x is
   small. */
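/* Concrete example of the cancellation this avoids (added gloss): for
   x = 1e-16, the sum 1.0 + x rounds to exactly 1.0, so log(1.0 + x)
   evaluates to 0.0, whereas a correct log1p(1e-16) returns 1e-16, i.e. the
   input itself to double precision. */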

double
_Py_log1p(double x)
{
#ifdef HAVE_LOG1P
    /* Some platforms supply a log1p function but don't respect the sign of
       zero: log1p(-0.0) gives 0.0 instead of the correct result of -0.0.

       To save fiddling with configure tests and platform checks, we handle
       the special case of zero input directly on all platforms.
    */
    if (x == 0.0) {
        return x;
    }
    else {
        return log1p(x);
    }
#else
    /* For x small, we use the following approach.  Let y be the nearest
       float to 1+x, then

           1+x = y * (1 - (y-1-x)/y)

       so log(1+x) = log(y) + log(1-(y-1-x)/y).  Since (y-1-x)/y is tiny, the
       second term is well approximated by (y-1-x)/y.  If abs(x) >=
       DBL_EPSILON/2 or the rounding-mode is some form of round-to-nearest
       then y-1-x will be exactly representable, and is computed exactly by
       (y-1)-x.

       If abs(x) < DBL_EPSILON/2 and the rounding mode is not known to be
       round-to-nearest then this method is slightly dangerous: 1+x could be
       rounded up to 1+DBL_EPSILON instead of down to 1, and in that case
       y-1-x will not be exactly representable any more and the result can be
       off by many ulps.  But this is easily fixed: for a floating-point
       number |x| < DBL_EPSILON/2., the closest floating-point number to
       log(1+x) is exactly x.
    */

    double y;
    if (fabs(x) < DBL_EPSILON / 2.) {
        return x;
    }
    else if (-0.5 <= x && x <= 1.) {
        /* WARNING: it's possible that an overeager compiler
           will incorrectly optimize the following two lines
           to the equivalent of "return log(1.+x)".  If this
           happens, then results from log1p will be inaccurate
           for small x. */
        y = 1.+x;
        return log(y) - ((y - 1.) - x) / y;
    }
    else {
        /* NaNs and infinities should end up here */
        return log(1.+x);
    }
#endif /* ifdef HAVE_LOG1P */
}
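

/* Quick illustrative checks (added; not compiled as part of this file).  The
   names below assume the usual arrangement in _math.h, where m_log1p maps to
   _Py_log1p and, when the corresponding HAVE_* macros are undefined, the
   other fallbacks above are compiled in:

       assert(copysign(1.0, m_log1p(-0.0)) == -1.0);   (* sign of zero kept *)
       assert(_Py_acosh(1.0) == 0.0);
       assert(_Py_atanh(0.0) == 0.0);
       assert(_Py_expm1(0.0) == 0.0);
*/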