/*
 * Copyright 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_AUDIO_UTILS_STATISTICS_H
#define ANDROID_AUDIO_UTILS_STATISTICS_H

#ifdef __cplusplus

#include "variadic_utils.h"

// variadic_utils already contains stl headers; in addition:
#include <deque> // for ReferenceStatistics implementation
#include <sstream>

namespace android {
namespace audio_utils {

/**
 * Compensated summation is used to accumulate a sequence of floating point
 * values, with "compensation" information to help preserve precision lost
 * due to catastrophic cancellation, e.g. (BIG) + (SMALL) - (BIG) = 0.
 *
 * We provide two forms of compensated summation:
 * the Kahan variant (which has better properties if the sum is generally
 * larger than the data added) and the Neumaier variant (which is better if
 * either the sum or the next datum may be the larger of the two).
 *
 * https://en.wikipedia.org/wiki/Kahan_summation_algorithm
 *
 * Alternative approaches include divide-and-conquer summation,
 * which provides increased accuracy with log n stack depth (recursion).
 *
 * https://en.wikipedia.org/wiki/Pairwise_summation
 */

template <typename T>
struct KahanSum {
    T mSum{};
    T mCorrection{}; // negative low order bits of mSum.

    constexpr KahanSum<T>() = default;

    explicit constexpr KahanSum<T>(const T& value)
        : mSum{value}
    { }

    // takes T not KahanSum<T>
    friend constexpr KahanSum<T> operator+(KahanSum<T> lhs, const T& rhs) {
        const T y = rhs - lhs.mCorrection;
        const T t = lhs.mSum + y;

#ifdef __FAST_MATH__
#warning "fast math enabled, could optimize out KahanSum correction"
#endif

        lhs.mCorrection = (t - lhs.mSum) - y; // compiler, please do not optimize with /fp:fast
        lhs.mSum = t;
        return lhs;
    }

    constexpr KahanSum<T>& operator+=(const T& rhs) { // takes T not KahanSum<T>
        *this = *this + rhs;
        return *this;
    }

    constexpr operator T() const {
        return mSum;
    }

    constexpr void reset() {
        mSum = {};
        mCorrection = {};
    }
};

// A more robust version of Kahan summation that remains accurate when the new
// input may be larger in magnitude than the running sum.
// TODO: investigate variants that reincorporate mCorrection bits into mSum if possible.
template <typename T>
struct NeumaierSum {
    T mSum{};
    T mCorrection{}; // low order bits of mSum.

    constexpr NeumaierSum<T>() = default;

    explicit constexpr NeumaierSum<T>(const T& value)
        : mSum{value}
    { }

    friend constexpr NeumaierSum<T> operator+(NeumaierSum<T> lhs, const T& rhs) {
        const T t = lhs.mSum + rhs;

        if (const_abs(lhs.mSum) >= const_abs(rhs)) {
            lhs.mCorrection += (lhs.mSum - t) + rhs;
        } else {
            lhs.mCorrection += (rhs - t) + lhs.mSum;
        }
        lhs.mSum = t;
        return lhs;
    }

    constexpr NeumaierSum<T>& operator+=(const T& rhs) { // takes T not NeumaierSum<T>
        *this = *this + rhs;
        return *this;
    }

    static constexpr T const_abs(T x) {
        return x < T{} ? -x : x;
    }

    constexpr operator T() const {
        return mSum + mCorrection;
    }

    constexpr void reset() {
        mSum = {};
        mCorrection = {};
    }
};
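
/*
 * A usage sketch (illustrative only; the values below are arbitrary and chosen
 * to show where compensation matters):
 *
 *   KahanSum<float> ksum{1e8f};
 *   for (int i = 0; i < 1000; ++i) {
 *       ksum += 1.f;            // each add is below the float ulp at 1e8 (which is 8)
 *   }
 *   const float total = ksum;   // ~1.00001e8; naive float summation stays at 1e8
 *
 *   NeumaierSum<double> nsum{1.};
 *   nsum += 1e100;
 *   nsum += -1e100;
 *   // double(nsum) == 1.; both naive and Kahan summation would return 0 here.
 */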

//-------------------------------------------------------------------
// Constants and limits

template <typename T, typename T2=void>  struct StatisticsConstants;

template <typename T>
struct StatisticsConstants<T, std::enable_if_t<std::is_arithmetic<T>::value>> {
    // value closest to negative infinity for type T
    static constexpr T negativeInfinity() {
        return std::numeric_limits<T>::has_infinity ?
                -std::numeric_limits<T>::infinity() : std::numeric_limits<T>::min();
    }

    static constexpr T mNegativeInfinity = negativeInfinity();

    // value closest to positive infinity for type T
    static constexpr T positiveInfinity() {
        return std::numeric_limits<T>::has_infinity ?
                std::numeric_limits<T>::infinity() : std::numeric_limits<T>::max();
    }

    static constexpr T mPositiveInfinity = positiveInfinity();
};

// specialize for tuple and pair
template <typename T>
struct StatisticsConstants<T, std::enable_if_t<!std::is_arithmetic<T>::value>> {
private:
    template <std::size_t... I >
    static constexpr auto negativeInfinity(std::index_sequence<I...>) {
       return T{StatisticsConstants<
               typename std::tuple_element<I, T>::type>::mNegativeInfinity...};
    }
    template <std::size_t... I >
    static constexpr auto positiveInfinity(std::index_sequence<I...>) {
       return T{StatisticsConstants<
               typename std::tuple_element<I, T>::type>::mPositiveInfinity...};
    }
public:
    static constexpr auto negativeInfinity() {
       return negativeInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
    }
    static constexpr auto mNegativeInfinity =
        negativeInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
    static constexpr auto positiveInfinity() {
       return positiveInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
    }
    static constexpr auto mPositiveInfinity =
        positiveInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
};
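
/*
 * A sketch of how these limits may be queried (the types here are only
 * illustrative):
 *
 *   static_assert(StatisticsConstants<float>::mPositiveInfinity
 *           == std::numeric_limits<float>::infinity(), "");
 *   static_assert(StatisticsConstants<int>::mNegativeInfinity
 *           == std::numeric_limits<int>::min(), "");
 *
 *   // For a pair or tuple the limits are applied element-wise, e.g.
 *   // StatisticsConstants<std::pair<float, double>>::mNegativeInfinity
 *   // is { -infinity (float), -infinity (double) }.
 */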

/**
 * Statistics provides a running weighted average, variance, and standard deviation of
 * a sample stream. It is more numerically stable for floating point computation than a
 * naive sum of values, sum of values squared.
 *
 * The weighting is like an IIR filter, with the most recent sample weighted as 1, and decaying
 * by alpha (between 0 and 1).  With alpha == 1. this is rectangular weighting, reducing to
 * Welford's algorithm.
 *
 * The IIR filter weighting emphasizes more recent samples, has low overhead updates,
 * constant storage, and constant computation (per update or variance read).
 *
 * This is a variant of the weighted mean and variance algorithms described here:
 * https://en.wikipedia.org/wiki/Moving_average
 * https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
 * https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
 *
 * weight = \sum_{i=1}^n \alpha^{n-i}
 * mean = 1/weight * \sum_{i=1}^n \alpha^{n-i} x_i
 * var = 1/weight * \sum_{i=1}^n \alpha^{n-i} (x_i - mean)^2
 *
 * The Statistics class is safe to call from a SCHED_FIFO thread with the exception of
 * the toString() method, which uses std::stringstream to format data for printing.
 *
 * Long term data accumulation and constant alpha:
 * If the alpha weight is 1 (or not specified) then statistics objects with float
 * summation types (D, S) should NOT accumulate more than 2^(mantissa bits) elements
 * without a reset, to prevent variance increases due to weight precision underflow.
 * This is 1 << 23 elements for float and 1 << 52 elements for double.
 *
 * Setting alpha less than 1 avoids this underflow problem.
 * Alpha < 1 - (epsilon * 32), where epsilon is std::numeric_limits<D>::epsilon(),
 * is recommended for continuously running statistics (alpha <= 0.999996
 * for float summation precision).
 *
 * Alpha may also change on-the-fly, based on the reliability of
 * new information.  In that case, alpha may be set temporarily greater
 * than 1.
 *
 * https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights_2
 *
 * Statistics may also be collected on a variadic "vector" object instead of
 * scalars, where the variance may be computed as an inner product radial squared
 * distance from the mean; or as an outer product, where the variance returned
 * is a covariance matrix.
 *
 * TODO:
 * 1) Alternative versions of Kahan/Neumaier sum that better preserve precision.
 * 2) Add binary math ops to corrected sum classes for better precision in lieu of long double.
 * 3) Add Cholesky decomposition to ensure positive definite covariance matrices if
 *    the input is a variadic object.
 */

/**
 * Mean may have catastrophic cancellation of positive and negative sample values,
 * so we use Kahan summation in the algorithms below (or substitute "D" if not needed).
 *
 * https://en.wikipedia.org/wiki/Kahan_summation_algorithm
 */

template <
    typename T,               // input data type
    typename D = double,      // output mean data type
    typename S = KahanSum<D>, // compensated mean summation type, if any
    typename A = double,      // weight type
    typename D2 = double,     // output variance "D^2" type
    typename PRODUCT = std::multiplies<D> // how the output variance is computed
    >
class Statistics {
public:
    /** alpha is the weight (if alpha == 1. we use a rectangular window) */
    explicit constexpr Statistics(A alpha = A(1.))
        : mAlpha(alpha)
    { }

    template <size_t N>
    explicit constexpr Statistics(const T (&a)[N], A alpha = A(1.))
        : mAlpha(alpha)
    {
        for (const auto &data : a) {
            add(data);
        }
    }

    constexpr void setAlpha(A alpha) {
        mAlpha = alpha;
    }

    constexpr void add(const T &value) {
        // Note: fastest implementation uses fmin fminf but would not be constexpr

        mMax = audio_utils::max(mMax, value); // order important: reject NaN
        mMin = audio_utils::min(mMin, value); // order important: reject NaN
        ++mN;
        const D delta = value - mMean;
        /* if (mAlpha == 1.) we have Welford's algorithm
            ++mN;
            mMean += delta / mN;
            mM2 += delta * (value - mMean);

            Note delta * (value - mMean) should be non-negative.
        */
        mWeight = A(1.) + mAlpha * mWeight;
        mWeight2 = A(1.) + mAlpha * mAlpha * mWeight2;
        D meanDelta = delta / mWeight;
        mMean += meanDelta;
        mM2 = mAlpha * mM2 + PRODUCT()(delta, (value - mMean));

        /*
           Alternate variant related to:
           http://mathworld.wolfram.com/SampleVarianceComputation.html

           const double sweight = mAlpha * mWeight;
           mWeight = 1. + sweight;
           const double dmean = delta / mWeight;
           mMean += dmean;
           mM2 = mAlpha * mM2 + mWeight * sweight * dmean * dmean;

           The update is slightly different than Welford's algorithm
           showing a by-construction non-negative update to M2.
        */
    }

    constexpr int64_t getN() const {
        return mN;
    }

    constexpr void reset() {
        mMin = StatisticsConstants<T>::positiveInfinity();
        mMax = StatisticsConstants<T>::negativeInfinity();
        mN = 0;
        mWeight = {};
        mWeight2 = {};
        mMean = {};
        mM2 = {};
    }

    constexpr A getWeight() const {
        return mWeight;
    }

    constexpr D getMean() const {
        return mMean;
    }

    constexpr D2 getVariance() const {
        if (mN < 2) {
            // must have 2 samples for sample variance.
            return {};
        } else {
            return mM2 / getSampleWeight();
        }
    }

    constexpr D2 getPopVariance() const {
        if (mN < 1) {
            return {};
        } else {
            return mM2 / mWeight;
        }
    }

    // explicitly use sqrt_constexpr if you need a constexpr version
    D2 getStdDev() const {
        return android::audio_utils::sqrt(getVariance());
    }

    D2 getPopStdDev() const {
        return android::audio_utils::sqrt(getPopVariance());
    }

    constexpr T getMin() const {
        return mMin;
    }

    constexpr T getMax() const {
        return mMax;
    }

    std::string toString() const {
        const int64_t N = getN();
        if (N == 0) return "unavail";

        std::stringstream ss;
        ss << "ave=" << getMean();
        if (N > 1) {
            // we use the sample standard deviation (not entirely unbiased,
            // though the sample variance is unbiased).
            ss << " std=" << getStdDev();
        }
        ss << " min=" << getMin();
        ss << " max=" << getMax();
        return ss.str();
    }

private:
    A mAlpha;
    T mMin{StatisticsConstants<T>::positiveInfinity()};
    T mMax{StatisticsConstants<T>::negativeInfinity()};

    int64_t mN = 0;  // running count of samples.
    A mWeight{};     // sum of weights.
    A mWeight2{};    // sum of weights squared.
    S mMean{};       // running mean.
    D2 mM2{};        // running unnormalized variance.

    // Reliability correction for unbiasing variance, since the mean is estimated
    // from the same sample stream as the variance.
    // if mAlpha == 1 this is mWeight - 1;
    //
    // TODO: consider exposing the correction factor.
    constexpr A getSampleWeight() const {
        // if mAlpha is constant then the mWeight2 member variable is not
        // needed; one can use instead:
        // return (mWeight - D(1.)) * D(2.) / (D(1.) + mAlpha);

        return mWeight - mWeight2 / mWeight;
    }
};
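
/*
 * A usage sketch (names and values here are illustrative only):
 *
 *   // Exponentially weighted statistics with alpha = 0.9: the newest sample has
 *   // weight 1, the previous one 0.9, the one before that 0.81, and so on.
 *   Statistics<double> stats(0.9);
 *   for (const double x : { 1., 2., 4., 8. }) {
 *       stats.add(x);
 *   }
 *   const double mean = stats.getMean();      // weighted running mean
 *   const double sdev = stats.getStdDev();    // weighted sample standard deviation
 *   printf("%s\n", stats.toString().c_str()); // do not call on a SCHED_FIFO thread
 *
 *   // Rectangular (unweighted) statistics over an array; alpha defaults to 1.
 *   const double data[] = { 0.1, 0.2, 0.3 };
 *   Statistics<double> astats(data);
 */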

/**
 * ReferenceStatistics is a naive implementation of the weighted running variance,
 * which consumes more space and is slower than Statistics.  It is provided for
 * comparison and testing purposes.  Do not call from a SCHED_FIFO thread!
 *
 * Note: Common code not combined for implementation clarity.
 *       We don't invoke Kahan summation or other tricks.
 */
template <
    typename T, // input data type
    typename D = double // output mean/variance data type
    >
class ReferenceStatistics {
public:
    /** alpha is the weight (alpha == 1. is rectangular window) */
    explicit ReferenceStatistics(D alpha = D(1.))
        : mAlpha(alpha)
    { }

    constexpr void setAlpha(D alpha) {
        mAlpha = alpha;
    }

    // For independent testing, min and max intentionally behave slightly
    // differently from Statistics with respect to NaN.
    constexpr void add(const T &value) {
        if (getN() == 0) {
            mMax = value;
            mMin = value;
        } else if (value > mMax) {
            mMax = value;
        } else if (value < mMin) {
            mMin = value;
        }

        mData.push_front(value);
        mAlphaList.push_front(mAlpha);
    }

    int64_t getN() const {
        return mData.size();
    }

    void reset() {
        mMin = {};
        mMax = {};
        mData.clear();
        mAlphaList.clear();
    }

    D getWeight() const {
        D weight{};
        D alpha_i(1.);
        for (size_t i = 0; i < mData.size(); ++i) {
            weight += alpha_i;
            alpha_i *= mAlphaList[i];
        }
        return weight;
    }

    D getWeight2() const {
        D weight2{};
        D alpha2_i(1.);
        for (size_t i = 0; i < mData.size(); ++i) {
            weight2 += alpha2_i;
            alpha2_i *= mAlphaList[i] * mAlphaList[i];
        }
        return weight2;
    }

    D getMean() const {
        D wsum{};
        D alpha_i(1.);
        for (size_t i = 0; i < mData.size(); ++i) {
            wsum += alpha_i * mData[i];
            alpha_i *= mAlphaList[i];
        }
        return wsum / getWeight();
    }

    // Should always return a positive value.
    D getVariance() const {
        return getUnweightedVariance() / (getWeight() - getWeight2() / getWeight());
    }

    // Should always return a positive value.
    D getPopVariance() const {
        return getUnweightedVariance() / getWeight();
    }

    D getStdDev() const {
        return sqrt(getVariance());
    }

    D getPopStdDev() const {
        return sqrt(getPopVariance());
    }

    T getMin() const {
        return mMin;
    }

    T getMax() const {
        return mMax;
    }

    std::string toString() const {
        const auto N = getN();
        if (N == 0) return "unavail";

        std::stringstream ss;
        ss << "ave=" << getMean();
        if (N > 1) {
            // we use the sample standard deviation (not entirely unbiased,
            // though the sample variance is unbiased).
            ss << " std=" << getStdDev();
        }
        ss << " min=" << getMin();
        ss << " max=" << getMax();
        return ss.str();
    }

private:
    T mMin{};
    T mMax{};

    D mAlpha;                 // current alpha value
    std::deque<T> mData;      // store all the data for exact summation, mData[0] most recent.
    std::deque<D> mAlphaList; // alpha value for the data added.

    D getUnweightedVariance() const {
        const D mean = getMean();
        D wsum{};
        D alpha_i(1.);
        for (size_t i = 0; i < mData.size(); ++i) {
            const D diff = mData[i] - mean;
            wsum += alpha_i * diff * diff;
            alpha_i *= mAlphaList[i];
        }
        return wsum;
    }
};
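
/*
 * ReferenceStatistics is intended for test code, e.g. a sketch (not an actual
 * test) comparing it against the optimized implementation:
 *
 *   Statistics<double> fast(0.5);
 *   ReferenceStatistics<double> ref(0.5);
 *   for (const double x : { 1., 3., 5., 7. }) {
 *       fast.add(x);
 *       ref.add(x);
 *   }
 *   // The two implementations should agree to within floating point tolerance:
 *   //   fast.getMean()     ~ ref.getMean()
 *   //   fast.getVariance() ~ ref.getVariance()
 */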

/**
 * Least squares fitting of a 2D straight line based on the covariance matrix.
 *
 * See formula from:
 * http://mathworld.wolfram.com/LeastSquaresFitting.html
 *
 * y = a + b*x
 *
 * returns a: y intercept
 *         b: slope
 *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
 *
 * For better numerical stability, it is suggested to use only the slope b,
 * since the least squares fit line passes through the mean:
 *
 * (y - mean_y) = b * (x - mean_x).
 *
 */
template <typename T>
constexpr void computeYLineFromStatistics(
        T &a, T& b, T &r2,
        const T& mean_x,
        const T& mean_y,
        const T& var_x,
        const T& cov_xy,
        const T& var_y) {

    // Dimensionally r2 is unitless.  If there is no correlation
    // then r2 is clearly 0 as cov_xy == 0.  If x and y are identical up to a scale
    // and shift, then r2 is 1.
    r2 = cov_xy * cov_xy / (var_x * var_y);

    // The least squares solution to the overconstrained matrix equation requires
    // the pseudo-inverse. In 2D, the best-fit slope is the mean removed
    // (via covariance and variance) dy/dx derived from the joint expectation
    // (this is also dimensionally correct).
    b = cov_xy / var_x;

    // The best fit line goes through the mean, and can be used to find the y intercept.
    a = mean_y - b * mean_x;
}

/**
 * LinearLeastSquaresFit<> class is derived from the Statistics<> class, with a 2 element array.
 * Arrays are preferred over tuples or pairs because copy assignment is constexpr and
 * arrays are trivially copyable.
 */
template <typename T>
class LinearLeastSquaresFit : public
    Statistics<std::array<T, 2>, // input
               std::array<T, 2>, // mean data output
               std::array<T, 2>, // compensated mean sum
               T,                // weight type
               std::array<T, 3>, // covariance_ut
               audio_utils::outerProduct_UT_array<std::array<T, 2>>>
{
public:
    constexpr explicit LinearLeastSquaresFit(const T &alpha = T(1.))
        : Statistics<std::array<T, 2>,
             std::array<T, 2>,
             std::array<T, 2>,
             T,
             std::array<T, 3>, // covariance_ut
             audio_utils::outerProduct_UT_array<std::array<T, 2>>>(alpha) { }

    /* Note: base class method: add(value)

    constexpr void add(const std::array<T, 2>& value);

       use:
          add({1., 2.});
       or
          add(to_array(myTuple));
    */

    /**
     * y = a + b*x
     *
     * returns a: y intercept
     *         b: y slope (dy / dx)
     *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
     */
    constexpr void computeYLine(T &a, T &b, T &r2) const {
        computeYLineFromStatistics(a, b, r2,
                std::get<0>(this->getMean()), /* mean_x */
                std::get<1>(this->getMean()), /* mean_y */
                std::get<0>(this->getPopVariance()), /* var_x */
                std::get<1>(this->getPopVariance()), /* cov_xy */
                std::get<2>(this->getPopVariance())); /* var_y */
    }

    /**
     * x = a + b*y
     *
     * returns a: x intercept
     *         b: x slope (dx / dy)
     *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
     */
    constexpr void computeXLine(T &a, T &b, T &r2) const {
        // reverse x and y for X line computation
        computeYLineFromStatistics(a, b, r2,
                std::get<1>(this->getMean()), /* mean_x */
                std::get<0>(this->getMean()), /* mean_y */
                std::get<2>(this->getPopVariance()), /* var_x */
                std::get<1>(this->getPopVariance()), /* cov_xy */
                std::get<0>(this->getPopVariance())); /* var_y */
    }

    /**
     * this returns the estimate of y from a given x
     */
    constexpr T getYFromX(const T &x) const {
        const T var_x = std::get<0>(this->getPopVariance());
        const T cov_xy = std::get<1>(this->getPopVariance());
        const T b = cov_xy / var_x;  // dy / dx

        const T mean_x = std::get<0>(this->getMean());
        const T mean_y = std::get<1>(this->getMean());
        return /* y = */ b * (x - mean_x) + mean_y;
    }

    /**
     * this returns the estimate of x from a given y
     */
    constexpr T getXFromY(const T &y) const {
        const T cov_xy = std::get<1>(this->getPopVariance());
        const T var_y = std::get<2>(this->getPopVariance());
        const T b = cov_xy / var_y;  // dx / dy

        const T mean_x = std::get<0>(this->getMean());
        const T mean_y = std::get<1>(this->getMean());
        return /* x = */ b * (y - mean_y) + mean_x;
    }

    constexpr T getR2() const {
        const T var_x = std::get<0>(this->getPopVariance());
        const T cov_xy = std::get<1>(this->getPopVariance());
        const T var_y = std::get<2>(this->getPopVariance());
        return cov_xy * cov_xy / (var_x * var_y);
    }
};
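
/*
 * A usage sketch (the data points are illustrative only):
 *
 *   LinearLeastSquaresFit<double> fit;  // alpha defaults to 1 (rectangular window)
 *   fit.add({0., 1.});                  // {x, y} pairs on the line y = 1 + 2x
 *   fit.add({1., 3.});
 *   fit.add({2., 5.});
 *
 *   double a{}, b{}, r2{};
 *   fit.computeYLine(a, b, r2);         // expect approximately a = 1, b = 2, r2 = 1
 *   const double y = fit.getYFromX(3.); // expect approximately 7
 */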

/**
 * constexpr statistics functions of the form:
 * algorithm(forward_iterator begin, forward_iterator end)
 *
 * These check that the input looks like an iterator, but don't
 * check if __is_forward_iterator<>.
 *
 * divide-and-conquer pairwise summation forms will require
 * __is_random_access_iterator<>.
 */

// returns max of elements, or if no elements negative infinity.
template <typename T,
          std::enable_if_t<is_iterator<T>::value, int> = 0>
constexpr auto max(T begin, T end) {
    using S = std::remove_cv_t<std::remove_reference_t<
            decltype(*begin)>>;
    S maxValue = StatisticsConstants<S>::mNegativeInfinity;
    for (auto it = begin; it != end; ++it) {
        maxValue = std::max(maxValue, *it);
    }
    return maxValue;
}

// returns min of elements, or if no elements positive infinity.
template <typename T,
          std::enable_if_t<is_iterator<T>::value, int> = 0>
constexpr auto min(T begin, T end) {
    using S = std::remove_cv_t<std::remove_reference_t<
            decltype(*begin)>>;
    S minValue = StatisticsConstants<S>::mPositiveInfinity;
    for (auto it = begin; it != end; ++it) {
        minValue = std::min(minValue, *it);
    }
    return minValue;
}

template <typename D = double, typename S = KahanSum<D>, typename T,
          std::enable_if_t<is_iterator<T>::value, int> = 0>
constexpr auto sum(T begin, T end) {
    S sum{};
    for (auto it = begin; it != end; ++it) {
        sum += D(*it);
    }
    return sum;
}

template <typename D = double, typename S = KahanSum<D>, typename T,
          std::enable_if_t<is_iterator<T>::value, int> = 0>
constexpr auto sumSqDiff(T begin, T end, D x = {}) {
    S sum{};
    for (auto it = begin; it != end; ++it) {
        const D diff = *it - x;
        sum += diff * diff;
    }
    return sum;
}

// Form: algorithm(array[]), where array size is known to the compiler.
template <typename T, size_t N>
constexpr T max(const T (&a)[N]) {
    return max(&a[0], &a[N]);
}

template <typename T, size_t N>
constexpr T min(const T (&a)[N]) {
    return min(&a[0], &a[N]);
}

template <typename D = double, typename S = KahanSum<D>, typename T, size_t N>
constexpr D sum(const T (&a)[N]) {
    return sum<D, S>(&a[0], &a[N]);
}

template <typename D = double, typename S = KahanSum<D>, typename T, size_t N>
constexpr D sumSqDiff(const T (&a)[N], D x = {}) {
    return sumSqDiff<D, S>(&a[0], &a[N], x);
}
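
/*
 * These forms permit compile-time evaluation, e.g. a sketch:
 *
 *   constexpr double data[] = { 3., -1., 2. };
 *   static_assert(max(data) == 3., "");
 *   static_assert(min(data) == -1., "");
 *   static_assert(sum(data) == 4., "");
 *   static_assert(sumSqDiff(data) == 14., "");  // 9 + 1 + 4, with x defaulting to 0
 */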

// TODO: remove when std::isnan is constexpr
template <typename T>
constexpr T isnan(T x) {
    return __builtin_isnan(x);
}

// constexpr sqrt computed by the Babylonian (Newton's) method.
// Please use math libraries for non-constexpr cases.
// TODO: remove when there is some std::sqrt which is constexpr.
//
// https://en.wikipedia.org/wiki/Methods_of_computing_square_roots

// Be careful using the unchecked version; prefer the checked version below.
template <typename T>
constexpr T sqrt_constexpr_unchecked(T x, T prev) {
    static_assert(std::is_floating_point<T>::value, "must be floating point type");
    const T next = T(0.5) * (prev + x / prev);
    return next == prev ? next : sqrt_constexpr_unchecked(x, next);
}

// checked sqrt
template <typename T>
constexpr T sqrt_constexpr(T x) {
    static_assert(std::is_floating_point<T>::value, "must be floating point type");
    if (x < T{}) { // negative values return nan
        return std::numeric_limits<T>::quiet_NaN();
    } else if (isnan(x)
            || x == std::numeric_limits<T>::infinity()
            || x == T{}) {
        return x;
    } else { // good to go.
        return sqrt_constexpr_unchecked(x, T(1.));
    }
}
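
/*
 * For example (a compile-time sketch; the values are illustrative):
 *
 *   constexpr double root2 = sqrt_constexpr(2.);  // usable in constant expressions
 *   constexpr float  three = sqrt_constexpr(9.f); // approximately 3.f
 *   // Negative inputs return NaN; zero, infinity, and NaN are returned unchanged.
 */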

} // namespace audio_utils
} // namespace android

#endif // __cplusplus

/** \cond */
__BEGIN_DECLS
/** \endcond */

/** Simple stats structure for low overhead statistics gathering.
 * Designed to be accessed by C (with no functional getters).
 * Zero initialize {} to clear or reset.
 */
typedef struct {
   int64_t n;
   double min;
   double max;
   double last;
   double mean;
} simple_stats_t;

/** logs new value to the simple_stats_t */
static inline void simple_stats_log(simple_stats_t *stats, double value) {
    if (++stats->n == 1) {
        stats->min = stats->max = stats->last = stats->mean = value;
    } else {
        stats->last = value;
        if (value < stats->min) {
            stats->min = value;
        } else if (value > stats->max) {
            stats->max = value;
        }
        // Welford's algorithm for mean
        const double delta = value - stats->mean;
        stats->mean += delta / stats->n;
    }
}

/** dumps statistics to a string, returns the length of the string excluding null termination. */
static inline size_t simple_stats_to_string(simple_stats_t *stats, char *buffer, size_t size) {
    if (size == 0) {
        return 0;
    } else if (stats->n == 0) {
        return snprintf(buffer, size, "none");
    } else {
        return snprintf(buffer, size, "(mean: %lf  min: %lf  max: %lf  last: %lf  n: %lld)",
                stats->mean, stats->min, stats->max, stats->last, (long long)stats->n);
    }
}
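
/* Example (a sketch; the logged values and buffer size are arbitrary):

   simple_stats_t stats = {};     // zero initialize to reset
   simple_stats_log(&stats, 1.0);
   simple_stats_log(&stats, 3.0);
   simple_stats_log(&stats, 2.0); // mean 2.0, min 1.0, max 3.0, last 2.0, n 3

   char buffer[256];
   simple_stats_to_string(&stats, buffer, sizeof(buffer));
 */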

/** \cond */
__END_DECLS
/** \endcond */

#endif // !ANDROID_AUDIO_UTILS_STATISTICS_H