/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastMixerDumpState"
//#define LOG_NDEBUG 0

#include "Configuration.h"
#ifdef FAST_THREAD_STATISTICS
#include <cpustats/CentralTendencyStatistics.h>
#ifdef CPU_FREQUENCY_STATISTICS
#include <cpustats/ThreadCpuUsage.h>
#endif
#endif
#include <utils/Debug.h>
#include <utils/Log.h>
#include "FastMixerDumpState.h"

namespace android {

FastMixerDumpState::FastMixerDumpState() : FastThreadDumpState(),
    mWriteSequence(0), mFramesWritten(0),
    mNumTracks(0), mWriteErrors(0),
    mSampleRate(0), mFrameCount(0),
    mTrackMask(0)
{
}

FastMixerDumpState::~FastMixerDumpState()
{
}

// helper function called by qsort()
static int compare_uint32_t(const void *pa, const void *pb)
{
    uint32_t a = *(const uint32_t *)pa;
    uint32_t b = *(const uint32_t *)pb;
    if (a < b) {
        return -1;
    } else if (a > b) {
        return 1;
    } else {
        return 0;
    }
}

void FastMixerDumpState::dump(int fd) const
{
    if (mCommand == FastMixerState::INITIAL) {
        dprintf(fd, " FastMixer not initialized\n");
        return;
    }
    double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
    double mixPeriodSec = (double) mFrameCount / mSampleRate;
    dprintf(fd, " FastMixer command=%s writeSequence=%u framesWritten=%u\n"
            " numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
            " sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
            " mixPeriod=%.2f ms\n",
            FastMixerState::commandToString(mCommand), mWriteSequence, mFramesWritten,
            mNumTracks, mWriteErrors, mUnderruns, mOverruns,
            mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
            mixPeriodSec * 1e3);
#ifdef FAST_THREAD_STATISTICS
    // find the interval of valid samples
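    // mBounds packs two 16-bit sample indices maintained by the fast mixer thread:
    // the low half is the newest open (in-progress) slot and the high half is the
    // oldest closed (completed) slot; their 16-bit difference is the sample count.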
    uint32_t bounds = mBounds;
    uint32_t newestOpen = bounds & 0xFFFF;
    uint32_t oldestClosed = bounds >> 16;

    // The indices are free-running 16-bit counters, so take their difference with
    // explicit wraparound and mask the result back down to 16 bits.
    //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
    uint32_t n;
    __builtin_sub_overflow(newestOpen, oldestClosed, &n);
    n = n & 0xFFFF;

    if (n > mSamplingN) {
        ALOGE("too many samples %u", n);
        n = mSamplingN;
    }
    // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
    // and adjusted CPU load in MHz normalized for CPU clock frequency
    CentralTendencyStatistics wall, loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
    CentralTendencyStatistics kHz, loadMHz;
    uint32_t previousCpukHz = 0;
#endif
    // Assuming a normal distribution for cycle times, three standard deviations on either side of
    // the mean account for 99.73% of the population.  So if we take each tail to be 1/1000 of the
    // sample set, we get 99.8% combined, or close to three standard deviations.
    static const uint32_t kTailDenominator = 1000;
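    // Only collect raw samples for tail analysis when there are enough of them
    // to put at least one sample in each tail.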
    uint32_t *tail = n >= kTailDenominator ? new uint32_t[n] : NULL;
    // loop over all the samples
    for (uint32_t j = 0; j < n; ++j) {
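        // mSamplingN is expected to be a power of two, so masking by (mSamplingN - 1)
        // wraps the index into the circular sample arrays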
        size_t i = oldestClosed++ & (mSamplingN - 1);
        uint32_t wallNs = mMonotonicNs[i];
        if (tail != NULL) {
            tail[j] = wallNs;
        }
        wall.sample(wallNs);
        uint32_t sampleLoadNs = mLoadNs[i];
        loadNs.sample(sampleLoadNs);
#ifdef CPU_FREQUENCY_STATISTICS
        uint32_t sampleCpukHz = mCpukHz[i];
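        // The sample packs the CPU clock in kHz above the low 4 bits (which carry the
        // CPU number), hence the >> 4 below; a value with no bits set above the low
        // nibble indicates that no valid frequency was captured.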
        // skip bad kHz samples
        if ((sampleCpukHz & ~0xF) != 0) {
            kHz.sample(sampleCpukHz >> 4);
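            // Only derive an adjusted load when this sample ran on the same CPU at the
            // same frequency as the previous one, so converting load time to cycles is valid.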
            if (sampleCpukHz == previousCpukHz) {
                double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
                double adjMHz = megacycles / mixPeriodSec;  // _not_ wallNs * 1e9
                loadMHz.sample(adjMHz);
            }
        }
        previousCpukHz = sampleCpukHz;
#endif
    }
    if (n) {
        dprintf(fd, " Simple moving statistics over last %.1f seconds:\n",
                wall.n() * mixPeriodSec);
        dprintf(fd, " wall clock time in ms per mix cycle:\n"
                " mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
                wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6,
                wall.stddev()*1e-6);
        dprintf(fd, " raw CPU load in us per mix cycle:\n"
                " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
                loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
                loadNs.stddev()*1e-3);
    } else {
        dprintf(fd, " No FastMixer statistics available currently\n");
    }
#ifdef CPU_FREQUENCY_STATISTICS
    dprintf(fd, " CPU clock frequency in MHz:\n"
            " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
            kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
    dprintf(fd, " adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
            " mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
            loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
#endif
    if (tail != NULL) {
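        // Sort the saved wall clock samples so that the smallest and largest
        // n/kTailDenominator values can be summarized separately below.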
        qsort(tail, n, sizeof(uint32_t), compare_uint32_t);
        // assume same number of tail samples on each side, left and right
        uint32_t count = n / kTailDenominator;
        CentralTendencyStatistics left, right;
        for (uint32_t i = 0; i < count; ++i) {
            left.sample(tail[i]);
            right.sample(tail[n - (i + 1)]);
        }
        dprintf(fd, " Distribution of mix cycle times in ms for the tails "
                "(> ~3 stddev outliers):\n"
                " left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
                " right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
                left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
                right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6,
                right.stddev()*1e-6);
        delete[] tail;
    }
#endif
    // The active track mask and track states are updated non-atomically.
    // So if we relied on isActive to decide whether to display,
    // then we might display an obsolete track or omit an active track.
    // Instead we always display all tracks, with an indication
    // of whether we think the track is active.
    uint32_t trackMask = mTrackMask;
    dprintf(fd, " Fast tracks: sMaxFastTracks=%u activeMask=%#x\n",
            FastMixerState::sMaxFastTracks, trackMask);
    dprintf(fd, " Index Active Full Partial Empty Recent Ready Written\n");
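    // trackMask is shifted right each iteration so that bit 0 always corresponds to
    // the track at the current index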
    for (uint32_t i = 0; i < FastMixerState::sMaxFastTracks; ++i, trackMask >>= 1) {
        bool isActive = trackMask & 1;
        const FastTrackDump *ftDump = &mTracks[i];
        const FastTrackUnderruns& underruns = ftDump->mUnderruns;
        const char *mostRecent;
        switch (underruns.mBitFields.mMostRecent) {
        case UNDERRUN_FULL:
            mostRecent = "full";
            break;
        case UNDERRUN_PARTIAL:
            mostRecent = "partial";
            break;
        case UNDERRUN_EMPTY:
            mostRecent = "empty";
            break;
        default:
            mostRecent = "?";
            break;
        }
        dprintf(fd, " %5u %6s %4u %7u %5u %7s %5zu %10lld\n",
                i, isActive ? "yes" : "no",
                (underruns.mBitFields.mFull) & UNDERRUN_MASK,
                (underruns.mBitFields.mPartial) & UNDERRUN_MASK,
                (underruns.mBitFields.mEmpty) & UNDERRUN_MASK,
                mostRecent, ftDump->mFramesReady,
                (long long)ftDump->mFramesWritten);
    }
}

}   // namespace android