/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastMixerDumpState"
//#define LOG_NDEBUG 0

#include "Configuration.h"
#ifdef FAST_THREAD_STATISTICS
#include <cpustats/CentralTendencyStatistics.h>
#ifdef CPU_FREQUENCY_STATISTICS
#include <cpustats/ThreadCpuUsage.h>
#endif
#endif
#include <utils/Debug.h>
#include <utils/Log.h>
#include "FastMixerDumpState.h"

namespace android {
32
FastMixerDumpState()33 FastMixerDumpState::FastMixerDumpState() : FastThreadDumpState(),
34 mWriteSequence(0), mFramesWritten(0),
35 mNumTracks(0), mWriteErrors(0),
36 mSampleRate(0), mFrameCount(0),
37 mTrackMask(0)
38 {
39 }
40
~FastMixerDumpState()41 FastMixerDumpState::~FastMixerDumpState()
42 {
43 }
44
// qsort() comparator ordering uint32_t values ascending.
// Yields a negative, zero, or positive result as *pa is less than,
// equal to, or greater than *pb.
static int compare_uint32_t(const void *pa, const void *pb)
{
    const uint32_t a = *static_cast<const uint32_t *>(pa);
    const uint32_t b = *static_cast<const uint32_t *>(pb);
    // Branchless three-way comparison: (a > b) - (a < b) is -1, 0, or +1.
    return (a > b) - (a < b);
}
58
// Write a human-readable snapshot of the FastMixer state to file descriptor fd
// (used by dumpsys).  Fields are read without locking, so individual values
// may be mutually inconsistent; this is a deliberate best-effort dump.
void FastMixerDumpState::dump(int fd) const
{
    if (mCommand == FastMixerState::INITIAL) {
        // The fast mixer never left its initial state; nothing to report.
        dprintf(fd, " FastMixer not initialized\n");
        return;
    }
    // Convert the measured warmup timestamp (sec + nsec) to milliseconds.
    double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
    // Nominal duration of one mix cycle, in seconds.
    double mixPeriodSec = (double) mFrameCount / mSampleRate;
    dprintf(fd, " FastMixer command=%s writeSequence=%u framesWritten=%u\n"
                " numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
                " sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
                " mixPeriod=%.2f ms\n",
            FastMixerState::commandToString(mCommand), mWriteSequence, mFramesWritten,
            mNumTracks, mWriteErrors, mUnderruns, mOverruns,
            mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
            mixPeriodSec * 1e3);
#ifdef FAST_THREAD_STATISTICS
    // find the interval of valid samples
    // mBounds packs two 16-bit indices: low half is the newest open
    // (in-progress) sample, high half is the oldest closed (complete) sample.
    uint32_t bounds = mBounds;
    uint32_t newestOpen = bounds & 0xFFFF;
    uint32_t oldestClosed = bounds >> 16;
    // Count of closed samples; the & 0xFFFF handles 16-bit index wraparound.
    uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
    if (n > mSamplingN) {
        // Should not happen; clamp so we never read outside the sample arrays.
        ALOGE("too many samples %u", n);
        n = mSamplingN;
    }
    // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
    // and adjusted CPU load in MHz normalized for CPU clock frequency
    CentralTendencyStatistics wall, loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
    CentralTendencyStatistics kHz, loadMHz;
    uint32_t previousCpukHz = 0;
#endif
    // Assuming a normal distribution for cycle times, three standard deviations on either side of
    // the mean account for 99.73% of the population.  So if we take each tail to be 1/1000 of the
    // sample set, we get 99.8% combined, or close to three standard deviations.
    static const uint32_t kTailDenominator = 1000;
    // Only collect tail samples when there are enough for at least one per tail.
    uint32_t *tail = n >= kTailDenominator ? new uint32_t[n] : NULL;
    // loop over all the samples
    for (uint32_t j = 0; j < n; ++j) {
        // mSamplingN is presumably a power of two, so masking implements the
        // circular-buffer index — TODO confirm against FastThreadDumpState.
        size_t i = oldestClosed++ & (mSamplingN - 1);
        uint32_t wallNs = mMonotonicNs[i];
        if (tail != NULL) {
            tail[j] = wallNs;
        }
        wall.sample(wallNs);
        uint32_t sampleLoadNs = mLoadNs[i];
        loadNs.sample(sampleLoadNs);
#ifdef CPU_FREQUENCY_STATISTICS
        uint32_t sampleCpukHz = mCpukHz[i];
        // skip bad kHz samples
        // (low 4 bits appear reserved; a value with no bits above them is invalid)
        if ((sampleCpukHz & ~0xF) != 0) {
            kHz.sample(sampleCpukHz >> 4);
            // Only accumulate adjusted load when the frequency was stable
            // across two consecutive samples, so the ns * kHz product is valid.
            if (sampleCpukHz == previousCpukHz) {
                double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
                double adjMHz = megacycles / mixPeriodSec; // _not_ wallNs * 1e9
                loadMHz.sample(adjMHz);
            }
        }
        previousCpukHz = sampleCpukHz;
#endif
    }
    if (n) {
        dprintf(fd, " Simple moving statistics over last %.1f seconds:\n",
                wall.n() * mixPeriodSec);
        dprintf(fd, " wall clock time in ms per mix cycle:\n"
                " mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
                wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6,
                wall.stddev()*1e-6);
        dprintf(fd, " raw CPU load in us per mix cycle:\n"
                " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
                loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
                loadNs.stddev()*1e-3);
    } else {
        dprintf(fd, " No FastMixer statistics available currently\n");
    }
#ifdef CPU_FREQUENCY_STATISTICS
    dprintf(fd, " CPU clock frequency in MHz:\n"
            " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
            kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
    dprintf(fd, " adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
            " mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
            loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
#endif
    if (tail != NULL) {
        // Sort so the extreme (outlier) cycle times sit at both ends.
        qsort(tail, n, sizeof(uint32_t), compare_uint32_t);
        // assume same number of tail samples on each side, left and right
        uint32_t count = n / kTailDenominator;
        CentralTendencyStatistics left, right;
        for (uint32_t i = 0; i < count; ++i) {
            left.sample(tail[i]);
            right.sample(tail[n - (i + 1)]);
        }
        dprintf(fd, " Distribution of mix cycle times in ms for the tails "
                "(> ~3 stddev outliers):\n"
                " left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
                " right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
                left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
                right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6,
                right.stddev()*1e-6);
        delete[] tail;
    }
#endif
    // The active track mask and track states are updated non-atomically.
    // So if we relied on isActive to decide whether to display,
    // then we might display an obsolete track or omit an active track.
    // Instead we always display all tracks, with an indication
    // of whether we think the track is active.
    uint32_t trackMask = mTrackMask;
    dprintf(fd, " Fast tracks: kMaxFastTracks=%u activeMask=%#x\n",
            FastMixerState::kMaxFastTracks, trackMask);
    dprintf(fd, " Index Active Full Partial Empty Recent Ready\n");
    // trackMask is shifted each iteration so bit 0 is always track i's flag.
    for (uint32_t i = 0; i < FastMixerState::kMaxFastTracks; ++i, trackMask >>= 1) {
        bool isActive = trackMask & 1;
        const FastTrackDump *ftDump = &mTracks[i];
        const FastTrackUnderruns& underruns = ftDump->mUnderruns;
        const char *mostRecent;
        // Decode the most recent underrun status bit-field for display.
        switch (underruns.mBitFields.mMostRecent) {
        case UNDERRUN_FULL:
            mostRecent = "full";
            break;
        case UNDERRUN_PARTIAL:
            mostRecent = "partial";
            break;
        case UNDERRUN_EMPTY:
            mostRecent = "empty";
            break;
        default:
            mostRecent = "?";
            break;
        }
        // Counters are masked to their bit-field width (UNDERRUN_MASK).
        dprintf(fd, " %5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? "yes" : "no",
                (underruns.mBitFields.mFull) & UNDERRUN_MASK,
                (underruns.mBitFields.mPartial) & UNDERRUN_MASK,
                (underruns.mBitFields.mEmpty) & UNDERRUN_MASK,
                mostRecent, ftDump->mFramesReady);
    }
}
198
} // android
200