/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastThread"
//#define LOG_NDEBUG 0

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include "Configuration.h"
#include <linux/futex.h>
#include <sys/syscall.h>
#include <cutils/atomic.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include "FastThread.h"
#include "FastThreadDumpState.h"

#define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
#define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
#define MIN_WARMUP_CYCLES          2    // minimum number of consecutive in-range loop cycles
                                        // to wait for warmup
#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup

namespace android {

FastThread::FastThread(const char *cycleMs, const char *loadUs) : Thread(false /*canCallJava*/),
    // re-initialized to &sInitial by subclass constructor
    mPrevious(NULL), mCurrent(NULL),
    /* mOldTs({0, 0}), */
    mOldTsValid(false),
    mSleepNs(-1),
    mPeriodNs(0),
    mUnderrunNs(0),
    mOverrunNs(0),
    mForceNs(0),
    mWarmupNsMin(0),
    mWarmupNsMax(LONG_MAX),
    // re-initialized to &mDummySubclassDumpState by subclass constructor
    mDummyDumpState(NULL),
    mDumpState(NULL),
    mIgnoreNextOverrun(true),
#ifdef FAST_THREAD_STATISTICS
    // mOldLoad
    mOldLoadValid(false),
    mBounds(0),
    mFull(false),
    // mTcu
#endif
    mColdGen(0),
    mIsWarm(false),
    /* mMeasuredWarmupTs({0, 0}), */
    mWarmupCycles(0),
    mWarmupConsecutiveInRangeCycles(0),
    // mDummyLogWriter
    mLogWriter(&mDummyLogWriter),
    mTimestampStatus(INVALID_OPERATION),

    mCommand(FastThreadState::INITIAL),
#if 0
    frameCount(0),
#endif
    mAttemptedWrite(false)
    // mCycleMs(cycleMs)
    // mLoadUs(loadUs)
{
    mOldTs.tv_sec = 0;
    mOldTs.tv_nsec = 0;
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
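    // cycleMs and loadUs name the systrace counters updated via ATRACE_INT() in threadLoop();
    // they are copied into fixed-size members so the caller's strings need not outlive this object.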
    strlcpy(mCycleMs, cycleMs, sizeof(mCycleMs));
    strlcpy(mLoadUs, loadUs, sizeof(mLoadUs));
}

FastThread::~FastThread()
{
}

bool FastThread::threadLoop()
{
    for (;;) {

        // either nanosleep, sched_yield, or busy wait
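        // mSleepNs > 0 requests a bounded nanosleep, mSleepNs == 0 just yields the CPU,
        // and mSleepNs < 0 skips sleeping entirely so the loop runs again immediately.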
        if (mSleepNs >= 0) {
            if (mSleepNs > 0) {
                ALOG_ASSERT(mSleepNs < 1000000000);
                const struct timespec req = {0, mSleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        mSleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            // FIXME &sInitial not available, should save address earlier
            //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial);
            next = mCurrent;
        }

        mCommand = next->mCommand;
        if (next != mCurrent) {

            // As soon as we learn of a new dump area, start using it.
            mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
            mLogWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &mDummyLogWriter;
            setLog(mLogWriter);

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(mCurrent->mCommand & FastThreadState::IDLE)) {
                if (mCommand & FastThreadState::IDLE) {
                    onIdle();
                    mOldTsValid = false;
#ifdef FAST_THREAD_STATISTICS
                    mOldLoadValid = false;
#endif
                    mIgnoreNextOverrun = true;
                }
                mPrevious = mCurrent;
            }
            mCurrent = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        mDumpState->mCommand = mCommand;

        // FIXME what does this comment mean?
        // << current, previous, command, dumpState >>

        switch (mCommand) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            mSleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (mCurrent->mColdGen != mColdGen) {
                int32_t *coldFutexAddr = mCurrent->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
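                // Handshake with the controlling thread (the normal mixer): atomically decrement
                // the futex word and, if its previous value shows no wake-up has been posted yet,
                // block until the controller increments it and issues FUTEX_WAKE to end cold idle.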
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
                int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost on time");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                mIsWarm = false;
                mMeasuredWarmupTs.tv_sec = 0;
                mMeasuredWarmupTs.tv_nsec = 0;
                mWarmupCycles = 0;
                mWarmupConsecutiveInRangeCycles = 0;
                mSleepNs = -1;
                mColdGen = mCurrent->mColdGen;
#ifdef FAST_THREAD_STATISTICS
                mBounds = 0;
                mFull = false;
#endif
                mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs);
                mTimestampStatus = INVALID_OPERATION;
            } else {
                mSleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            return false;
        default:
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (mCurrent != mPrevious) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            mPrevious = mCurrent;
#endif
        }

        // do work using current state here
        mAttemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            //mLogWriter->logTimestamp(newTs);
            if (mOldTsValid) {
                time_t sec = newTs.tv_sec - mOldTs.tv_sec;
                long nsec = newTs.tv_nsec - mOldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
                        mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES consecutive in-range write() attempts,
                //          where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!mIsWarm && mAttemptedWrite) {
                    mMeasuredWarmupTs.tv_sec += sec;
                    mMeasuredWarmupTs.tv_nsec += nsec;
                    if (mMeasuredWarmupTs.tv_nsec >= 1000000000) {
                        mMeasuredWarmupTs.tv_sec++;
                        mMeasuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++mWarmupCycles;
                    if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) {
                        ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-6);
                        ++mWarmupConsecutiveInRangeCycles;
                    } else {
                        ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-6);
                        mWarmupConsecutiveInRangeCycles = 0;
                    }
                    if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) ||
                            (mWarmupCycles >= MAX_WARMUP_CYCLES)) {
                        mIsWarm = true;
                        mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs;
                        mDumpState->mWarmupCycles = mWarmupCycles;
                    }
                }
                mSleepNs = -1;
                if (mIsWarm) {
                    if (sec > 0 || nsec > mUnderrunNs) {
                        ATRACE_NAME("underrun");
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        mDumpState->mUnderruns++;
                        mIgnoreNextOverrun = true;
                    } else if (nsec < mOverrunNs) {
                        if (mIgnoreNextOverrun) {
                            mIgnoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            mDumpState->mOverruns++;
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls at a
                        //    rate of less than mOverrunNs per buffer.
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        mSleepNs = mForceNs - nsec;
                    } else {
                        mIgnoreNextOverrun = false;
                    }
                }
#ifdef FAST_THREAD_STATISTICS
                if (mIsWarm) {
                    // advance the FIFO queue bounds
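                    // mBounds packs two 16-bit indices into the circular statistics buffers:
                    // the low half is the next slot to write ("newest open") and the high half
                    // is the oldest valid slot; the dump code reads both to know which samples
                    // are live. Once the buffer has wrapped (mFull), both halves advance together.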
                    size_t i = mBounds & (mDumpState->mSamplingN - 1);
                    mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF);
                    if (mFull) {
                        mBounds += 0x10000;
                    } else if (!(mBounds & (mDumpState->mSamplingN - 1))) {
                        mFull = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
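                    // A uint32_t holds at most ~4.29 s of nanoseconds, hence the sec < 4 guard;
                    // longer gaps are recorded with only their sub-second part.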
                    uint32_t monotonicNs = nsec;
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000;
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (mOldLoadValid) {
                            sec = newLoad.tv_sec - mOldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - mOldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000;
                            }
                        } else {
                            // first time through the loop
                            mOldLoadValid = true;
                        }
                        mOldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = mTcu.getCpukHz(cpuNum);
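                    // pack the CPU number into the low 4 bits so the dump can attribute the
                    // sample to a core; the upper bits carry the frequency in kHz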
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    mDumpState->mMonotonicNs[i] = monotonicNs;
                    mDumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    mDumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    mDumpState->mBounds = mBounds;
                    ATRACE_INT(mCycleMs, monotonicNs / 1000000);
                    ATRACE_INT(mLoadUs, loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                mOldTsValid = true;
                mSleepNs = mPeriodNs;
                mIgnoreNextOverrun = true;
            }
            mOldTs = newTs;
        } else {
            // monotonic clock is broken
            mOldTsValid = false;
            mSleepNs = mPeriodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}

}   // namespace android