/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <stdatomic.h>

// The state queue template class was originally driven by this use case / requirements:
//  There are two threads: a fast mixer, and a normal mixer, and they share state.
//  The interesting part of the shared state is a set of active fast tracks,
//  and the output HAL configuration (buffer size in frames, sample rate, etc.).
//  Fast mixer thread:
//      periodic with typical period < 10 ms
//      FIFO/RR scheduling policy and a low fixed priority
//      ok to block for bounded time using nanosleep() to achieve desired period
//      must not block on condition wait, mutex lock, atomic operation spin, I/O, etc.
//        under typical operations of mixing, writing, or adding/removing tracks
//      ok to block for unbounded time when the output HAL configuration changes,
//        and this may result in an audible artifact
//      needs read-only access to a recent stable state,
//        but not necessarily the most current one
//      only allocate and free memory when configuration changes
//      avoid conventional logging, as this is a form of I/O and could block
//      defer computation to other threads when feasible; for example
//        cycle times are collected by fast mixer thread but the floating-point
//        statistical calculations on these cycle times are computed by normal mixer
//      these requirements also apply to callouts such as AudioBufferProvider and VolumeProvider
//  Normal mixer thread:
//      periodic with typical period ~20 ms
//      SCHED_OTHER scheduling policy and nice priority == urgent audio
//      ok to block, but prefer to avoid as much as possible
//      needs read/write access to state
//  The normal mixer may need to temporarily suspend the fast mixer thread during mode changes.
//  It will do this using the state -- one of the fields tells the fast mixer to idle.

// Additional requirements:
//  - observer must always be able to poll for and view the latest pushed state; it must never be
//    blocked from seeing that state
//  - observer does not need to see every state in sequence; it is OK for it to skip states
//    [see below for more on this]
//  - mutator must always be able to read/modify a state; it must never be blocked from reading or
//    modifying state
//  - reduce memcpy where possible
//  - work well if the observer runs more frequently than the mutator,
//    as is the case with fast mixer/normal mixer.
//    It is not a requirement to work well if the roles were reversed,
//    and the mutator were to run more frequently than the observer.
//    In this case, the mutator could get blocked waiting for a slot to fill up for
//    it to work with.  This could be solved somewhat by increasing the depth of the queue,
//    but it would still limit the mutator to a finite number of changes before it would block.
//    A future possibility, not implemented here, would be to allow the mutator to safely
//    overwrite an already pushed state.  This could be done by the mutator overwriting mNext,
//    but then being prepared to read an mAck which is actually for the earlier mNext
//    (since there is a race).

// Solution:
//  Let's call the fast mixer thread the "observer" and the normal mixer thread the "mutator".
//  We assume there is only a single observer and a single mutator; this is critical.
//  Each state is of type <T>, and should contain only POD (Plain Old Data) and raw pointers, as
//  memcpy() may be used to copy state, and the destructors are run in unpredictable order.
//  The states in chronological order are: previous, current, next, and mutating:
//      previous    read-only, observer can compare vs. current to see the subset that changed
//      current     read-only, this is the primary state for observer
//      next        read-only, when observer is ready to accept a new state it will shift it in:
//                      previous = current
//                      current = next
//                  and the slot formerly used by previous is now available to the mutator.
//      mutating    invisible to observer, read/write to mutator
//  Initialization is tricky, especially for the observer.  If the observer starts execution
//  before the mutator, there are no previous, current, or next states.  And even if the observer
//  starts execution after the mutator, there is a next state but no previous or current states.
//  To solve this, we'll have the observer idle until there is a next state,
//  and it will have to deal with the case where there is no previous state.
//  The states are stored in a shared FIFO queue represented using a circular array.
//  The observer polls for mutations, and receives a new state pointer after a
//  mutation is pushed onto the queue.  To the observer, the state pointers are
//  effectively in random order, that is, the observer should not do address
//  arithmetic on the state pointers.  However, to the mutator, the state pointers
//  are in a definite circular order.
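//
//  As a conceptual sketch only of the shift described above (this is not the actual poll()
//  implementation, which must also handle first-time initialization and uses atomic operations
//  on mNext / mAck rather than the plain load()/store() placeholders shown here):
//
//      const T* next = load(mNext);    // has the mutator pushed a new state?
//      if (next != current) {
//          previous = current;         // the pointer returned by the prior poll() remains valid,
//                                      // so the observer can diff previous vs. current
//          current  = next;            // adopt the newly pushed state
//          store(mAck, next);          // acknowledge, so the mutator may reuse the oldest slot
//      }
//      return current;                 // unchanged if nothing was pushed since the last poll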

#include "Configuration.h"

namespace android {

#ifdef STATE_QUEUE_DUMP
// The StateQueueObserverDump and StateQueueMutatorDump keep
// a cache of StateQueue statistics that can be logged by dumpsys.
// Each individual native word-sized field is accessed atomically.  But the
// overall structure is non-atomic, that is, there may be an inconsistency between fields.
// No barriers or locks are used for either writing or reading.
// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
// It has a different lifetime than the StateQueue, and so it can't be a member of StateQueue.

struct StateQueueObserverDump {
    StateQueueObserverDump() : mStateChanges(0) { }
    /*virtual*/ ~StateQueueObserverDump() { }
    unsigned    mStateChanges;      // incremented each time poll() detects a state change
    void        dump(int fd);
};

struct StateQueueMutatorDump {
    StateQueueMutatorDump() : mPushDirty(0), mPushAck(0), mBlockedSequence(0) { }
    /*virtual*/ ~StateQueueMutatorDump() { }
    unsigned    mPushDirty;         // incremented each time push() is called with a dirty state
    unsigned    mPushAck;           // incremented each time push(BLOCK_UNTIL_ACKED) is called
    unsigned    mBlockedSequence;   // incremented before and after each time that push()
                                    // blocks for more than one PUSH_BLOCK_ACK_NS;
                                    // if odd, then mutator is currently blocked inside push()
    void        dump(int fd);
};
#endif

// Manages a FIFO queue of states.
// Marked final to prevent derived classes, as there are no virtual member functions.
template<typename T> class StateQueue final {

public:
    // Observer APIs

    // Poll for a state change.  Returns a pointer to a read-only state,
    // or NULL if the state has not been initialized yet.
    // If a new state has not been pushed by the mutator since the previous poll,
    // then the returned pointer will be unchanged.
    // The previous state pointer is guaranteed to still be valid;
    // this allows the observer to diff the previous and new states.
    const T*    poll();

    // Mutator APIs

    // Begin a mutation.  Returns a pointer to a read/write state, except the
    // first time it is called the state is write-only and _must_ be initialized.
    // Mutations cannot be nested.
    // If the state is dirty and has not been pushed onto the state queue yet, then
    // this new mutation will be squashed together with the previous one.
    T*          begin();

    // End the current mutation and indicate whether the caller modified the state.
    // If didModify is true, then the state is marked dirty (in need of pushing).
    // There is no rollback option because modifications are done in place.
    // Does not automatically push the new state onto the state queue.
    void        end(bool didModify = true);

    // Push a new state, if any, out to the observer via the state queue.
    // For BLOCK_NEVER, returns:
    //      true if not dirty, or dirty and pushed successfully
    //      false if dirty and not pushed because that would block; remains dirty
    // For BLOCK_UNTIL_PUSHED and BLOCK_UNTIL_ACKED, always returns true.
    // No-op if there are no pending modifications (not dirty), except
    // for BLOCK_UNTIL_ACKED it will wait until a prior push has been acknowledged.
    // Must not be called in the middle of a mutation.
    enum block_t {
        BLOCK_NEVER,        // do not block
        BLOCK_UNTIL_PUSHED, // block until there's a slot available for the push
        BLOCK_UNTIL_ACKED,  // also block until the push is acknowledged by the observer
    };
    bool        push(block_t block = BLOCK_NEVER);

    // Return whether the current state is dirty (modified and not pushed).
    bool        isDirty() const { return mIsDirty; }

#ifdef STATE_QUEUE_DUMP
    // Register location of observer dump area
    void        setObserverDump(StateQueueObserverDump *dump)
                { mObserverDump = dump != NULL ? dump : &mObserverDummyDump; }

    // Register location of mutator dump area
    void        setMutatorDump(StateQueueMutatorDump *dump)
                { mMutatorDump = dump != NULL ? dump : &mMutatorDummyDump; }
#endif

private:
    static const unsigned kN = 4;           // values < 4 are not supported by this code
    T                   mStates[kN];        // written by mutator, read by observer

    // "volatile" is meaningless with SMP, but here it indicates that we're using atomic ops
    atomic_uintptr_t    mNext{};            // written by mutator to advance next, read by observer
    volatile const T*   mAck = nullptr;     // written by observer to acknowledge advance of next,
                                            // read by mutator

    // only used by observer
    const T*            mCurrent = nullptr;         // most recent value returned by poll()

    // only used by mutator
    T*                  mMutating{&mStates[0]};     // where updates by mutator are done in place
    const T*            mExpecting = nullptr;       // what the mutator expects mAck to be set to
    bool                mInMutation = false;        // whether we're currently in the middle of a
                                                    // mutation
    bool                mIsDirty = false;           // whether mutating state has been modified
                                                    // since last push
    bool                mIsInitialized = false;     // whether mutating state has been initialized
                                                    // yet

#ifdef STATE_QUEUE_DUMP
    StateQueueObserverDump  mObserverDummyDump;     // default area for observer dump if not set
    // pointer to the active observer dump, always non-null
    StateQueueObserverDump* mObserverDump{&mObserverDummyDump};
    StateQueueMutatorDump   mMutatorDummyDump;      // default area for mutator dump if not set
    // pointer to the active mutator dump, always non-null
    StateQueueMutatorDump*  mMutatorDump{&mMutatorDummyDump};
#endif

};  // class StateQueue
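
// Typical usage, as an illustrative sketch only.  The state type FastState, its field
// mFrameCount, and the function applyState() are hypothetical placeholders, not part of this
// class; each side runs on its own thread as described above:
//
//      StateQueue<FastState> queue;
//
//      // Mutator (e.g. normal mixer) side:
//      FastState* state = queue.begin();       // first call returns write-only state: initialize
//      state->mFrameCount = 256;               // modify in place; there is no rollback
//      queue.end();                            // mark the mutating state dirty
//      (void) queue.push(StateQueue<FastState>::BLOCK_UNTIL_PUSHED);
//
//      // Observer (e.g. fast mixer) side, once per cycle:
//      const FastState* current = queue.poll();
//      if (current != NULL) {                  // NULL until the mutator's first push
//          applyState(*current);
//      }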

}   // namespace android