1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18
19 #define LOG_TAG "AudioFlinger"
20 // #define LOG_NDEBUG 0
21 #define ATRACE_TAG ATRACE_TAG_AUDIO
22
23 #include "Threads.h"
24
25 #include "Client.h"
26 #include "IAfEffect.h"
27 #include "MelReporter.h"
28 #include "ResamplerBufferProvider.h"
29
30 #include <afutils/DumpTryLock.h>
31 #include <afutils/Permission.h>
32 #include <afutils/TypedLogger.h>
33 #include <afutils/Vibrator.h>
34 #include <audio_utils/MelProcessor.h>
35 #include <audio_utils/Metadata.h>
36 #include <com_android_media_audioserver.h>
37 #ifdef DEBUG_CPU_USAGE
38 #include <audio_utils/Statistics.h>
39 #include <cpustats/ThreadCpuUsage.h>
40 #endif
41 #include <audio_utils/channels.h>
42 #include <audio_utils/format.h>
43 #include <audio_utils/minifloat.h>
44 #include <audio_utils/mono_blend.h>
45 #include <audio_utils/primitives.h>
46 #include <audio_utils/safe_math.h>
47 #include <audiomanager/AudioManager.h>
48 #include <binder/IPCThreadState.h>
49 #include <binder/IServiceManager.h>
50 #include <binder/PersistableBundle.h>
51 #include <com_android_media_audio.h>
52 #include <cutils/bitops.h>
53 #include <cutils/properties.h>
54 #include <fastpath/AutoPark.h>
55 #include <media/AudioContainers.h>
56 #include <media/AudioDeviceTypeAddr.h>
57 #include <media/AudioParameter.h>
58 #include <media/AudioResamplerPublic.h>
59 #ifdef ADD_BATTERY_DATA
60 #include <media/IMediaPlayerService.h>
61 #include <media/IMediaDeathNotifier.h>
62 #endif
63 #include <media/MmapStreamCallback.h>
64 #include <media/RecordBufferConverter.h>
65 #include <media/TypeConverter.h>
66 #include <media/audiohal/EffectsFactoryHalInterface.h>
67 #include <media/audiohal/StreamHalInterface.h>
68 #include <media/nbaio/AudioStreamInSource.h>
69 #include <media/nbaio/AudioStreamOutSink.h>
70 #include <media/nbaio/MonoPipe.h>
71 #include <media/nbaio/MonoPipeReader.h>
72 #include <media/nbaio/Pipe.h>
73 #include <media/nbaio/PipeReader.h>
74 #include <media/nbaio/SourceAudioBufferProvider.h>
75 #include <mediautils/BatteryNotifier.h>
76 #include <mediautils/Process.h>
77 #include <mediautils/SchedulingPolicyService.h>
78 #include <mediautils/ServiceUtilities.h>
79 #include <powermanager/PowerManager.h>
80 #include <private/android_filesystem_config.h>
81 #include <private/media/AudioTrackShared.h>
82 #include <system/audio_effects/effect_aec.h>
83 #include <system/audio_effects/effect_downmix.h>
84 #include <system/audio_effects/effect_ns.h>
85 #include <system/audio_effects/effect_spatializer.h>
86 #include <utils/Log.h>
87 #include <utils/Trace.h>
88
89 #include <fcntl.h>
90 #include <linux/futex.h>
91 #include <math.h>
92 #include <memory>
93 #include <pthread.h>
94 #include <sstream>
95 #include <string>
96 #include <sys/stat.h>
97 #include <sys/syscall.h>
98
99 // ----------------------------------------------------------------------------
100
101 // Note: the following macro is used for extremely verbose logging messages. In
102 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
103 // 0; but one side effect of this is to turn on all LOGV's as well. Some messages
104 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
105 // turned on. Do not uncomment the #define below unless you really know what you
106 // are doing and want to see all of the extremely verbose messages.
107 //#define VERY_VERY_VERBOSE_LOGGING
108 #ifdef VERY_VERY_VERBOSE_LOGGING
109 #define ALOGVV ALOGV
110 #else
111 #define ALOGVV(a...) do { } while(0)
112 #endif
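// Illustrative usage (a sketch; this call site is hypothetical, not part of this file):
//     ALOGVV("mix loop: framesReady=%zu", framesReady);
// expands to ALOGV when VERY_VERY_VERBOSE_LOGGING is defined above (and therefore still
// requires LOG_NDEBUG 0 to actually print), and compiles to a no-op otherwise.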
113
114 // TODO: Move these macro/inlines to a header file.
115 #define max(a, b) ((a) > (b) ? (a) : (b))
116
117 template <typename T>
118 static inline T min(const T& a, const T& b)
119 {
120 return a < b ? a : b;
121 }
122
123 namespace android {
124
125 using audioflinger::SyncEvent;
126 using media::IEffectClient;
127 using content::AttributionSourceState;
128
129 // Keep in sync with java definition in media/java/android/media/AudioRecord.java
130 static constexpr int32_t kMaxSharedAudioHistoryMs = 5000;
131
132 // retry counts for buffer fill timeout
133 // 50 * ~20msecs = 1 second
134 static const int8_t kMaxTrackRetries = 50;
135 static const int8_t kMaxTrackStartupRetries = 50;
136
137 // allow fewer retry attempts on direct output threads.
138 // direct outputs can be a scarce resource in audio hardware and should
139 // be released as quickly as possible.
140 // Notes:
141 // 1) The retry duration kMaxTrackRetriesDirectMs may be increased
142 // in case the data write is bursty for the AudioTrack. The application
143 // should endeavor to write at least once every kMaxTrackRetriesDirectMs
144 // to prevent an underrun situation. If the data is bursty, then
145 // the application can also throttle the data sent to be even.
146 // 2) For compressed audio data, any data present in the AudioTrack buffer
147 // will be sent and reset the retry count. This delivers data as
148 // it arrives, with approximately kDirectMinSleepTimeUs = 10ms checking interval.
149 // 3) For linear PCM or proportional PCM, we wait one period for a period's worth
150 // of data to be available, then any remaining data is delivered.
151 // This is required to ensure the last bit of data is delivered before underrun.
152 //
153 // Sleep time per cycle is kDirectMinSleepTimeUs for compressed tracks
154 // or the size of the HAL period for proportional / linear PCM tracks.
155 static const int32_t kMaxTrackRetriesDirectMs = 200;
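// Worked example for the numbers above (approximate, for orientation only): with a checking
// interval of kDirectMinSleepTimeUs = 10 ms, the 200 ms budget corresponds to roughly
// 200 / 10 = 20 consecutive checks without data before a compressed direct track times out.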
156
157 // don't warn about blocked writes or record buffer overflows more often than this
158 static const nsecs_t kWarningThrottleNs = seconds(5);
159
160 // RecordThread loop sleep time upon application overrun or audio HAL read error
161 static const int kRecordThreadSleepUs = 5000;
162
163 // maximum time to wait in sendConfigEvent_l() for a status to be received
164 static const nsecs_t kConfigEventTimeoutNs = seconds(2);
165
166 // minimum sleep time for the mixer thread loop when tracks are active but in underrun
167 static const uint32_t kMinThreadSleepTimeUs = 5000;
168 // maximum divider applied to the active sleep time in the mixer thread loop
169 static const uint32_t kMaxThreadSleepTimeShift = 2;
170
171 // minimum normal sink buffer size, expressed in milliseconds rather than frames
172 // FIXME This should be based on experimentally observed scheduling jitter
173 static const uint32_t kMinNormalSinkBufferSizeMs = 20;
174 // maximum normal sink buffer size
175 static const uint32_t kMaxNormalSinkBufferSizeMs = 24;
176
177 // minimum capture buffer size in milliseconds to _not_ need a fast capture thread
178 // FIXME This should be based on experimentally observed scheduling jitter
179 static const uint32_t kMinNormalCaptureBufferSizeMs = 12;
180
181 // Offloaded output thread standby delay: allows track transition without going to standby
182 static const nsecs_t kOffloadStandbyDelayNs = seconds(1);
183
184 // Direct output thread minimum sleep time in idle or active(underrun) state
185 static const nsecs_t kDirectMinSleepTimeUs = 10000;
186
187 // Minimum amount of time between checking to see if the timestamp is advancing
188 // for underrun detection. If we check too frequently, we may not detect a
189 // timestamp update and will falsely detect underrun.
190 static constexpr nsecs_t kMinimumTimeBetweenTimestampChecksNs = 150 /* ms */ * 1'000'000;
191
192 // The universal constant for the ubiquitous 20ms value. The value of 20ms seems to provide a good
193 // balance between power consumption and latency, and allows threads to be scheduled reliably
194 // by the CFS scheduler.
195 // FIXME Express other hardcoded references to 20ms with references to this constant and move
196 // it appropriately.
197 #define FMS_20 20
198
199 // Whether to use fast mixer
200 static const enum {
201 FastMixer_Never, // never initialize or use: for debugging only
202 FastMixer_Always, // always initialize and use, even if not needed: for debugging only
203 // normal mixer multiplier is 1
204 FastMixer_Static, // initialize if needed, then use all the time if initialized,
205 // multiplier is calculated based on min & max normal mixer buffer size
206 FastMixer_Dynamic, // initialize if needed, then use dynamically depending on track load,
207 // multiplier is calculated based on min & max normal mixer buffer size
208 // FIXME for FastMixer_Dynamic:
209 // Supporting this option will require fixing HALs that can't handle large writes.
210 // For example, one HAL implementation returns an error from a large write,
211 // and another HAL implementation corrupts memory, possibly in the sample rate converter.
212 // We could either fix the HAL implementations, or provide a wrapper that breaks
213 // up large writes into smaller ones, and the wrapper would need to deal with the scheduler.
214 } kUseFastMixer = FastMixer_Static;
215
216 // Whether to use fast capture
217 static const enum {
218 FastCapture_Never, // never initialize or use: for debugging only
219 FastCapture_Always, // always initialize and use, even if not needed: for debugging only
220 FastCapture_Static, // initialize if needed, then use all the time if initialized
221 } kUseFastCapture = FastCapture_Static;
222
223 // Priorities for requestPriority
224 static const int kPriorityAudioApp = 2;
225 static const int kPriorityFastMixer = 3;
226 static const int kPriorityFastCapture = 3;
227 // Request real-time priority for PlaybackThread in ARC
228 static const int kPriorityPlaybackThreadArc = 1;
229
230 // IAudioFlinger::createTrack() has an in/out parameter 'pFrameCount' for the total size of the
231 // track buffer in shared memory. Zero on input means to use a default value. For fast tracks,
232 // AudioFlinger derives the default from HAL buffer size and 'fast track multiplier'.
233
234 // This is the default value, if not specified by property.
235 static const int kFastTrackMultiplier = 2;
236
237 // The minimum and maximum allowed values
238 static const int kFastTrackMultiplierMin = 1;
239 static const int kFastTrackMultiplierMax = 2;
240
241 // The actual value to use, which can be specified per-device via property af.fast_track_multiplier.
242 static int sFastTrackMultiplier = kFastTrackMultiplier;
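// Worked example (hypothetical figures, not taken from a real device): with a HAL period of
// 5 ms at 48 kHz (240 frames) and sFastTrackMultiplier == 2, a fast track passing 0 for
// 'pFrameCount' would get a default shared-memory buffer of about 240 * 2 = 480 frames.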
243
244 // See Thread::readOnlyHeap().
245 // Initially this heap is used to allocate client buffers for "fast" AudioRecord.
246 // Eventually it will be the single buffer that FastCapture writes into via HAL read(),
247 // and that all "fast" AudioRecord clients read from. In either case, the size can be small.
248 static const size_t kRecordThreadReadOnlyHeapSize = 0xD000;
249
250 static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
251
252 static nsecs_t getStandbyTimeInNanos() {
253 static nsecs_t standbyTimeInNanos = []() {
254 const int ms = property_get_int32("ro.audio.flinger_standbytime_ms",
255 kDefaultStandbyTimeInNsecs / NANOS_PER_MILLISECOND);
256 ALOGI("%s: Using %d ms as standby time", __func__, ms);
257 return milliseconds(ms);
258 }();
259 return standbyTimeInNanos;
260 }
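// Configuration note (assumption about typical usage): since ro.audio.flinger_standbytime_ms
// is a read-only property, it is normally set per device at build time, for example
//     PRODUCT_PROPERTY_OVERRIDES += ro.audio.flinger_standbytime_ms=5000
// (the exact makefile variable depends on the platform version). When unset, the 3 second
// default above is used.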
261
262 // Set kEnableExtendedChannels to true to enable greater than stereo output
263 // for the MixerThread and device sink. Number of channels allowed is
264 // FCC_2 <= channels <= FCC_LIMIT.
265 constexpr bool kEnableExtendedChannels = true;
266
267 // Returns true if channel mask is permitted for the PCM sink in the MixerThread
268 /* static */
269 bool IAfThreadBase::isValidPcmSinkChannelMask(audio_channel_mask_t channelMask) {
270 switch (audio_channel_mask_get_representation(channelMask)) {
271 case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
272 // Haptic channel mask is only applicable for channel position mask.
273 const uint32_t channelCount = audio_channel_count_from_out_mask(
274 static_cast<audio_channel_mask_t>(channelMask & ~AUDIO_CHANNEL_HAPTIC_ALL));
275 const uint32_t maxChannelCount = kEnableExtendedChannels
276 ? FCC_LIMIT : FCC_2;
277 if (channelCount < FCC_2 // mono is not supported at this time
278 || channelCount > maxChannelCount) {
279 return false;
280 }
281 // check that channelMask is the "canonical" one we expect for the channelCount.
282 return audio_channel_position_mask_is_out_canonical(channelMask);
283 }
284 case AUDIO_CHANNEL_REPRESENTATION_INDEX:
285 if (kEnableExtendedChannels) {
286 const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
287 if (channelCount >= FCC_2 // mono is not supported at this time
288 && channelCount <= FCC_LIMIT) {
289 return true;
290 }
291 }
292 return false;
293 default:
294 return false;
295 }
296 }
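// Illustrative outcomes of the checks above (a sketch, not an exhaustive list):
//     isValidPcmSinkChannelMask(AUDIO_CHANNEL_OUT_STEREO)  -> true  (canonical 2-channel mask)
//     isValidPcmSinkChannelMask(AUDIO_CHANNEL_OUT_5POINT1) -> true  (6 channels, allowed when
//                                                                    kEnableExtendedChannels is set)
//     isValidPcmSinkChannelMask(AUDIO_CHANNEL_OUT_MONO)    -> false (mono is not supported)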
297
298 // Set kEnableExtendedPrecision to true to use extended precision in MixerThread
299 constexpr bool kEnableExtendedPrecision = true;
300
301 // Returns true if format is permitted for the PCM sink in the MixerThread
302 /* static */
303 bool IAfThreadBase::isValidPcmSinkFormat(audio_format_t format) {
304 switch (format) {
305 case AUDIO_FORMAT_PCM_16_BIT:
306 return true;
307 case AUDIO_FORMAT_PCM_FLOAT:
308 case AUDIO_FORMAT_PCM_24_BIT_PACKED:
309 case AUDIO_FORMAT_PCM_32_BIT:
310 case AUDIO_FORMAT_PCM_8_24_BIT:
311 return kEnableExtendedPrecision;
312 default:
313 return false;
314 }
315 }
316
317 // ----------------------------------------------------------------------------
318
319 // formatToString() needs to be exact for MediaMetrics purposes.
320 // Do not use media/TypeConverter.h toString().
321 /* static */
322 std::string IAfThreadBase::formatToString(audio_format_t format) {
323 std::string result;
324 FormatConverter::toString(format, result);
325 return result;
326 }
327
328 // TODO: move all toString helpers to audio.h
329 // under #ifdef __cplusplus #endif
330 static std::string patchSinksToString(const struct audio_patch *patch)
331 {
332 std::stringstream ss;
333 for (size_t i = 0; i < patch->num_sinks; ++i) {
334 if (i > 0) {
335 ss << "|";
336 }
337 ss << "(" << toString(patch->sinks[i].ext.device.type)
338 << ", " << patch->sinks[i].ext.device.address << ")";
339 }
340 return ss.str();
341 }
342
343 static std::string patchSourcesToString(const struct audio_patch *patch)
344 {
345 std::stringstream ss;
346 for (size_t i = 0; i < patch->num_sources; ++i) {
347 if (i > 0) {
348 ss << "|";
349 }
350 ss << "(" << toString(patch->sources[i].ext.device.type)
351 << ", " << patch->sources[i].ext.device.address << ")";
352 }
353 return ss.str();
354 }
355
356 static std::string toString(audio_latency_mode_t mode) {
357 // We convert to the AIDL type to print (eventually the legacy type will be removed).
358 const auto result = legacy2aidl_audio_latency_mode_t_AudioLatencyMode(mode);
359 return result.has_value() ? media::audio::common::toString(*result) : "UNKNOWN";
360 }
361
362 // Could be made a template, but other toString overloads for std::vector are confused.
363 static std::string toString(const std::vector<audio_latency_mode_t>& elements) {
364 std::string s("{ ");
365 for (const auto& e : elements) {
366 s.append(toString(e));
367 s.append(" ");
368 }
369 s.append("}");
370 return s;
371 }
372
373 static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;
374
375 static void sFastTrackMultiplierInit()
376 {
377 char value[PROPERTY_VALUE_MAX];
378 if (property_get("af.fast_track_multiplier", value, NULL) > 0) {
379 char *endptr;
380 unsigned long ul = strtoul(value, &endptr, 0);
381 if (*endptr == '\0' && kFastTrackMultiplierMin <= ul && ul <= kFastTrackMultiplierMax) {
382 sFastTrackMultiplier = (int) ul;
383 }
384 }
385 }
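// Usage sketch (illustrative): a device or tester can force a single HAL period per fast track
// buffer with
//     adb shell setprop af.fast_track_multiplier 1
// The property is read once (see sFastTrackMultiplierOnce), so it must be set before the first
// fast track is created; out-of-range values are ignored and the default of 2 is kept.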
386
387 // ----------------------------------------------------------------------------
388
389 #ifdef ADD_BATTERY_DATA
390 // To collect the amplifier usage
391 static void addBatteryData(uint32_t params) {
392 sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
393 if (service == NULL) {
394 // getMediaPlayerService() has already logged the failure
395 return;
396 }
397
398 service->addBatteryData(params);
399 }
400 #endif
401
402 // Track the CLOCK_BOOTTIME versus CLOCK_MONOTONIC timebase offset
403 struct {
404 // call when you acquire a partial wakelock
405 void acquire(const sp<IBinder> &wakeLockToken) {
406 pthread_mutex_lock(&mLock);
407 if (wakeLockToken.get() == nullptr) {
408 adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
409 } else {
410 if (mCount == 0) {
411 adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
412 }
413 ++mCount;
414 }
415 pthread_mutex_unlock(&mLock);
416 }
417
418 // call when you release a partial wakelock.
419 void release(const sp<IBinder> &wakeLockToken) {
420 if (wakeLockToken.get() == nullptr) {
421 return;
422 }
423 pthread_mutex_lock(&mLock);
424 if (--mCount < 0) {
425 ALOGE("negative wakelock count");
426 mCount = 0;
427 }
428 pthread_mutex_unlock(&mLock);
429 }
430
431 // retrieves the boottime timebase offset from monotonic.
432 int64_t getBoottimeOffset() {
433 pthread_mutex_lock(&mLock);
434 int64_t boottimeOffset = mBoottimeOffset;
435 pthread_mutex_unlock(&mLock);
436 return boottimeOffset;
437 }
438
439 // Adjusts the timebase offset between TIMEBASE_MONOTONIC
440 // and the selected timebase.
441 // Currently only TIMEBASE_BOOTTIME is allowed.
442 //
443 // This only needs to be called upon acquiring the first partial wakelock
444 // after all other partial wakelocks are released.
445 //
446 // We do an empirical measurement of the offset rather than parsing
447 // /proc/timer_list since the latter is not a formal kernel ABI.
448 static void adjustTimebaseOffset(int64_t *offset, ExtendedTimestamp::Timebase timebase) {
449 int clockbase;
450 switch (timebase) {
451 case ExtendedTimestamp::TIMEBASE_BOOTTIME:
452 clockbase = SYSTEM_TIME_BOOTTIME;
453 break;
454 default:
455 LOG_ALWAYS_FATAL("invalid timebase %d", timebase);
456 break;
457 }
458 // try three times to get the clock offset, choose the one
459 // with the minimum gap in measurements.
460 const int tries = 3;
461 nsecs_t bestGap = 0, measured = 0; // not required, initialized for clang-tidy
462 for (int i = 0; i < tries; ++i) {
463 const nsecs_t tmono = systemTime(SYSTEM_TIME_MONOTONIC);
464 const nsecs_t tbase = systemTime(clockbase);
465 const nsecs_t tmono2 = systemTime(SYSTEM_TIME_MONOTONIC);
466 const nsecs_t gap = tmono2 - tmono;
467 if (i == 0 || gap < bestGap) {
468 bestGap = gap;
469 measured = tbase - ((tmono + tmono2) >> 1);
470 }
471 }
472
473 // to avoid micro-adjusting, we don't change the timebase
474 // unless it is significantly different.
475 //
476 // Assumption: It probably takes more than toleranceNs to
477 // suspend and resume the device.
478 static int64_t toleranceNs = 10000; // 10 us
479 if (llabs(*offset - measured) > toleranceNs) {
480 ALOGV("Adjusting timebase offset old: %lld new: %lld",
481 (long long)*offset, (long long)measured);
482 *offset = measured;
483 }
484 }
485
486 pthread_mutex_t mLock;
487 int32_t mCount;
488 int64_t mBoottimeOffset;
489 } gBoottime = { PTHREAD_MUTEX_INITIALIZER, 0, 0 }; // static, so use POD initialization
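// Usage sketch (mirrors how acquireWakeLock_l() feeds mTimestamp below): a CLOCK_MONOTONIC
// timestamp can be expressed on the CLOCK_BOOTTIME timebase by adding the cached offset,
//     const int64_t boottimeNs = monotonicNs + gBoottime.getBoottimeOffset();
// where 'monotonicNs' is a hypothetical monotonic timestamp. Re-measuring only at the first
// wakelock acquisition is sufficient because the offset only drifts across suspend/resume,
// which cannot happen while a partial wakelock is held.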
490
491 // ----------------------------------------------------------------------------
492 // CPU Stats
493 // ----------------------------------------------------------------------------
494
495 class CpuStats {
496 public:
497 CpuStats();
498 void sample(const String8 &title);
499 #ifdef DEBUG_CPU_USAGE
500 private:
501 ThreadCpuUsage mCpuUsage; // instantaneous thread CPU usage in wall clock ns
502 audio_utils::Statistics<double> mWcStats; // statistics on thread CPU usage in wall clock ns
503
504 audio_utils::Statistics<double> mHzStats; // statistics on thread CPU usage in cycles
505
506 int mCpuNum; // thread's current CPU number
507 int mCpukHz; // frequency of thread's current CPU in kHz
508 #endif
509 };
510
511 CpuStats::CpuStats()
512 #ifdef DEBUG_CPU_USAGE
513 : mCpuNum(-1), mCpukHz(-1)
514 #endif
515 {
516 }
517
518 void CpuStats::sample(const String8 &title
519 #ifndef DEBUG_CPU_USAGE
520 __unused
521 #endif
522 ) {
523 #ifdef DEBUG_CPU_USAGE
524 // get current thread's delta CPU time in wall clock ns
525 double wcNs;
526 bool valid = mCpuUsage.sampleAndEnable(wcNs);
527
528 // record sample for wall clock statistics
529 if (valid) {
530 mWcStats.add(wcNs);
531 }
532
533 // get the current CPU number
534 int cpuNum = sched_getcpu();
535
536 // get the current CPU frequency in kHz
537 int cpukHz = mCpuUsage.getCpukHz(cpuNum);
538
539 // check if either CPU number or frequency changed
540 if (cpuNum != mCpuNum || cpukHz != mCpukHz) {
541 mCpuNum = cpuNum;
542 mCpukHz = cpukHz;
543 // ignore sample for purposes of cycles
544 valid = false;
545 }
546
547 // if no change in CPU number or frequency, then record sample for cycle statistics
548 if (valid && mCpukHz > 0) {
549 const double cycles = wcNs * cpukHz * 0.000001;
550 mHzStats.add(cycles);
551 }
552
553 const unsigned n = mWcStats.getN();
554 // mCpuUsage.elapsed() is expensive, so don't call it every loop
555 if ((n & 127) == 1) {
556 const long long elapsed = mCpuUsage.elapsed();
557 if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
558 const double perLoop = elapsed / (double) n;
559 const double perLoop100 = perLoop * 0.01;
560 const double perLoop1k = perLoop * 0.001;
561 const double mean = mWcStats.getMean();
562 const double stddev = mWcStats.getStdDev();
563 const double minimum = mWcStats.getMin();
564 const double maximum = mWcStats.getMax();
565 const double meanCycles = mHzStats.getMean();
566 const double stddevCycles = mHzStats.getStdDev();
567 const double minCycles = mHzStats.getMin();
568 const double maxCycles = mHzStats.getMax();
569 mCpuUsage.resetElapsed();
570 mWcStats.reset();
571 mHzStats.reset();
572 ALOGD("CPU usage for %s over past %.1f secs\n"
573 " (%u mixer loops at %.1f mean ms per loop):\n"
574 " us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n"
575 " %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n"
576 " MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f",
577 title.c_str(),
578 elapsed * .000000001, n, perLoop * .000001,
579 mean * .001,
580 stddev * .001,
581 minimum * .001,
582 maximum * .001,
583 mean / perLoop100,
584 stddev / perLoop100,
585 minimum / perLoop100,
586 maximum / perLoop100,
587 meanCycles / perLoop1k,
588 stddevCycles / perLoop1k,
589 minCycles / perLoop1k,
590 maxCycles / perLoop1k);
591
592 }
593 }
594 #endif
595 };
596
597 // ----------------------------------------------------------------------------
598 // ThreadBase
599 // ----------------------------------------------------------------------------
600
601 // static
602 const char* ThreadBase::threadTypeToString(ThreadBase::type_t type)
603 {
604 switch (type) {
605 case MIXER:
606 return "MIXER";
607 case DIRECT:
608 return "DIRECT";
609 case DUPLICATING:
610 return "DUPLICATING";
611 case RECORD:
612 return "RECORD";
613 case OFFLOAD:
614 return "OFFLOAD";
615 case MMAP_PLAYBACK:
616 return "MMAP_PLAYBACK";
617 case MMAP_CAPTURE:
618 return "MMAP_CAPTURE";
619 case SPATIALIZER:
620 return "SPATIALIZER";
621 case BIT_PERFECT:
622 return "BIT_PERFECT";
623 default:
624 return "unknown";
625 }
626 }
627
628 ThreadBase::ThreadBase(const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
629 type_t type, bool systemReady, bool isOut)
630 : Thread(false /*canCallJava*/),
631 mType(type),
632 mAfThreadCallback(afThreadCallback),
633 mThreadMetrics(std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_THREAD) + std::to_string(id),
634 isOut),
635 mIsOut(isOut),
636 // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize
637 // are set by PlaybackThread::readOutputParameters_l() or
638 // RecordThread::readInputParameters_l()
639 //FIXME: mStandby should be true here. Is this some kind of hack?
640 mStandby(false),
641 mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
642 // mName will be set by concrete (non-virtual) subclass
643 mDeathRecipient(new PMDeathRecipient(this)),
644 mSystemReady(systemReady),
645 mSignalPending(false)
646 {
647 mThreadMetrics.logConstructor(getpid(), threadTypeToString(type), id);
648 memset(&mPatch, 0, sizeof(struct audio_patch));
649 }
650
651 ThreadBase::~ThreadBase()
652 {
653 // mConfigEvents should be empty, but just in case it isn't, free the memory it owns
654 mConfigEvents.clear();
655
656 // do not lock the mutex in destructor
657 releaseWakeLock_l();
658 if (mPowerManager != 0) {
659 sp<IBinder> binder = IInterface::asBinder(mPowerManager);
660 binder->unlinkToDeath(mDeathRecipient);
661 }
662
663 sendStatistics(true /* force */);
664 }
665
666 status_t ThreadBase::readyToRun()
667 {
668 status_t status = initCheck();
669 if (status == NO_ERROR) {
670 ALOGI("AudioFlinger's thread %p tid=%d ready to run", this, getTid());
671 } else {
672 ALOGE("No working audio driver found.");
673 }
674 return status;
675 }
676
677 void ThreadBase::exit()
678 {
679 ALOGV("ThreadBase::exit");
680 // do any cleanup required for exit to succeed
681 preExit();
682 {
683 // This lock prevents the following race in thread (uniprocessor for illustration):
684 // if (!exitPending()) {
685 // // context switch from here to exit()
686 // // exit() calls requestExit(), what exitPending() observes
687 // // exit() calls signal(), which is dropped since no waiters
688 // // context switch back from exit() to here
689 // mWaitWorkCV.wait(...);
690 // // now thread is hung
691 // }
692 audio_utils::lock_guard lock(mutex());
693 requestExit();
694 mWaitWorkCV.notify_all();
695 }
696 // When Thread::requestExitAndWait is made virtual and this method is renamed to
697 // "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"
698
699 // For TimeCheck: track waiting on the thread join of getTid().
700 audio_utils::mutex::scoped_join_wait_check sjw(getTid());
701
702 requestExitAndWait();
703 }
704
705 status_t ThreadBase::setParameters(const String8& keyValuePairs)
706 {
707 ALOGV("ThreadBase::setParameters() %s", keyValuePairs.c_str());
708 audio_utils::lock_guard _l(mutex());
709
710 return sendSetParameterConfigEvent_l(keyValuePairs);
711 }
712
713 // sendConfigEvent_l() must be called with ThreadBase::mLock held
714 // Can temporarily release the lock if waiting for a reply from processConfigEvents_l().
715 status_t ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event)
716 NO_THREAD_SAFETY_ANALYSIS // condition variable
717 {
718 status_t status = NO_ERROR;
719
720 if (event->mRequiresSystemReady && !mSystemReady) {
721 event->mWaitStatus = false;
722 mPendingConfigEvents.add(event);
723 return status;
724 }
725 mConfigEvents.add(event);
726 ALOGV("sendConfigEvent_l() num events %zu event %d", mConfigEvents.size(), event->mType);
727 mWaitWorkCV.notify_one();
728 mutex().unlock();
729 {
730 audio_utils::unique_lock _l(event->mutex());
731 while (event->mWaitStatus) {
732 if (event->mCondition.wait_for(
733 _l, std::chrono::nanoseconds(kConfigEventTimeoutNs), getTid())
734 == std::cv_status::timeout) {
735 event->mStatus = TIMED_OUT;
736 event->mWaitStatus = false;
737 }
738 }
739 status = event->mStatus;
740 }
741 mutex().lock();
742 return status;
743 }
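// The method above is a synchronous request/reply handshake: the caller (holding mutex())
// queues the event and wakes the thread loop, temporarily drops mutex() so that
// processConfigEvents_l() can run, then blocks on the event's own condition variable
// (bounded by kConfigEventTimeoutNs) until the thread loop stores a status and clears
// event->mWaitStatus.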
744
745 void ThreadBase::sendIoConfigEvent(audio_io_config_event_t event, pid_t pid,
746 audio_port_handle_t portId)
747 {
748 audio_utils::lock_guard _l(mutex());
749 sendIoConfigEvent_l(event, pid, portId);
750 }
751
752 // sendIoConfigEvent_l() must be called with ThreadBase::mutex() held
753 void ThreadBase::sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid,
754 audio_port_handle_t portId)
755 {
756 // The audio statistics history is exponentially weighted to forget events
757 // about five or more seconds in the past. In order to have
758 // crisper statistics for mediametrics, we reset the statistics on
759 // an IoConfigEvent, to reflect different properties for a new device.
760 mIoJitterMs.reset();
761 mLatencyMs.reset();
762 mProcessTimeMs.reset();
763 mMonopipePipeDepthStats.reset();
764 mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);
765
766 sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid, portId);
767 sendConfigEvent_l(configEvent);
768 }
769
770 void ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp)
771 {
772 audio_utils::lock_guard _l(mutex());
773 sendPrioConfigEvent_l(pid, tid, prio, forApp);
774 }
775
776 // sendPrioConfigEvent_l() must be called with ThreadBase::mutex() held
777 void ThreadBase::sendPrioConfigEvent_l(
778 pid_t pid, pid_t tid, int32_t prio, bool forApp)
779 {
780 sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio, forApp);
781 sendConfigEvent_l(configEvent);
782 }
783
784 // sendSetParameterConfigEvent_l() must be called with ThreadBase::mutex() held
785 status_t ThreadBase::sendSetParameterConfigEvent_l(const String8& keyValuePair)
786 {
787 sp<ConfigEvent> configEvent;
788 AudioParameter param(keyValuePair);
789 int value;
790 if (param.getInt(String8(AudioParameter::keyMonoOutput), value) == NO_ERROR) {
791 setMasterMono_l(value != 0);
792 if (param.size() == 1) {
793 return NO_ERROR; // should be a solo parameter - we don't pass down
794 }
795 param.remove(String8(AudioParameter::keyMonoOutput));
796 configEvent = new SetParameterConfigEvent(param.toString());
797 } else {
798 configEvent = new SetParameterConfigEvent(keyValuePair);
799 }
800 return sendConfigEvent_l(configEvent);
801 }
802
803 status_t ThreadBase::sendCreateAudioPatchConfigEvent(
804 const struct audio_patch *patch,
805 audio_patch_handle_t *handle)
806 {
807 audio_utils::lock_guard _l(mutex());
808 sp<ConfigEvent> configEvent = (ConfigEvent *)new CreateAudioPatchConfigEvent(*patch, *handle);
809 status_t status = sendConfigEvent_l(configEvent);
810 if (status == NO_ERROR) {
811 CreateAudioPatchConfigEventData *data =
812 (CreateAudioPatchConfigEventData *)configEvent->mData.get();
813 *handle = data->mHandle;
814 }
815 return status;
816 }
817
818 status_t ThreadBase::sendReleaseAudioPatchConfigEvent(
819 const audio_patch_handle_t handle)
820 {
821 audio_utils::lock_guard _l(mutex());
822 sp<ConfigEvent> configEvent = (ConfigEvent *)new ReleaseAudioPatchConfigEvent(handle);
823 return sendConfigEvent_l(configEvent);
824 }
825
826 status_t ThreadBase::sendUpdateOutDeviceConfigEvent(
827 const DeviceDescriptorBaseVector& outDevices)
828 {
829 if (type() != RECORD) {
830 // The update out device operation is only for record thread.
831 return INVALID_OPERATION;
832 }
833 audio_utils::lock_guard _l(mutex());
834 sp<ConfigEvent> configEvent = (ConfigEvent *)new UpdateOutDevicesConfigEvent(outDevices);
835 return sendConfigEvent_l(configEvent);
836 }
837
838 void ThreadBase::sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs)
839 {
840 ALOG_ASSERT(type() == RECORD, "sendResizeBufferConfigEvent_l() called on non record thread");
841 sp<ConfigEvent> configEvent =
842 (ConfigEvent *)new ResizeBufferConfigEvent(maxSharedAudioHistoryMs);
843 sendConfigEvent_l(configEvent);
844 }
845
846 void ThreadBase::sendCheckOutputStageEffectsEvent()
847 {
848 audio_utils::lock_guard _l(mutex());
849 sendCheckOutputStageEffectsEvent_l();
850 }
851
852 void ThreadBase::sendCheckOutputStageEffectsEvent_l()
853 {
854 sp<ConfigEvent> configEvent =
855 (ConfigEvent *)new CheckOutputStageEffectsEvent();
856 sendConfigEvent_l(configEvent);
857 }
858
859 void ThreadBase::sendHalLatencyModesChangedEvent_l()
860 {
861 sp<ConfigEvent> configEvent = sp<HalLatencyModesChangedEvent>::make();
862 sendConfigEvent_l(configEvent);
863 }
864
865 // post condition: mConfigEvents.isEmpty()
866 void ThreadBase::processConfigEvents_l()
867 {
868 bool configChanged = false;
869
870 while (!mConfigEvents.isEmpty()) {
871 ALOGV("processConfigEvents_l() remaining events %zu", mConfigEvents.size());
872 sp<ConfigEvent> event = mConfigEvents[0];
873 mConfigEvents.removeAt(0);
874 switch (event->mType) {
875 case CFG_EVENT_PRIO: {
876 PrioConfigEventData *data = (PrioConfigEventData *)event->mData.get();
877 // FIXME Need to understand why this has to be done asynchronously
878 int err = requestPriority(data->mPid, data->mTid, data->mPrio, data->mForApp,
879 true /*asynchronous*/);
880 if (err != 0) {
881 ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
882 data->mPrio, data->mPid, data->mTid, err);
883 }
884 } break;
885 case CFG_EVENT_IO: {
886 IoConfigEventData *data = (IoConfigEventData *)event->mData.get();
887 ioConfigChanged_l(data->mEvent, data->mPid, data->mPortId);
888 } break;
889 case CFG_EVENT_SET_PARAMETER: {
890 SetParameterConfigEventData *data = (SetParameterConfigEventData *)event->mData.get();
891 if (checkForNewParameter_l(data->mKeyValuePairs, event->mStatus)) {
892 configChanged = true;
893 mLocalLog.log("CFG_EVENT_SET_PARAMETER: (%s) configuration changed",
894 data->mKeyValuePairs.c_str());
895 }
896 } break;
897 case CFG_EVENT_CREATE_AUDIO_PATCH: {
898 const DeviceTypeSet oldDevices = getDeviceTypes_l();
899 CreateAudioPatchConfigEventData *data =
900 (CreateAudioPatchConfigEventData *)event->mData.get();
901 event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
902 const DeviceTypeSet newDevices = getDeviceTypes_l();
903 configChanged = oldDevices != newDevices;
904 mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
905 dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
906 dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
907 } break;
908 case CFG_EVENT_RELEASE_AUDIO_PATCH: {
909 const DeviceTypeSet oldDevices = getDeviceTypes_l();
910 ReleaseAudioPatchConfigEventData *data =
911 (ReleaseAudioPatchConfigEventData *)event->mData.get();
912 event->mStatus = releaseAudioPatch_l(data->mHandle);
913 const DeviceTypeSet newDevices = getDeviceTypes_l();
914 configChanged = oldDevices != newDevices;
915 mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
916 dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
917 dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
918 } break;
919 case CFG_EVENT_UPDATE_OUT_DEVICE: {
920 UpdateOutDevicesConfigEventData *data =
921 (UpdateOutDevicesConfigEventData *)event->mData.get();
922 updateOutDevices(data->mOutDevices);
923 } break;
924 case CFG_EVENT_RESIZE_BUFFER: {
925 ResizeBufferConfigEventData *data =
926 (ResizeBufferConfigEventData *)event->mData.get();
927 resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
928 } break;
929
930 case CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS: {
931 setCheckOutputStageEffects();
932 } break;
933
934 case CFG_EVENT_HAL_LATENCY_MODES_CHANGED: {
935 onHalLatencyModesChanged_l();
936 } break;
937
938 default:
939 ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
940 break;
941 }
942 {
943 audio_utils::lock_guard _l(event->mutex());
944 if (event->mWaitStatus) {
945 event->mWaitStatus = false;
946 event->mCondition.notify_one();
947 }
948 }
949 ALOGV_IF(mConfigEvents.isEmpty(), "processConfigEvents_l() DONE thread %p", this);
950 }
951
952 if (configChanged) {
953 cacheParameters_l();
954 }
955 }
956
957 String8 channelMaskToString(audio_channel_mask_t mask, bool output) {
958 String8 s;
959 const audio_channel_representation_t representation =
960 audio_channel_mask_get_representation(mask);
961
962 switch (representation) {
963 // Traverse all single-bit channel positions to convert the channel mask to a string.
964 case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
965 if (output) {
966 if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
967 if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
968 if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
969 if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low-frequency, ");
970 if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
971 if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
972 if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
973 if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
974 if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
975 if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
976 if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
977 if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center, ");
978 if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
979 if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
980 if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
981 if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
982 if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, ");
983 if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, ");
984 if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, ");
985 if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, ");
986 if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT) s.append("bottom-front-left, ");
987 if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER) s.append("bottom-front-center, ");
988 if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT) s.append("bottom-front-right, ");
989 if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) s.append("low-frequency-2, ");
990 if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, ");
991 if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, ");
992 if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
993 } else {
994 if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
995 if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
996 if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
997 if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
998 if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
999 if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
1000 if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
1001 if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
1002 if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
1003 if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
1004 if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
1005 if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
1006 if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
1007 if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
1008 if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
1009 if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low-frequency, ");
1010 if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, ");
1011 if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, ");
1012 if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
1013 if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
1014 if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
1015 }
1016 const int len = s.length();
1017 if (len > 2) {
1018 (void) s.lockBuffer(len); // needed?
1019 s.unlockBuffer(len - 2); // remove trailing ", "
1020 }
1021 return s;
1022 }
1023 case AUDIO_CHANNEL_REPRESENTATION_INDEX:
1024 s.appendFormat("index mask, bits:%#x", audio_channel_mask_get_bits(mask));
1025 return s;
1026 default:
1027 s.appendFormat("unknown mask, representation:%d bits:%#x",
1028 representation, audio_channel_mask_get_bits(mask));
1029 return s;
1030 }
1031 }
1032
1033 void ThreadBase::dump(int fd, const Vector<String16>& args)
1034 NO_THREAD_SAFETY_ANALYSIS // conditional try lock
1035 {
1036 dprintf(fd, "\n%s thread %p, name %s, tid %d, type %d (%s):\n", isOutput() ? "Output" : "Input",
1037 this, mThreadName, getTid(), type(), threadTypeToString(type()));
1038
1039 const bool locked = afutils::dumpTryLock(mutex());
1040 if (!locked) {
1041 dprintf(fd, " Thread may be deadlocked\n");
1042 }
1043
1044 dumpBase_l(fd, args);
1045 dumpInternals_l(fd, args);
1046 dumpTracks_l(fd, args);
1047 dumpEffectChains_l(fd, args);
1048
1049 if (locked) {
1050 mutex().unlock();
1051 }
1052
1053 dprintf(fd, " Local log:\n");
1054 mLocalLog.dump(fd, " " /* prefix */, 40 /* lines */);
1055
1056 // --all does the statistics
1057 bool dumpAll = false;
1058 for (const auto &arg : args) {
1059 if (arg == String16("--all")) {
1060 dumpAll = true;
1061 }
1062 }
1063 if (dumpAll || type() == SPATIALIZER) {
1064 const std::string sched = mThreadSnapshot.toString();
1065 if (!sched.empty()) {
1066 (void)write(fd, sched.c_str(), sched.size());
1067 }
1068 }
1069 }
1070
1071 void ThreadBase::dumpBase_l(int fd, const Vector<String16>& /* args */)
1072 {
1073 dprintf(fd, " I/O handle: %d\n", mId);
1074 dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no");
1075 dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
1076 dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
1077 dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat,
1078 IAfThreadBase::formatToString(mHALFormat).c_str());
1079 dprintf(fd, " HAL buffer size: %zu bytes\n", mBufferSize);
1080 dprintf(fd, " Channel count: %u\n", mChannelCount);
1081 dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
1082 channelMaskToString(mChannelMask, mType != RECORD).c_str());
1083 dprintf(fd, " Processing format: 0x%x (%s)\n", mFormat,
1084 IAfThreadBase::formatToString(mFormat).c_str());
1085 dprintf(fd, " Processing frame size: %zu bytes\n", mFrameSize);
1086 dprintf(fd, " Pending config events:");
1087 size_t numConfig = mConfigEvents.size();
1088 if (numConfig) {
1089 const size_t SIZE = 256;
1090 char buffer[SIZE];
1091 for (size_t i = 0; i < numConfig; i++) {
1092 mConfigEvents[i]->dump(buffer, SIZE);
1093 dprintf(fd, "\n %s", buffer);
1094 }
1095 dprintf(fd, "\n");
1096 } else {
1097 dprintf(fd, " none\n");
1098 }
1099 // Note: output device may be used by capture threads for effects such as AEC.
1100 dprintf(fd, " Output devices: %s (%s)\n",
1101 dumpDeviceTypes(outDeviceTypes_l()).c_str(), toString(outDeviceTypes_l()).c_str());
1102 dprintf(fd, " Input device: %#x (%s)\n",
1103 inDeviceType_l(), toString(inDeviceType_l()).c_str());
1104 dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, toString(mAudioSource).c_str());
1105
1106 // Dump timestamp statistics for the Thread types that support it.
1107 if (mType == RECORD
1108 || mType == MIXER
1109 || mType == DUPLICATING
1110 || mType == DIRECT
1111 || mType == OFFLOAD
1112 || mType == SPATIALIZER) {
1113 dprintf(fd, " Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
1114 dprintf(fd, " Timestamp corrected: %s\n",
1115 isTimestampCorrectionEnabled_l() ? "yes" : "no");
1116 }
1117
1118 if (mLastIoBeginNs > 0) { // MMAP may not set this
1119 dprintf(fd, " Last %s occurred (msecs): %lld\n",
1120 isOutput() ? "write" : "read",
1121 (long long) (systemTime() - mLastIoBeginNs) / NANOS_PER_MILLISECOND);
1122 }
1123
1124 if (mProcessTimeMs.getN() > 0) {
1125 dprintf(fd, " Process time ms stats: %s\n", mProcessTimeMs.toString().c_str());
1126 }
1127
1128 if (mIoJitterMs.getN() > 0) {
1129 dprintf(fd, " Hal %s jitter ms stats: %s\n",
1130 isOutput() ? "write" : "read",
1131 mIoJitterMs.toString().c_str());
1132 }
1133
1134 if (mLatencyMs.getN() > 0) {
1135 dprintf(fd, " Threadloop %s latency stats: %s\n",
1136 isOutput() ? "write" : "read",
1137 mLatencyMs.toString().c_str());
1138 }
1139
1140 if (mMonopipePipeDepthStats.getN() > 0) {
1141 dprintf(fd, " Monopipe %s pipe depth stats: %s\n",
1142 isOutput() ? "write" : "read",
1143 mMonopipePipeDepthStats.toString().c_str());
1144 }
1145 }
1146
1147 void ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args)
1148 {
1149 const size_t SIZE = 256;
1150 char buffer[SIZE];
1151
1152 size_t numEffectChains = mEffectChains.size();
1153 snprintf(buffer, SIZE, " %zu Effect Chains\n", numEffectChains);
1154 write(fd, buffer, strlen(buffer));
1155
1156 for (size_t i = 0; i < numEffectChains; ++i) {
1157 sp<IAfEffectChain> chain = mEffectChains[i];
1158 if (chain != 0) {
1159 chain->dump(fd, args);
1160 }
1161 }
1162 }
1163
1164 void ThreadBase::acquireWakeLock()
1165 {
1166 audio_utils::lock_guard _l(mutex());
1167 acquireWakeLock_l();
1168 }
1169
1170 String16 ThreadBase::getWakeLockTag()
1171 {
1172 switch (mType) {
1173 case MIXER:
1174 return String16("AudioMix");
1175 case DIRECT:
1176 return String16("AudioDirectOut");
1177 case DUPLICATING:
1178 return String16("AudioDup");
1179 case RECORD:
1180 return String16("AudioIn");
1181 case OFFLOAD:
1182 return String16("AudioOffload");
1183 case MMAP_PLAYBACK:
1184 return String16("MmapPlayback");
1185 case MMAP_CAPTURE:
1186 return String16("MmapCapture");
1187 case SPATIALIZER:
1188 return String16("AudioSpatial");
1189 default:
1190 ALOG_ASSERT(false);
1191 return String16("AudioUnknown");
1192 }
1193 }
1194
1195 void ThreadBase::acquireWakeLock_l()
1196 {
1197 getPowerManager_l();
1198 if (mPowerManager != 0) {
1199 sp<IBinder> binder = new BBinder();
1200 // Uses AID_AUDIOSERVER for wakelock. updateWakeLockUids_l() updates with client uids.
1201 binder::Status status = mPowerManager->acquireWakeLockAsync(binder,
1202 POWERMANAGER_PARTIAL_WAKE_LOCK,
1203 getWakeLockTag(),
1204 String16("audioserver"),
1205 {} /* workSource */,
1206 {} /* historyTag */);
1207 if (status.isOk()) {
1208 mWakeLockToken = binder;
1209 }
1210 ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status.exceptionCode());
1211 }
1212
1213 gBoottime.acquire(mWakeLockToken);
1214 mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
1215 gBoottime.getBoottimeOffset();
1216 }
1217
1218 void ThreadBase::releaseWakeLock()
1219 {
1220 audio_utils::lock_guard _l(mutex());
1221 releaseWakeLock_l();
1222 }
1223
1224 void ThreadBase::releaseWakeLock_l()
1225 {
1226 gBoottime.release(mWakeLockToken);
1227 if (mWakeLockToken != 0) {
1228 ALOGV("releaseWakeLock_l() %s", mThreadName);
1229 if (mPowerManager != 0) {
1230 mPowerManager->releaseWakeLockAsync(mWakeLockToken, 0);
1231 }
1232 mWakeLockToken.clear();
1233 }
1234 }
1235
1236 void ThreadBase::getPowerManager_l() {
1237 if (mSystemReady && mPowerManager == 0) {
1238 // use checkService() to avoid blocking if power service is not up yet
1239 sp<IBinder> binder =
1240 defaultServiceManager()->checkService(String16("power"));
1241 if (binder == 0) {
1242 ALOGW("Thread %s cannot connect to the power manager service", mThreadName);
1243 } else {
1244 mPowerManager = interface_cast<os::IPowerManager>(binder);
1245 binder->linkToDeath(mDeathRecipient);
1246 }
1247 }
1248 }
1249
1250 void ThreadBase::updateWakeLockUids_l(const SortedVector<uid_t>& uids) {
1251 getPowerManager_l();
1252
1253 #if !LOG_NDEBUG
1254 std::stringstream s;
1255 for (uid_t uid : uids) {
1256 s << uid << " ";
1257 }
1258 ALOGD("updateWakeLockUids_l %s uids:%s", mThreadName, s.str().c_str());
1259 #endif
1260
1261 if (mWakeLockToken == NULL) { // token may be NULL if AudioFlinger::systemReady() not called.
1262 if (mSystemReady) {
1263 ALOGE("no wake lock to update, but system ready!");
1264 } else {
1265 ALOGW("no wake lock to update, system not ready yet");
1266 }
1267 return;
1268 }
1269 if (mPowerManager != 0) {
1270 std::vector<int> uidsAsInt(uids.begin(), uids.end()); // powermanager expects uids as ints
1271 binder::Status status = mPowerManager->updateWakeLockUidsAsync(
1272 mWakeLockToken, uidsAsInt);
1273 ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status.exceptionCode());
1274 }
1275 }
1276
1277 void ThreadBase::clearPowerManager()
1278 {
1279 audio_utils::lock_guard _l(mutex());
1280 releaseWakeLock_l();
1281 mPowerManager.clear();
1282 }
1283
1284 void ThreadBase::updateOutDevices(
1285 const DeviceDescriptorBaseVector& outDevices __unused)
1286 {
1287 ALOGE("%s should only be called in RecordThread", __func__);
1288 }
1289
1290 void ThreadBase::resizeInputBuffer_l(int32_t /* maxSharedAudioHistoryMs */)
1291 {
1292 ALOGE("%s should only be called in RecordThread", __func__);
1293 }
1294
1295 void ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& /* who */)
1296 {
1297 sp<ThreadBase> thread = mThread.promote();
1298 if (thread != 0) {
1299 thread->clearPowerManager();
1300 }
1301 ALOGW("power manager service died !!!");
1302 }
1303
1304 void ThreadBase::setEffectSuspended_l(
1305 const effect_uuid_t *type, bool suspend, audio_session_t sessionId)
1306 {
1307 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
1308 if (chain != 0) {
1309 if (type != NULL) {
1310 chain->setEffectSuspended_l(type, suspend);
1311 } else {
1312 chain->setEffectSuspendedAll_l(suspend);
1313 }
1314 }
1315
1316 updateSuspendedSessions_l(type, suspend, sessionId);
1317 }
1318
1319 void ThreadBase::checkSuspendOnAddEffectChain_l(const sp<IAfEffectChain>& chain)
1320 {
1321 ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
1322 if (index < 0) {
1323 return;
1324 }
1325
1326 const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects =
1327 mSuspendedSessions.valueAt(index);
1328
1329 for (size_t i = 0; i < sessionEffects.size(); i++) {
1330 const sp<SuspendedSessionDesc>& desc = sessionEffects.valueAt(i);
1331 for (int j = 0; j < desc->mRefCount; j++) {
1332 if (sessionEffects.keyAt(i) == IAfEffectChain::kKeyForSuspendAll) {
1333 chain->setEffectSuspendedAll_l(true);
1334 } else {
1335 ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
1336 desc->mType.timeLow);
1337 chain->setEffectSuspended_l(&desc->mType, true);
1338 }
1339 }
1340 }
1341 }
1342
1343 void ThreadBase::updateSuspendedSessions_l(const effect_uuid_t* type,
1344 bool suspend,
1345 audio_session_t sessionId)
1346 {
1347 ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
1348
1349 KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;
1350
1351 if (suspend) {
1352 if (index >= 0) {
1353 sessionEffects = mSuspendedSessions.valueAt(index);
1354 } else {
1355 mSuspendedSessions.add(sessionId, sessionEffects);
1356 }
1357 } else {
1358 if (index < 0) {
1359 return;
1360 }
1361 sessionEffects = mSuspendedSessions.valueAt(index);
1362 }
1363
1364
1365 int key = IAfEffectChain::kKeyForSuspendAll;
1366 if (type != NULL) {
1367 key = type->timeLow;
1368 }
1369 index = sessionEffects.indexOfKey(key);
1370
1371 sp<SuspendedSessionDesc> desc;
1372 if (suspend) {
1373 if (index >= 0) {
1374 desc = sessionEffects.valueAt(index);
1375 } else {
1376 desc = new SuspendedSessionDesc();
1377 if (type != NULL) {
1378 desc->mType = *type;
1379 }
1380 sessionEffects.add(key, desc);
1381 ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key);
1382 }
1383 desc->mRefCount++;
1384 } else {
1385 if (index < 0) {
1386 return;
1387 }
1388 desc = sessionEffects.valueAt(index);
1389 if (--desc->mRefCount == 0) {
1390 ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key);
1391 sessionEffects.removeItemsAt(index);
1392 if (sessionEffects.isEmpty()) {
1393 ALOGV("updateSuspendedSessions_l() restore removing session %d",
1394 sessionId);
1395 mSuspendedSessions.removeItem(sessionId);
1396 }
1397 }
1398 }
1399 if (!sessionEffects.isEmpty()) {
1400 mSuspendedSessions.replaceValueFor(sessionId, sessionEffects);
1401 }
1402 }
1403
1404 void ThreadBase::checkSuspendOnEffectEnabled(bool enabled,
1405 audio_session_t sessionId,
1406 bool threadLocked)
1407 NO_THREAD_SAFETY_ANALYSIS // manual locking
1408 {
1409 if (!threadLocked) {
1410 mutex().lock();
1411 }
1412
1413 if (mType != RECORD) {
1414 // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
1415 // another session. This gives the priority to well behaved effect control panels
1416 // and applications not using global effects.
1417 // Enabling post processing in AUDIO_SESSION_OUTPUT_STAGE session does not affect
1418 // global effects
1419 if (!audio_is_global_session(sessionId)) {
1420 setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX);
1421 }
1422 }
1423
1424 if (!threadLocked) {
1425 mutex().unlock();
1426 }
1427 }
1428
1429 // checkEffectCompatibility_l() must be called with ThreadBase::mutex() held
1430 status_t RecordThread::checkEffectCompatibility_l(
1431 const effect_descriptor_t *desc, audio_session_t sessionId)
1432 {
1433 // No global output effect sessions on record threads
1434 if (sessionId == AUDIO_SESSION_OUTPUT_MIX
1435 || sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
1436 ALOGW("checkEffectCompatibility_l(): global effect %s on record thread %s",
1437 desc->name, mThreadName);
1438 return BAD_VALUE;
1439 }
1440 // only pre processing effects on record thread
1441 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) {
1442 ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on record thread %s",
1443 desc->name, mThreadName);
1444 return BAD_VALUE;
1445 }
1446
1447 // always allow effects without processing load or latency
1448 if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
1449 return NO_ERROR;
1450 }
1451
1452 audio_input_flags_t flags = mInput->flags;
1453 if (hasFastCapture() || (flags & AUDIO_INPUT_FLAG_FAST)) {
1454 if (flags & AUDIO_INPUT_FLAG_RAW) {
1455 ALOGW("checkEffectCompatibility_l(): effect %s on record thread %s in raw mode",
1456 desc->name, mThreadName);
1457 return BAD_VALUE;
1458 }
1459 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
1460 ALOGW("checkEffectCompatibility_l(): non HW effect %s on record thread %s in fast mode",
1461 desc->name, mThreadName);
1462 return BAD_VALUE;
1463 }
1464 }
1465
1466 if (IAfEffectModule::isHapticGenerator(&desc->type)) {
1467 ALOGE("%s(): HapticGenerator is not supported in RecordThread", __func__);
1468 return BAD_VALUE;
1469 }
1470 return NO_ERROR;
1471 }
1472
1473 // checkEffectCompatibility_l() must be called with ThreadBase::mutex() held
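// On playback threads, pre processing effects are always rejected and effects without
// processing load or latency are always accepted; the remaining checks depend on the
// thread type (MIXER, OFFLOAD, DIRECT, DUPLICATING, SPATIALIZER, BIT_PERFECT) below.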
1474 status_t PlaybackThread::checkEffectCompatibility_l(
1475 const effect_descriptor_t *desc, audio_session_t sessionId)
1476 {
1477 // no preprocessing on playback threads
1478 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
1479 ALOGW("%s: pre processing effect %s created on playback"
1480 " thread %s", __func__, desc->name, mThreadName);
1481 return BAD_VALUE;
1482 }
1483
1484 // always allow effects without processing load or latency
1485 if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
1486 return NO_ERROR;
1487 }
1488
1489 if (IAfEffectModule::isHapticGenerator(&desc->type) && mHapticChannelCount == 0) {
1490 ALOGW("%s: thread (%s) doesn't support haptic playback while the effect is HapticGenerator",
1491 __func__, threadTypeToString(mType));
1492 return BAD_VALUE;
1493 }
1494
1495 if (IAfEffectModule::isSpatializer(&desc->type)
1496 && mType != SPATIALIZER) {
1497 ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
1498 __func__, mType);
1499 return BAD_VALUE;
1500 }
1501
1502 switch (mType) {
1503 case MIXER: {
1504 audio_output_flags_t flags = mOutput->flags;
1505 if (hasFastMixer() || (flags & AUDIO_OUTPUT_FLAG_FAST)) {
1506 if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
1507 // global effects are applied only to non fast tracks if they are SW
1508 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
1509 break;
1510 }
1511 } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
1512 // only post processing on output stage session
1513 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1514 ALOGW("%s: non post processing effect %s not allowed on output stage session",
1515 __func__, desc->name);
1516 return BAD_VALUE;
1517 }
1518 } else if (sessionId == AUDIO_SESSION_DEVICE) {
1519 // only post processing on device session
1520 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1521 ALOGW("%s: non post processing effect %s not allowed on device session",
1522 __func__, desc->name);
1523 return BAD_VALUE;
1524 }
1525 } else {
1526 // no restriction on effects applied on non fast tracks
1527 if ((hasAudioSession_l(sessionId) & ThreadBase::FAST_SESSION) == 0) {
1528 break;
1529 }
1530 }
1531
1532 if (flags & AUDIO_OUTPUT_FLAG_RAW) {
1533 ALOGW("%s: effect %s on playback thread in raw mode", __func__, desc->name);
1534 return BAD_VALUE;
1535 }
1536 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
1537 ALOGW("%s: non HW effect %s on playback thread in fast mode",
1538 __func__, desc->name);
1539 return BAD_VALUE;
1540 }
1541 }
1542 } break;
1543 case OFFLOAD:
1544 // nothing actionable on offload threads, if the effect:
1545 // - is offloadable: the effect can be created
1546 // - is NOT offloadable: the effect should still be created, but EffectHandle::enable()
1547 // will take care of invalidating the tracks of the thread
1548 break;
1549 case DIRECT:
1550 // Reject any effect on Direct output threads for now, since the format of
1551 // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
1552 ALOGW("%s: effect %s on DIRECT output thread %s",
1553 __func__, desc->name, mThreadName);
1554 return BAD_VALUE;
1555 case DUPLICATING:
1556 if (audio_is_global_session(sessionId)) {
1557 ALOGW("%s: global effect %s on DUPLICATING thread %s",
1558 __func__, desc->name, mThreadName);
1559 return BAD_VALUE;
1560 }
1561 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
1562 ALOGW("%s: post processing effect %s on DUPLICATING thread %s",
1563 __func__, desc->name, mThreadName);
1564 return BAD_VALUE;
1565 }
1566 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
1567 ALOGW("%s: HW tunneled effect %s on DUPLICATING thread %s",
1568 __func__, desc->name, mThreadName);
1569 return BAD_VALUE;
1570 }
1571 break;
1572 case SPATIALIZER:
1573 // Global effects (AUDIO_SESSION_OUTPUT_MIX) are not supported on spatializer mixer
1574 // as there is no common accumulation buffer for spatialized and non spatialized tracks.
1575 // Post processing effects (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE)
1576 // are supported and added after the spatializer.
1577 if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
1578 ALOGW("%s: global effect %s not supported on spatializer thread %s",
1579 __func__, desc->name, mThreadName);
1580 return BAD_VALUE;
1581 } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
1582 // only post processing, downmixer or spatializer effects on output stage session
1583 if (IAfEffectModule::isSpatializer(&desc->type)
1584 || memcmp(&desc->type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
1585 break;
1586 }
1587 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1588 ALOGW("%s: non post processing effect %s not allowed on output stage session",
1589 __func__, desc->name);
1590 return BAD_VALUE;
1591 }
1592 } else if (sessionId == AUDIO_SESSION_DEVICE) {
1593 // only post processing on device session
1594 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1595 ALOGW("%s: non post processing effect %s not allowed on device session",
1596 __func__, desc->name);
1597 return BAD_VALUE;
1598 }
1599 }
1600 break;
1601 case BIT_PERFECT:
1602 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
1603 // Allow HW accelerated effects of tunnel type
1604 break;
1605 }
1606 // As bit-perfect tracks are not allowed to apply audio effects that would touch the audio
1607 // data, effects are rejected for 1) global sessions (AUDIO_SESSION_OUTPUT_MIX),
1608 // 2) post-processing sessions (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE) and
1609 // 3) any session that has a bit-perfect track attached.
1610 if (sessionId == AUDIO_SESSION_OUTPUT_MIX || sessionId == AUDIO_SESSION_OUTPUT_STAGE ||
1611 sessionId == AUDIO_SESSION_DEVICE) {
1612 ALOGW("%s: effect %s not supported on bit-perfect thread %s",
1613 __func__, desc->name, mThreadName);
1614 return BAD_VALUE;
1615 } else if ((hasAudioSession_l(sessionId) & ThreadBase::BIT_PERFECT_SESSION) != 0) {
1616 ALOGW("%s: effect %s not supported as there is a bit-perfect track with session as %d",
1617 __func__, desc->name, sessionId);
1618 return BAD_VALUE;
1619 }
1620 break;
1621 default:
1622 LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
1623 }
1624
1625 return NO_ERROR;
1626 }
1627
1628 // ThreadBase::createEffect_l() must be called with AudioFlinger::mutex() held
1629 sp<IAfEffectHandle> ThreadBase::createEffect_l(
1630 const sp<Client>& client,
1631 const sp<IEffectClient>& effectClient,
1632 int32_t priority,
1633 audio_session_t sessionId,
1634 effect_descriptor_t *desc,
1635 int *enabled,
1636 status_t *status,
1637 bool pinned,
1638 bool probe,
1639 bool notifyFramesProcessed)
1640 {
1641 sp<IAfEffectModule> effect;
1642 sp<IAfEffectHandle> handle;
1643 status_t lStatus;
1644 sp<IAfEffectChain> chain;
1645 bool chainCreated = false;
1646 bool effectCreated = false;
1647 audio_unique_id_t effectId = AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
1648
1649 lStatus = initCheck();
1650 if (lStatus != NO_ERROR) {
1651 ALOGW("createEffect_l() Audio driver not initialized.");
1652 goto Exit;
1653 }
1654
1655 ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
1656
1657 { // scope for mutex()
1658 audio_utils::lock_guard _l(mutex());
1659
1660 lStatus = checkEffectCompatibility_l(desc, sessionId);
1661 if (probe || lStatus != NO_ERROR) {
1662 goto Exit;
1663 }
1664
1665 // check for existing effect chain with the requested audio session
1666 chain = getEffectChain_l(sessionId);
1667 if (chain == 0) {
1668 // create a new chain for this session
1669 ALOGV("createEffect_l() new effect chain for session %d", sessionId);
1670 chain = IAfEffectChain::create(this, sessionId, mAfThreadCallback);
1671 addEffectChain_l(chain);
1672 chain->setStrategy(getStrategyForSession_l(sessionId));
1673 chainCreated = true;
1674 } else {
1675 effect = chain->getEffectFromDesc(desc);
1676 }
1677
1678 ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
1679
1680 if (effect == 0) {
1681 effectId = mAfThreadCallback->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
1682 // create a new effect module if none present in the chain
1683 lStatus = chain->createEffect(effect, desc, effectId, sessionId, pinned);
1684 if (lStatus != NO_ERROR) {
1685 goto Exit;
1686 }
1687 effectCreated = true;
1688
1689 // FIXME: use vector of device and address when effect interface is ready.
1690 effect->setDevices(outDeviceTypeAddrs());
1691 effect->setInputDevice(inDeviceTypeAddr());
1692 effect->setMode(mAfThreadCallback->getMode());
1693 effect->setAudioSource(mAudioSource);
1694 }
1695 if (effect->isHapticGenerator()) {
1696 // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
1697 // for the HapticGenerator.
1698 const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
1699 std::move(mAfThreadCallback->getDefaultVibratorInfo_l());
1700 if (defaultVibratorInfo) {
1701 audio_utils::lock_guard _cl(chain->mutex());
1702 // Only set the vibrator info when it is a valid one.
1703 effect->setVibratorInfo_l(*defaultVibratorInfo);
1704 }
1705 }
1706 // create effect handle and connect it to effect module
1707 handle = IAfEffectHandle::create(
1708 effect, client, effectClient, priority, notifyFramesProcessed);
1709 lStatus = handle->initCheck();
1710 if (lStatus == OK) {
1711 lStatus = effect->addHandle(handle.get());
1712 sendCheckOutputStageEffectsEvent_l();
1713 }
1714 if (enabled != NULL) {
1715 *enabled = (int)effect->isEnabled();
1716 }
1717 }
1718
1719 Exit:
1720 if (!probe && lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
1721 audio_utils::lock_guard _l(mutex());
1722 if (effectCreated) {
1723 chain->removeEffect(effect);
1724 }
1725 if (chainCreated) {
1726 removeEffectChain_l(chain);
1727 }
1728 // handle must be cleared by caller to avoid deadlock.
1729 }
1730
1731 *status = lStatus;
1732 return handle;
1733 }
1734
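// disconnectEffectHandle() detaches a handle from its effect module. If it was the last
// handle (and the effect is not pinned, or unpinIfLast is set), the effect is removed from
// this thread, handed to the orphan effect chains, and suspended effects are restored if
// the handle was enabled.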
1735 void ThreadBase::disconnectEffectHandle(IAfEffectHandle* handle,
1736 bool unpinIfLast)
1737 {
1738 bool remove = false;
1739 sp<IAfEffectModule> effect;
1740 {
1741 audio_utils::lock_guard _l(mutex());
1742 sp<IAfEffectBase> effectBase = handle->effect().promote();
1743 if (effectBase == nullptr) {
1744 return;
1745 }
1746 effect = effectBase->asEffectModule();
1747 if (effect == nullptr) {
1748 return;
1749 }
1750 // restore suspended effects if the disconnected handle was enabled and the last one.
1751 remove = (effect->removeHandle(handle) == 0) && (!effect->isPinned() || unpinIfLast);
1752 if (remove) {
1753 removeEffect_l(effect, true);
1754 }
1755 sendCheckOutputStageEffectsEvent_l();
1756 }
1757 if (remove) {
1758 mAfThreadCallback->updateOrphanEffectChains(effect);
1759 if (handle->enabled()) {
1760 effect->checkSuspendOnEffectEnabled(false, false /*threadLocked*/);
1761 }
1762 }
1763 }
1764
1765 void ThreadBase::onEffectEnable(const sp<IAfEffectModule>& effect) {
1766 if (isOffloadOrMmap()) {
1767 audio_utils::lock_guard _l(mutex());
1768 broadcast_l();
1769 }
1770 if (!effect->isOffloadable()) {
1771 if (mType == ThreadBase::OFFLOAD) {
1772 PlaybackThread *t = (PlaybackThread *)this;
1773 t->invalidateTracks(AUDIO_STREAM_MUSIC);
1774 }
1775 if (effect->sessionId() == AUDIO_SESSION_OUTPUT_MIX) {
1776 mAfThreadCallback->onNonOffloadableGlobalEffectEnable();
1777 }
1778 }
1779 }
1780
1781 void ThreadBase::onEffectDisable() {
1782 if (isOffloadOrMmap()) {
1783 audio_utils::lock_guard _l(mutex());
1784 broadcast_l();
1785 }
1786 }
1787
1788 sp<IAfEffectModule> ThreadBase::getEffect(audio_session_t sessionId,
1789 int effectId) const
1790 {
1791 audio_utils::lock_guard _l(mutex());
1792 return getEffect_l(sessionId, effectId);
1793 }
1794
1795 sp<IAfEffectModule> ThreadBase::getEffect_l(audio_session_t sessionId,
1796 int effectId) const
1797 {
1798 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
1799 return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
1800 }
1801
1802 std::vector<int> ThreadBase::getEffectIds_l(audio_session_t sessionId) const
1803 {
1804 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
1805 return chain != nullptr ? chain->getEffectIds_l() : std::vector<int>{};
1806 }
1807
1808 // ThreadBase::addEffect_ll() must be called with AudioFlinger::mutex() and
1809 // ThreadBase::mutex() held
1810 status_t ThreadBase::addEffect_ll(const sp<IAfEffectModule>& effect)
1811 {
1812 // check for existing effect chain with the requested audio session
1813 audio_session_t sessionId = effect->sessionId();
1814 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
1815 bool chainCreated = false;
1816
1817 ALOGD_IF((mType == OFFLOAD) && !effect->isOffloadable(),
1818 "%s: on offloaded thread %p: effect %s does not support offload flags %#x",
1819 __func__, this, effect->desc().name, effect->desc().flags);
1820
1821 if (chain == 0) {
1822 // create a new chain for this session
1823 ALOGV("%s: new effect chain for session %d", __func__, sessionId);
1824 chain = IAfEffectChain::create(this, sessionId, mAfThreadCallback);
1825 addEffectChain_l(chain);
1826 chain->setStrategy(getStrategyForSession_l(sessionId));
1827 chainCreated = true;
1828 }
1829 ALOGV("%s: %p chain %p effect %p", __func__, this, chain.get(), effect.get());
1830
1831 if (chain->getEffectFromId_l(effect->id()) != 0) {
1832 ALOGW("%s: %p effect %s already present in chain %p",
1833 __func__, this, effect->desc().name, chain.get());
1834 return BAD_VALUE;
1835 }
1836
1837 effect->setOffloaded_l(mType == OFFLOAD, mId);
1838
1839 status_t status = chain->addEffect(effect);
1840 if (status != NO_ERROR) {
1841 if (chainCreated) {
1842 removeEffectChain_l(chain);
1843 }
1844 return status;
1845 }
1846
1847 effect->setDevices(outDeviceTypeAddrs());
1848 effect->setInputDevice(inDeviceTypeAddr());
1849 effect->setMode(mAfThreadCallback->getMode());
1850 effect->setAudioSource(mAudioSource);
1851
1852 return NO_ERROR;
1853 }
1854
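// removeEffect_l() detaches auxiliary effects from their tracks and removes the effect
// from its chain; the chain itself is removed from the thread when its last effect goes.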
1855 void ThreadBase::removeEffect_l(const sp<IAfEffectModule>& effect, bool release) {
1856
1857 ALOGV("%s %p effect %p", __FUNCTION__, this, effect.get());
1858 effect_descriptor_t desc = effect->desc();
1859 if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
1860 detachAuxEffect_l(effect->id());
1861 }
1862
1863 sp<IAfEffectChain> chain = effect->getCallback()->chain().promote();
1864 if (chain != 0) {
1865 // remove effect chain if removing last effect
1866 if (chain->removeEffect(effect, release) == 0) {
1867 removeEffectChain_l(chain);
1868 }
1869 } else {
1870 ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get());
1871 }
1872 }
1873
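// lockEffectChains_l() snapshots the thread's effect chains and locks each chain's mutex;
// the same vector must later be passed to unlockEffectChains() to release them.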
1874 void ThreadBase::lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains)
1875 NO_THREAD_SAFETY_ANALYSIS // calls EffectChain::lock()
1876 {
1877 effectChains = mEffectChains;
1878 for (const auto& effectChain : effectChains) {
1879 effectChain->mutex().lock();
1880 }
1881 }
1882
1883 void ThreadBase::unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains)
1884 NO_THREAD_SAFETY_ANALYSIS // calls EffectChain::unlock()
1885 {
1886 for (const auto& effectChain : effectChains) {
1887 effectChain->mutex().unlock();
1888 }
1889 }
1890
1891 sp<IAfEffectChain> ThreadBase::getEffectChain(audio_session_t sessionId) const
1892 {
1893 audio_utils::lock_guard _l(mutex());
1894 return getEffectChain_l(sessionId);
1895 }
1896
1897 sp<IAfEffectChain> ThreadBase::getEffectChain_l(audio_session_t sessionId)
1898 const
1899 {
1900 size_t size = mEffectChains.size();
1901 for (size_t i = 0; i < size; i++) {
1902 if (mEffectChains[i]->sessionId() == sessionId) {
1903 return mEffectChains[i];
1904 }
1905 }
1906 return 0;
1907 }
1908
1909 void ThreadBase::setMode(audio_mode_t mode)
1910 {
1911 audio_utils::lock_guard _l(mutex());
1912 size_t size = mEffectChains.size();
1913 for (size_t i = 0; i < size; i++) {
1914 mEffectChains[i]->setMode_l(mode);
1915 }
1916 }
1917
1918 void ThreadBase::toAudioPortConfig(struct audio_port_config* config)
1919 {
1920 config->type = AUDIO_PORT_TYPE_MIX;
1921 config->ext.mix.handle = mId;
1922 config->sample_rate = mSampleRate;
1923 config->format = mHALFormat;
1924 config->channel_mask = mChannelMask;
1925 config->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
1926 AUDIO_PORT_CONFIG_FORMAT;
1927 }
1928
1929 void ThreadBase::systemReady()
1930 {
1931 audio_utils::lock_guard _l(mutex());
1932 if (mSystemReady) {
1933 return;
1934 }
1935 mSystemReady = true;
1936
1937 for (size_t i = 0; i < mPendingConfigEvents.size(); i++) {
1938 sendConfigEvent_l(mPendingConfigEvents.editItemAt(i));
1939 }
1940 mPendingConfigEvents.clear();
1941 }
1942
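// ActiveTracks<T>::add() registers a track as active: it bumps the generation counter used
// for wakelock uid updates, records the latest active track, starts battery attribution and
// flags the container as changed so that updated metadata gets sent.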
1943 template <typename T>
1944 ssize_t ThreadBase::ActiveTracks<T>::add(const sp<T>& track) {
1945 ssize_t index = mActiveTracks.indexOf(track);
1946 if (index >= 0) {
1947 ALOGW("ActiveTracks<T>::add track %p already there", track.get());
1948 return index;
1949 }
1950 logTrack("add", track);
1951 mActiveTracksGeneration++;
1952 mLatestActiveTrack = track;
1953 track->beginBatteryAttribution();
1954 mHasChanged = true;
1955 return mActiveTracks.add(track);
1956 }
1957
1958 template <typename T>
1959 ssize_t ThreadBase::ActiveTracks<T>::remove(const sp<T>& track) {
1960 ssize_t index = mActiveTracks.remove(track);
1961 if (index < 0) {
1962 ALOGW("ActiveTracks<T>::remove nonexistent track %p", track.get());
1963 return index;
1964 }
1965 logTrack("remove", track);
1966 mActiveTracksGeneration++;
1967 track->endBatteryAttribution();
1968 // mLatestActiveTrack is not cleared even if it is the same as track.
1969 mHasChanged = true;
1970 #ifdef TEE_SINK
1971 track->dumpTee(-1 /* fd */, "_REMOVE");
1972 #endif
1973 track->logEndInterval(); // log to MediaMetrics
1974 return index;
1975 }
1976
1977 template <typename T>
1978 void ThreadBase::ActiveTracks<T>::clear() {
1979 for (const sp<T> &track : mActiveTracks) {
1980 track->endBatteryAttribution();
1981 logTrack("clear", track);
1982 }
1983 mLastActiveTracksGeneration = mActiveTracksGeneration;
1984 if (!mActiveTracks.empty()) { mHasChanged = true; }
1985 mActiveTracks.clear();
1986 mLatestActiveTrack.clear();
1987 }
1988
1989 template <typename T>
1990 void ThreadBase::ActiveTracks<T>::updatePowerState_l(
1991 const sp<ThreadBase>& thread, bool force) {
1992 // Updates ActiveTracks client uids to the thread wakelock.
1993 if (mActiveTracksGeneration != mLastActiveTracksGeneration || force) {
1994 thread->updateWakeLockUids_l(getWakeLockUids());
1995 mLastActiveTracksGeneration = mActiveTracksGeneration;
1996 }
1997 }
1998
1999 template <typename T>
2000 bool ThreadBase::ActiveTracks<T>::readAndClearHasChanged() {
2001 bool hasChanged = mHasChanged;
2002 mHasChanged = false;
2003
2004 for (const sp<T> &track : mActiveTracks) {
2005 // Do not short-circuit as all hasChanged states must be reset
2006 // as all the metadata are going to be sent
2007 hasChanged |= track->readAndClearHasChanged();
2008 }
2009 return hasChanged;
2010 }
2011
2012 template <typename T>
2013 void ThreadBase::ActiveTracks<T>::logTrack(
2014 const char *funcName, const sp<T> &track) const {
2015 if (mLocalLog != nullptr) {
2016 String8 result;
2017 track->appendDump(result, false /* active */);
2018 mLocalLog->log("AT::%-10s(%p) %s", funcName, track.get(), result.c_str());
2019 }
2020 }
2021
2022 void ThreadBase::broadcast_l()
2023 {
2024 // Thread could be blocked waiting for async
2025 // so signal it to handle state changes immediately
2026 // If threadLoop is currently unlocked, a signal of mWaitWorkCV will
2027 // be lost, so we also set a flag to prevent it from blocking on mWaitWorkCV
2028 mSignalPending = true;
2029 mWaitWorkCV.notify_all();
2030 }
2031
2032 // Call only from threadLoop() or when it is idle.
2033 // Do not call from high performance code as this may do binder rpc to the MediaMetrics service.
2034 void ThreadBase::sendStatistics(bool force)
2035 NO_THREAD_SAFETY_ANALYSIS
2036 {
2037 // Do not log if we have no stats.
2038 // We choose the timestamp verifier because it is the most likely item to be present.
2039 const int64_t nstats = mTimestampVerifier.getN() - mLastRecordedTimestampVerifierN;
2040 if (nstats == 0) {
2041 return;
2042 }
2043
2044 // Don't log more frequently than once per 12 hours.
2045 // We use BOOTTIME to include suspend time.
2046 const int64_t timeNs = systemTime(SYSTEM_TIME_BOOTTIME);
2047 const int64_t sinceNs = timeNs - mLastRecordedTimeNs; // ok if mLastRecordedTimeNs = 0
2048 if (!force && sinceNs <= 12 * NANOS_PER_HOUR) {
2049 return;
2050 }
2051
2052 mLastRecordedTimestampVerifierN = mTimestampVerifier.getN();
2053 mLastRecordedTimeNs = timeNs;
2054
2055 std::unique_ptr<mediametrics::Item> item(mediametrics::Item::create("audiothread"));
2056
2057 #define MM_PREFIX "android.media.audiothread." // avoid cut-n-paste errors.
2058
2059 // thread configuration
2060 item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
2061 // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
2062 item->setCString(MM_PREFIX "type", threadTypeToString(mType));
2063 item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
2064 item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
2065 item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
2066 item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
2067 item->setCString(MM_PREFIX "outDevice", toString(outDeviceTypes_l()).c_str());
2068 item->setCString(MM_PREFIX "inDevice", toString(inDeviceType_l()).c_str());
2069
2070 // thread statistics
2071 if (mIoJitterMs.getN() > 0) {
2072 item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
2073 item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
2074 }
2075 if (mProcessTimeMs.getN() > 0) {
2076 item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
2077 item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
2078 }
2079 const auto tsjitter = mTimestampVerifier.getJitterMs();
2080 if (tsjitter.getN() > 0) {
2081 item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
2082 item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
2083 }
2084 if (mLatencyMs.getN() > 0) {
2085 item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
2086 item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
2087 }
2088 if (mMonopipePipeDepthStats.getN() > 0) {
2089 item->setDouble(MM_PREFIX "monopipePipeDepthStats.mean",
2090 mMonopipePipeDepthStats.getMean());
2091 item->setDouble(MM_PREFIX "monopipePipeDepthStats.std",
2092 mMonopipePipeDepthStats.getStdDev());
2093 }
2094
2095 item->selfrecord();
2096 }
2097
2098 product_strategy_t ThreadBase::getStrategyForStream(audio_stream_type_t stream) const
2099 {
2100 if (!mAfThreadCallback->isAudioPolicyReady()) {
2101 return PRODUCT_STRATEGY_NONE;
2102 }
2103 return AudioSystem::getStrategyForStream(stream);
2104 }
2105
2106 // startMelComputation_l() must be called with AudioFlinger::mutex() held
2107 void ThreadBase::startMelComputation_l(
2108 const sp<audio_utils::MelProcessor>& /*processor*/)
2109 {
2110 // Do nothing
2111 ALOGW("%s: ThreadBase does not support CSD", __func__);
2112 }
2113
2114 // stopMelComputation_l() must be called with AudioFlinger::mutex() held
2115 void ThreadBase::stopMelComputation_l()
2116 {
2117 // Do nothing
2118 ALOGW("%s: ThreadBase does not support CSD", __func__);
2119 }
2120
2121 // ----------------------------------------------------------------------------
2122 // Playback
2123 // ----------------------------------------------------------------------------
2124
2125 PlaybackThread::PlaybackThread(const sp<IAfThreadCallback>& afThreadCallback,
2126 AudioStreamOut* output,
2127 audio_io_handle_t id,
2128 type_t type,
2129 bool systemReady,
2130 audio_config_base_t *mixerConfig)
2131 : ThreadBase(afThreadCallback, id, type, systemReady, true /* isOut */),
2132 mNormalFrameCount(0), mSinkBuffer(NULL),
2133 mMixerBufferEnabled(kEnableExtendedPrecision || type == SPATIALIZER),
2134 mMixerBuffer(NULL),
2135 mMixerBufferSize(0),
2136 mMixerBufferFormat(AUDIO_FORMAT_INVALID),
2137 mMixerBufferValid(false),
2138 mEffectBufferEnabled(kEnableExtendedPrecision || type == SPATIALIZER),
2139 mEffectBuffer(NULL),
2140 mEffectBufferSize(0),
2141 mEffectBufferFormat(AUDIO_FORMAT_INVALID),
2142 mEffectBufferValid(false),
2143 mSuspended(0), mBytesWritten(0),
2144 mFramesWritten(0),
2145 mSuspendedFrames(0),
2146 mActiveTracks(&this->mLocalLog),
2147 // mStreamTypes[] initialized in constructor body
2148 mTracks(type == MIXER),
2149 mOutput(output),
2150 mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
2151 mMixerStatus(MIXER_IDLE),
2152 mMixerStatusIgnoringFastTracks(MIXER_IDLE),
2153 mStandbyDelayNs(getStandbyTimeInNanos()),
2154 mBytesRemaining(0),
2155 mCurrentWriteLength(0),
2156 mUseAsyncWrite(false),
2157 mWriteAckSequence(0),
2158 mDrainSequence(0),
2159 mScreenState(mAfThreadCallback->getScreenState()),
2160 // index 0 is reserved for normal mixer's submix
2161 mFastTrackAvailMask(((1 << FastMixerState::sMaxFastTracks) - 1) & ~1),
2162 mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
2163 mLeftVolFloat(-1.0), mRightVolFloat(-1.0),
2164 mDownStreamPatch{},
2165 mIsTimestampAdvancing(kMinimumTimeBetweenTimestampChecksNs)
2166 {
2167 snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
2168 mNBLogWriter = afThreadCallback->newWriter_l(kLogSize, mThreadName);
2169
2170 // Assumes constructor is called by AudioFlinger with its mutex() held, but
2171 // it would be safer to explicitly pass initial masterVolume/masterMute as
2172 // parameters.
2173 //
2174 // If the HAL we are using has support for master volume or master mute,
2175 // then do not attenuate or mute during mixing (just leave the volume at 1.0
2176 // and the mute set to false).
2177 mMasterVolume = afThreadCallback->masterVolume_l();
2178 mMasterMute = afThreadCallback->masterMute_l();
2179 if (mOutput->audioHwDev) {
2180 if (mOutput->audioHwDev->canSetMasterVolume()) {
2181 mMasterVolume = 1.0;
2182 }
2183
2184 if (mOutput->audioHwDev->canSetMasterMute()) {
2185 mMasterMute = false;
2186 }
2187 mIsMsdDevice = strcmp(
2188 mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
2189 }
2190
2191 if (mixerConfig != nullptr && mixerConfig->channel_mask != AUDIO_CHANNEL_NONE) {
2192 mMixerChannelMask = mixerConfig->channel_mask;
2193 }
2194
2195 readOutputParameters_l();
2196
2197 if (mType != SPATIALIZER
2198 && mMixerChannelMask != mChannelMask) {
2199 LOG_ALWAYS_FATAL("HAL channel mask %#x does not match mixer channel mask %#x",
2200 mChannelMask, mMixerChannelMask);
2201 }
2202
2203 // TODO: We may also match on address as well as device type for
2204 // AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
2205 if (type == MIXER || type == DIRECT || type == OFFLOAD) {
2206 // TODO: Ensure that this property contains only a single device type.
2207 mTimestampCorrectedDevice = (audio_devices_t)property_get_int64(
2208 "audio.timestamp.corrected_output_device",
2209 (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_OUT_BUS // turn on by default for MSD
2210 : AUDIO_DEVICE_NONE));
2211 }
2212
2213 for (int i = AUDIO_STREAM_MIN; i < AUDIO_STREAM_FOR_POLICY_CNT; ++i) {
2214 const audio_stream_type_t stream{static_cast<audio_stream_type_t>(i)};
2215 mStreamTypes[stream].volume = 0.0f;
2216 mStreamTypes[stream].mute = mAfThreadCallback->streamMute_l(stream);
2217 }
2218 // Audio patch and call assistant volume are always max
2219 mStreamTypes[AUDIO_STREAM_PATCH].volume = 1.0f;
2220 mStreamTypes[AUDIO_STREAM_PATCH].mute = false;
2221 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].volume = 1.0f;
2222 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].mute = false;
2223 }
2224
2225 PlaybackThread::~PlaybackThread()
2226 {
2227 mAfThreadCallback->unregisterWriter(mNBLogWriter);
2228 free(mSinkBuffer);
2229 free(mMixerBuffer);
2230 free(mEffectBuffer);
2231 free(mPostSpatializerBuffer);
2232 }
2233
2234 // Thread virtuals
2235
2236 void PlaybackThread::onFirstRef()
2237 {
2238 if (!isStreamInitialized()) {
2239 ALOGE("The stream is not open yet"); // This should not happen.
2240 } else {
2241 // Callbacks take strong or weak pointers as a parameter.
2242 // Since PlaybackThread passes itself as a callback handler, it can only
2243 // be done outside of the constructor. Creating weak and especially strong
2244 // pointers to a refcounted object in its own constructor is strongly
2245 // discouraged, see comments in system/core/libutils/include/utils/RefBase.h.
2246 // Even if a function takes a weak pointer, it is possible that it will
2247 // need to convert it to a strong pointer down the line.
2248 if (mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING &&
2249 mOutput->stream->setCallback(this) == OK) {
2250 mUseAsyncWrite = true;
2251 mCallbackThread = sp<AsyncCallbackThread>::make(this);
2252 }
2253
2254 if (mOutput->stream->setEventCallback(this) != OK) {
2255 ALOGD("Failed to add event callback");
2256 }
2257 }
2258 run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
2259 mThreadSnapshot.setTid(getTid());
2260 }
2261
2262 // ThreadBase virtuals
2263 void PlaybackThread::preExit()
2264 {
2265 ALOGV(" preExit()");
2266 status_t result = mOutput->stream->exit();
2267 ALOGE_IF(result != OK, "Error when calling exit(): %d", result);
2268 }
2269
2270 void PlaybackThread::dumpTracks_l(int fd, const Vector<String16>& /* args */)
2271 {
2272 String8 result;
2273
2274 result.appendFormat(" Stream volumes in dB: ");
2275 for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
2276 const stream_type_t *st = &mStreamTypes[i];
2277 if (i > 0) {
2278 result.appendFormat(", ");
2279 }
2280 result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume));
2281 if (st->mute) {
2282 result.append("M");
2283 }
2284 }
2285 result.append("\n");
2286 write(fd, result.c_str(), result.length());
2287 result.clear();
2288
2289 // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way.
2290 FastTrackUnderruns underruns = getFastTrackUnderruns(0);
2291 dprintf(fd, " Normal mixer raw underrun counters: partial=%u empty=%u\n",
2292 underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);
2293
2294 size_t numtracks = mTracks.size();
2295 size_t numactive = mActiveTracks.size();
2296 dprintf(fd, " %zu Tracks", numtracks);
2297 size_t numactiveseen = 0;
2298 const char *prefix = " ";
2299 if (numtracks) {
2300 dprintf(fd, " of which %zu are active\n", numactive);
2301 result.append(prefix);
2302 mTracks[0]->appendDumpHeader(result);
2303 for (size_t i = 0; i < numtracks; ++i) {
2304 sp<IAfTrack> track = mTracks[i];
2305 if (track != 0) {
2306 bool active = mActiveTracks.indexOf(track) >= 0;
2307 if (active) {
2308 numactiveseen++;
2309 }
2310 result.append(prefix);
2311 track->appendDump(result, active);
2312 }
2313 }
2314 } else {
2315 result.append("\n");
2316 }
2317 if (numactiveseen != numactive) {
2318 // some tracks in the active list were not in the tracks list
2319 result.append(" The following tracks are in the active list but"
2320 " not in the track list\n");
2321 result.append(prefix);
2322 mActiveTracks[0]->appendDumpHeader(result);
2323 for (size_t i = 0; i < numactive; ++i) {
2324 sp<IAfTrack> track = mActiveTracks[i];
2325 if (mTracks.indexOf(track) < 0) {
2326 result.append(prefix);
2327 track->appendDump(result, true /* active */);
2328 }
2329 }
2330 }
2331
2332 write(fd, result.c_str(), result.size());
2333 }
2334
2335 void PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
2336 {
2337 dprintf(fd, " Master volume: %f\n", mMasterVolume);
2338 dprintf(fd, " Master mute: %s\n", mMasterMute ? "on" : "off");
2339 dprintf(fd, " Mixer channel Mask: %#x (%s)\n",
2340 mMixerChannelMask, channelMaskToString(mMixerChannelMask, true /* output */).c_str());
2341 if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
2342 dprintf(fd, " Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
2343 channelMaskToString(mHapticChannelMask, true /* output */).c_str());
2344 }
2345 dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
2346 dprintf(fd, " Total writes: %d\n", mNumWrites);
2347 dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites);
2348 dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no");
2349 dprintf(fd, " Suspend count: %d\n", (int32_t)mSuspended);
2350 dprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask);
2351 dprintf(fd, " Standby delay ns=%lld\n", (long long)mStandbyDelayNs);
2352 AudioStreamOut *output = mOutput;
2353 audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
2354 dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n",
2355 output, flags, toString(flags).c_str());
2356 dprintf(fd, " Frames written: %lld\n", (long long)mFramesWritten);
2357 dprintf(fd, " Suspended frames: %lld\n", (long long)mSuspendedFrames);
2358 if (mPipeSink.get() != nullptr) {
2359 dprintf(fd, " PipeSink frames written: %lld\n", (long long)mPipeSink->framesWritten());
2360 }
2361 if (output != nullptr) {
2362 dprintf(fd, " Hal stream dump:\n");
2363 (void)output->stream->dump(fd, args);
2364 }
2365 }
2366
2367 // PlaybackThread::createTrack_l() must be called with AudioFlinger::mutex() held
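// createTrack_l() validates the requested sample rate, format, channel mask, flags and
// frame counts against the output stream and any attached effect chains, then allocates
// the server-side Track, adds it to mTracks and hooks it up to its session's effect chain.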
2368 sp<IAfTrack> PlaybackThread::createTrack_l(
2369 const sp<Client>& client,
2370 audio_stream_type_t streamType,
2371 const audio_attributes_t& attr,
2372 uint32_t *pSampleRate,
2373 audio_format_t format,
2374 audio_channel_mask_t channelMask,
2375 size_t *pFrameCount,
2376 size_t *pNotificationFrameCount,
2377 uint32_t notificationsPerBuffer,
2378 float speed,
2379 const sp<IMemory>& sharedBuffer,
2380 audio_session_t sessionId,
2381 audio_output_flags_t *flags,
2382 pid_t creatorPid,
2383 const AttributionSourceState& attributionSource,
2384 pid_t tid,
2385 status_t *status,
2386 audio_port_handle_t portId,
2387 const sp<media::IAudioTrackCallback>& callback,
2388 bool isSpatialized,
2389 bool isBitPerfect,
2390 audio_output_flags_t *afTrackFlags)
2391 {
2392 size_t frameCount = *pFrameCount;
2393 size_t notificationFrameCount = *pNotificationFrameCount;
2394 sp<IAfTrack> track;
2395 status_t lStatus;
2396 audio_output_flags_t outputFlags = mOutput->flags;
2397 audio_output_flags_t requestedFlags = *flags;
2398 uint32_t sampleRate;
2399
2400 if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
2401 lStatus = BAD_VALUE;
2402 goto Exit;
2403 }
2404
2405 if (*pSampleRate == 0) {
2406 *pSampleRate = mSampleRate;
2407 }
2408 sampleRate = *pSampleRate;
2409
2410 // special case for FAST flag considered OK if fast mixer is present
2411 if (hasFastMixer()) {
2412 outputFlags = (audio_output_flags_t)(outputFlags | AUDIO_OUTPUT_FLAG_FAST);
2413 }
2414
2415 // Check if requested flags are compatible with output stream flags
2416 if ((*flags & outputFlags) != *flags) {
2417 ALOGW("createTrack_l(): mismatch between requested flags (%08x) and output flags (%08x)",
2418 *flags, outputFlags);
2419 *flags = (audio_output_flags_t)(*flags & outputFlags);
2420 }
2421
2422 if (isBitPerfect) {
2423 audio_utils::lock_guard _l(mutex());
2424 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
2425 if (chain.get() != nullptr) {
2426 // Bit-perfect is required according to the configuration and preferred mixer
2427 // attributes, but the flag is not in the client's requested output flags.
2428 // Explicitly add the bit-perfect flag to check compatibility.
2429 audio_output_flags_t flagsToCheck =
2430 (audio_output_flags_t)(*flags & AUDIO_OUTPUT_FLAG_BIT_PERFECT);
2431 chain->checkOutputFlagCompatibility(&flagsToCheck);
2432 if ((flagsToCheck & AUDIO_OUTPUT_FLAG_BIT_PERFECT) == AUDIO_OUTPUT_FLAG_NONE) {
2433 ALOGE("%s cannot create track as there is data-processing effect attached to "
2434 "given session id(%d)", __func__, sessionId);
2435 lStatus = BAD_VALUE;
2436 goto Exit;
2437 }
2438 *flags = flagsToCheck;
2439 }
2440 }
2441
2442 // client expresses a preference for FAST, but we get the final say
2443 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2444 if (
2445 // PCM data
2446 audio_is_linear_pcm(format) &&
2447 // TODO: extract as a data library function that checks that a computationally
2448 // expensive downmixer is not required: isFastOutputChannelConversion()
2449 (channelMask == (mChannelMask | mHapticChannelMask) ||
2450 mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
2451 (channelMask == AUDIO_CHANNEL_OUT_MONO
2452 /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
2453 // hardware sample rate
2454 (sampleRate == mSampleRate) &&
2455 // normal mixer has an associated fast mixer
2456 hasFastMixer() &&
2457 // there are sufficient fast track slots available
2458 (mFastTrackAvailMask != 0)
2459 // FIXME test that MixerThread for this fast track has a capable output HAL
2460 // FIXME add a permission test also?
2461 ) {
2462 // static tracks can have any nonzero framecount, streaming tracks check against minimum.
2463 if (sharedBuffer == 0) {
2464 // read the fast track multiplier property the first time it is needed
2465 int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit);
2466 if (ok != 0) {
2467 ALOGE("%s pthread_once failed: %d", __func__, ok);
2468 }
2469 frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
2470 }
2471
2472 // check compatibility with audio effects.
2473 { // scope for mutex()
2474 audio_utils::lock_guard _l(mutex());
2475 for (audio_session_t session : {
2476 AUDIO_SESSION_DEVICE,
2477 AUDIO_SESSION_OUTPUT_STAGE,
2478 AUDIO_SESSION_OUTPUT_MIX,
2479 sessionId,
2480 }) {
2481 sp<IAfEffectChain> chain = getEffectChain_l(session);
2482 if (chain.get() != nullptr) {
2483 audio_output_flags_t old = *flags;
2484 chain->checkOutputFlagCompatibility(flags);
2485 if (old != *flags) {
2486 ALOGV("AUDIO_OUTPUT_FLAGS denied by effect, session=%d old=%#x new=%#x",
2487 (int)session, (int)old, (int)*flags);
2488 }
2489 }
2490 }
2491 }
2492 ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_FAST) != 0,
2493 "AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
2494 frameCount, mFrameCount);
2495 } else {
2496 ALOGD("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
2497 "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
2498 "sampleRate=%u mSampleRate=%u "
2499 "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
2500 sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
2501 audio_is_linear_pcm(format), channelMask, sampleRate,
2502 mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
2503 *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
2504 }
2505 }
2506
2507 if (!audio_has_proportional_frames(format)) {
2508 if (sharedBuffer != 0) {
2509 // Same comment as below about ignoring frameCount parameter for set()
2510 frameCount = sharedBuffer->size();
2511 } else if (frameCount == 0) {
2512 frameCount = mNormalFrameCount;
2513 }
2514 if (notificationFrameCount != frameCount) {
2515 notificationFrameCount = frameCount;
2516 }
2517 } else if (sharedBuffer != 0) {
2518 // FIXME: Ensure client side memory buffers need
2519 // not have additional alignment beyond sample
2520 // (e.g. 16 bit stereo accessed as 32 bit frame).
2521 size_t alignment = audio_bytes_per_sample(format);
2522 if (alignment & 1) {
2523 // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
2524 alignment = 1;
2525 }
2526 uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
2527 size_t frameSize = channelCount * audio_bytes_per_sample(format);
2528 if (channelCount > 1) {
2529 // More than 2 channels does not require stronger alignment than stereo
2530 alignment <<= 1;
2531 }
2532 if (((uintptr_t)sharedBuffer->unsecurePointer() & (alignment - 1)) != 0) {
2533 ALOGE("Invalid buffer alignment: address %p, channel count %u",
2534 sharedBuffer->unsecurePointer(), channelCount);
2535 lStatus = BAD_VALUE;
2536 goto Exit;
2537 }
2538
2539 // When initializing a shared buffer AudioTrack via constructors,
2540 // there's no frameCount parameter.
2541 // But when initializing a shared buffer AudioTrack via set(),
2542 // there _is_ a frameCount parameter. We silently ignore it.
2543 frameCount = sharedBuffer->size() / frameSize;
2544 } else {
2545 size_t minFrameCount = 0;
2546 // For fast tracks we try to respect the application's request for notifications per buffer.
2547 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2548 if (notificationsPerBuffer > 0) {
2549 // Avoid possible arithmetic overflow during multiplication.
2550 if (notificationsPerBuffer > SIZE_MAX / mFrameCount) {
2551 ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
2552 notificationsPerBuffer, mFrameCount);
2553 } else {
2554 minFrameCount = mFrameCount * notificationsPerBuffer;
2555 }
2556 }
2557 } else {
2558 // For normal PCM streaming tracks, update minimum frame count.
2559 // Buffer depth is forced to be at least 2 x the normal mixer frame count and
2560 // to cover audio hardware latency.
2561 // This is probably too conservative, but legacy application code may depend on it.
2562 // If you change this calculation, also review the start threshold which is related.
2563 uint32_t latencyMs = latency_l();
2564 if (latencyMs == 0) {
2565 ALOGE("Error when retrieving output stream latency");
2566 lStatus = UNKNOWN_ERROR;
2567 goto Exit;
2568 }
2569
2570 minFrameCount = AudioSystem::calculateMinFrameCount(latencyMs, mNormalFrameCount,
2571 mSampleRate, sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2572
2573 }
2574 if (frameCount < minFrameCount) {
2575 frameCount = minFrameCount;
2576 }
2577 }
2578
2579 // Make sure that application is notified with sufficient margin before underrun.
2580 // The client can divide the AudioTrack buffer into sub-buffers,
2581 // and expresses its desire to the server as the notification frame count.
2582 if (sharedBuffer == 0 && audio_is_linear_pcm(format)) {
2583 size_t maxNotificationFrames;
2584 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2585 // notify every HAL buffer, regardless of the size of the track buffer
2586 maxNotificationFrames = mFrameCount;
2587 } else {
2588 // Triple buffer the notification period for a triple buffered mixer period;
2589 // otherwise, double buffering for the notification period is fine.
2590 //
2591 // TODO: This should be moved to AudioTrack to modify the notification period
2592 // on AudioTrack::setBufferSizeInFrames() changes.
2593 const int nBuffering =
2594 (uint64_t{frameCount} * mSampleRate)
2595 / (uint64_t{mNormalFrameCount} * sampleRate) == 3 ? 3 : 2;
2596
2597 maxNotificationFrames = frameCount / nBuffering;
2598 // If client requested a fast track but this was denied, then use the smaller maximum.
2599 if (requestedFlags & AUDIO_OUTPUT_FLAG_FAST) {
2600 size_t maxNotificationFramesFastDenied = FMS_20 * sampleRate / 1000;
2601 if (maxNotificationFrames > maxNotificationFramesFastDenied) {
2602 maxNotificationFrames = maxNotificationFramesFastDenied;
2603 }
2604 }
2605 }
2606 if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
2607 if (notificationFrameCount == 0) {
2608 ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
2609 maxNotificationFrames, frameCount);
2610 } else {
2611 ALOGW("Client adjusted notificationFrames from %zu to %zu for frameCount %zu",
2612 notificationFrameCount, maxNotificationFrames, frameCount);
2613 }
2614 notificationFrameCount = maxNotificationFrames;
2615 }
2616 }
2617
2618 *pFrameCount = frameCount;
2619 *pNotificationFrameCount = notificationFrameCount;
2620
2621 switch (mType) {
2622 case BIT_PERFECT:
2623 if (isBitPerfect) {
2624 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2625 ALOGE("%s, bad parameter when request streaming bit-perfect, sampleRate=%u, "
2626 "format=%#x, channelMask=%#x, mSampleRate=%u, mFormat=%#x, mChannelMask=%#x",
2627 __func__, sampleRate, format, channelMask, mSampleRate, mFormat,
2628 mChannelMask);
2629 lStatus = BAD_VALUE;
2630 goto Exit;
2631 }
2632 }
2633 break;
2634
2635 case DIRECT:
2636 if (audio_is_linear_pcm(format)) { // TODO maybe use audio_has_proportional_frames()?
2637 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2638 ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x "
2639 "for output %p with format %#x",
2640 sampleRate, format, channelMask, mOutput, mFormat);
2641 lStatus = BAD_VALUE;
2642 goto Exit;
2643 }
2644 }
2645 break;
2646
2647 case OFFLOAD:
2648 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2649 ALOGE("createTrack_l() Bad parameter: sampleRate %d format %#x, channelMask 0x%08x \""
2650 "for output %p with format %#x",
2651 sampleRate, format, channelMask, mOutput, mFormat);
2652 lStatus = BAD_VALUE;
2653 goto Exit;
2654 }
2655 break;
2656
2657 default:
2658 if (!audio_is_linear_pcm(format)) {
2659 ALOGE("createTrack_l() Bad parameter: format %#x \""
2660 "for output %p with format %#x",
2661 format, mOutput, mFormat);
2662 lStatus = BAD_VALUE;
2663 goto Exit;
2664 }
2665 if (sampleRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
2666 ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
2667 lStatus = BAD_VALUE;
2668 goto Exit;
2669 }
2670 break;
2671
2672 }
2673
2674 lStatus = initCheck();
2675 if (lStatus != NO_ERROR) {
2676 ALOGE("createTrack_l() audio driver not initialized");
2677 goto Exit;
2678 }
2679
2680 { // scope for mutex()
2681 audio_utils::lock_guard _l(mutex());
2682
2683 // all tracks in same audio session must share the same routing strategy otherwise
2684 // conflicts will happen when tracks are moved from one output to another by audio policy
2685 // manager
2686 product_strategy_t strategy = getStrategyForStream(streamType);
2687 for (size_t i = 0; i < mTracks.size(); ++i) {
2688 sp<IAfTrack> t = mTracks[i];
2689 if (t != 0 && t->isExternalTrack()) {
2690 product_strategy_t actual = getStrategyForStream(t->streamType());
2691 if (sessionId == t->sessionId() && strategy != actual) {
2692 ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
2693 strategy, actual);
2694 lStatus = BAD_VALUE;
2695 goto Exit;
2696 }
2697 }
2698 }
2699
2700 // Set DIRECT/OFFLOAD flag if current thread is DirectOutputThread/OffloadThread.
2701 // This can happen when the playback is rerouted to direct output/offload thread by
2702 // dynamic audio policy.
2703 // Do NOT report the flag changes back to client, since the client
2704 // doesn't explicitly request a direct/offload flag.
2705 audio_output_flags_t trackFlags = *flags;
2706 if (mType == DIRECT) {
2707 trackFlags = static_cast<audio_output_flags_t>(trackFlags | AUDIO_OUTPUT_FLAG_DIRECT);
2708 } else if (mType == OFFLOAD) {
2709 trackFlags = static_cast<audio_output_flags_t>(trackFlags |
2710 AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT);
2711 }
2712 *afTrackFlags = trackFlags;
2713
2714 track = IAfTrack::create(this, client, streamType, attr, sampleRate, format,
2715 channelMask, frameCount,
2716 nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
2717 sessionId, creatorPid, attributionSource, trackFlags,
2718 IAfTrackBase::TYPE_DEFAULT, portId, SIZE_MAX /*frameCountToBeReady*/,
2719 speed, isSpatialized, isBitPerfect);
2720
2721 lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
2722 if (lStatus != NO_ERROR) {
2723 ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
2724 // track must be cleared from the caller as the caller has the AF lock
2725 goto Exit;
2726 }
2727 mTracks.add(track);
2728 {
2729 audio_utils::lock_guard _atCbL(audioTrackCbMutex());
2730 if (callback.get() != nullptr) {
2731 mAudioTrackCallbacks.emplace(track, callback);
2732 }
2733 }
2734
2735 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
2736 if (chain != 0) {
2737 ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
2738 track->setMainBuffer(chain->inBuffer());
2739 chain->setStrategy(getStrategyForStream(track->streamType()));
2740 chain->incTrackCnt();
2741 }
2742
2743 if ((*flags & AUDIO_OUTPUT_FLAG_FAST) && (tid != -1)) {
2744 pid_t callingPid = IPCThreadState::self()->getCallingPid();
2745 // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
2746 // so ask activity manager to do this on our behalf
2747 sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
2748 }
2749 }
2750
2751 lStatus = NO_ERROR;
2752
2753 Exit:
2754 *status = lStatus;
2755 return track;
2756 }
2757
2758 template<typename T>
2759 ssize_t PlaybackThread::Tracks<T>::remove(const sp<T>& track)
2760 {
2761 const int trackId = track->id();
2762 const ssize_t index = mTracks.remove(track);
2763 if (index >= 0) {
2764 if (mSaveDeletedTrackIds) {
2765 // We can't directly access mAudioMixer since the caller may be outside of threadLoop.
2766 // Instead, we add to mDeletedTrackIds which is solely used for mAudioMixer update,
2767 // to be handled when MixerThread::prepareTracks_l() next changes mAudioMixer.
2768 mDeletedTrackIds.emplace(trackId);
2769 }
2770 }
2771 return index;
2772 }
2773
2774 uint32_t PlaybackThread::correctLatency_l(uint32_t latency) const
2775 {
2776 return latency;
2777 }
2778
2779 uint32_t PlaybackThread::latency() const
2780 {
2781 audio_utils::lock_guard _l(mutex());
2782 return latency_l();
2783 }
2784 uint32_t PlaybackThread::latency_l() const
2785 NO_THREAD_SAFETY_ANALYSIS
2786 // Fix later.
2787 {
2788 uint32_t latency;
2789 if (initCheck() == NO_ERROR && mOutput->stream->getLatency(&latency) == OK) {
2790 return correctLatency_l(latency);
2791 }
2792 return 0;
2793 }
2794
2795 void PlaybackThread::setMasterVolume(float value)
2796 {
2797 audio_utils::lock_guard _l(mutex());
2798 // Don't apply master volume in SW if our HAL can do it for us.
2799 if (mOutput && mOutput->audioHwDev &&
2800 mOutput->audioHwDev->canSetMasterVolume()) {
2801 mMasterVolume = 1.0;
2802 } else {
2803 mMasterVolume = value;
2804 }
2805 }
2806
2807 void PlaybackThread::setMasterBalance(float balance)
2808 {
2809 mMasterBalance.store(balance);
2810 }
2811
2812 void PlaybackThread::setMasterMute(bool muted)
2813 {
2814 if (isDuplicating()) {
2815 return;
2816 }
2817 audio_utils::lock_guard _l(mutex());
2818 // Don't apply master mute in SW if our HAL can do it for us.
2819 if (mOutput && mOutput->audioHwDev &&
2820 mOutput->audioHwDev->canSetMasterMute()) {
2821 mMasterMute = false;
2822 } else {
2823 mMasterMute = muted;
2824 }
2825 }
2826
2827 void PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
2828 {
2829 audio_utils::lock_guard _l(mutex());
2830 mStreamTypes[stream].volume = value;
2831 broadcast_l();
2832 }
2833
2834 void PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
2835 {
2836 audio_utils::lock_guard _l(mutex());
2837 mStreamTypes[stream].mute = muted;
2838 broadcast_l();
2839 }
2840
2841 float PlaybackThread::streamVolume(audio_stream_type_t stream) const
2842 {
2843 audio_utils::lock_guard _l(mutex());
2844 return mStreamTypes[stream].volume;
2845 }
2846
2847 void PlaybackThread::setVolumeForOutput_l(float left, float right) const
2848 {
2849 mOutput->stream->setVolume(left, right);
2850 }
2851
2852 // addTrack_l() must be called with ThreadBase::mutex() held
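// addTrack_l() promotes a track to the active list: external tracks are started with audio
// policy via AudioSystem::startOutput(), buffer-fill retry counts are set, haptic playback
// is configured when applicable, and the thread loop is signalled via onAddNewTrack_l().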
2853 status_t PlaybackThread::addTrack_l(const sp<IAfTrack>& track)
2854 {
2855 status_t status = ALREADY_EXISTS;
2856
2857 if (mActiveTracks.indexOf(track) < 0) {
2858 // the track is newly added, make sure it fills up all its
2859 // buffers before playing. This is to ensure the client will
2860 // effectively get the latency it requested.
2861 if (track->isExternalTrack()) {
2862 IAfTrackBase::track_state state = track->state();
2863 // Because the track is not on the ActiveTracks,
2864 // at this point, only the TrackHandle will be adding the track.
2865 mutex().unlock();
2866 status = AudioSystem::startOutput(track->portId());
2867 mutex().lock();
2868 // abort if the track was stopped/paused while we released the lock
2869 if (state != track->state()) {
2870 if (status == NO_ERROR) {
2871 mutex().unlock();
2872 AudioSystem::stopOutput(track->portId());
2873 mutex().lock();
2874 }
2875 return INVALID_OPERATION;
2876 }
2877 // abort if start is rejected by audio policy manager
2878 if (status != NO_ERROR) {
2879 // Do not replace the error if it is DEAD_OBJECT. When this happens, it indicates
2880 // current playback thread is reopened, which may happen when clients set preferred
2881 // mixer configuration. Returning DEAD_OBJECT will make the client restore track
2882 // immediately.
2883 return status == DEAD_OBJECT ? status : PERMISSION_DENIED;
2884 }
2885 #ifdef ADD_BATTERY_DATA
2886 // to track the speaker usage
2887 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
2888 #endif
2889 sendIoConfigEvent_l(AUDIO_CLIENT_STARTED, track->creatorPid(), track->portId());
2890 }
2891
2892 // set retry count for buffer fill
2893 if (track->isOffloaded()) {
2894 if (track->isStopping_1()) {
2895 track->retryCount() = kMaxTrackStopRetriesOffload;
2896 } else {
2897 track->retryCount() = kMaxTrackStartupRetriesOffload;
2898 }
2899 track->fillingStatus() = mStandby ? IAfTrack::FS_FILLING : IAfTrack::FS_FILLED;
2900 } else {
2901 track->retryCount() = kMaxTrackStartupRetries;
2902 track->fillingStatus() =
2903 track->sharedBuffer() != 0 ? IAfTrack::FS_FILLED : IAfTrack::FS_FILLING;
2904 }
2905
2906 sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
2907 if (mHapticChannelMask != AUDIO_CHANNEL_NONE
2908 && ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
2909 || (chain != nullptr && chain->containsHapticGeneratingEffect()))) {
2910 // Unlock because VibratorService will lock for this call and will
2911 // call Tracks.mute/unmute which also require the thread's lock.
2912 mutex().unlock();
2913 const os::HapticScale hapticScale = afutils::onExternalVibrationStart(
2914 track->getExternalVibration());
2915 std::optional<media::AudioVibratorInfo> vibratorInfo;
2916 {
2917 // TODO(b/184194780): Use the vibrator information from the vibrator that will be
2918 // used to play this track.
2919 audio_utils::lock_guard _l(mAfThreadCallback->mutex());
2920 vibratorInfo = std::move(mAfThreadCallback->getDefaultVibratorInfo_l());
2921 }
2922 mutex().lock();
2923 track->setHapticScale(hapticScale);
2924 if (vibratorInfo) {
2925 track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
2926 }
2927
2928 // Haptic playback should be enabled by vibrator service.
2929 if (track->getHapticPlaybackEnabled()) {
2930 // Disable haptic playback on all active tracks to ensure that only
2931 // one track plays haptics when the current track should play haptics.
2932 for (const auto &t : mActiveTracks) {
2933 t->setHapticPlaybackEnabled(false);
2934 }
2935 }
2936
2937 // Set haptic intensity for effect
2938 if (chain != nullptr) {
2939 chain->setHapticScale_l(track->id(), hapticScale);
2940 }
2941 }
2942
2943 track->setResetDone(false);
2944 track->resetPresentationComplete();
2945
2946 // Do not release the ThreadBase mutex after the track is added to mActiveTracks unless
2947 // all key changes are complete. It is possible that the threadLoop will begin
2948 // processing the added track immediately after the ThreadBase mutex is released.
2949 mActiveTracks.add(track);
2950
2951 if (chain != 0) {
2952 ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
2953 track->sessionId());
2954 chain->incActiveTrackCnt();
2955 }
2956
2957 track->logBeginInterval(patchSinksToString(&mPatch)); // log to MediaMetrics
2958 status = NO_ERROR;
2959 }
2960
2961 onAddNewTrack_l();
2962 return status;
2963 }
2964
2965 bool PlaybackThread::destroyTrack_l(const sp<IAfTrack>& track)
2966 {
2967 track->terminate();
2968 // active tracks are removed by threadLoop()
2969 bool trackActive = (mActiveTracks.indexOf(track) >= 0);
2970 track->setState(IAfTrackBase::STOPPED);
2971 if (!trackActive) {
2972 removeTrack_l(track);
2973 } else if (track->isFastTrack() || track->isOffloaded() || track->isDirect()) {
2974 if (track->isPausePending()) {
2975 track->pauseAck();
2976 }
2977 track->setState(IAfTrackBase::STOPPING_1);
2978 }
2979
2980 return trackActive;
2981 }
2982
2983 void PlaybackThread::removeTrack_l(const sp<IAfTrack>& track)
2984 {
2985 track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
2986
2987 String8 result;
2988 track->appendDump(result, false /* active */);
2989 mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.c_str());
2990
2991 mTracks.remove(track);
2992 {
2993 audio_utils::lock_guard _atCbL(audioTrackCbMutex());
2994 mAudioTrackCallbacks.erase(track);
2995 }
2996 if (track->isFastTrack()) {
2997 int index = track->fastIndex();
2998 ALOG_ASSERT(0 < index && index < (int)FastMixerState::sMaxFastTracks);
2999 ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
3000 mFastTrackAvailMask |= 1 << index;
3001 // redundant as track is about to be destroyed, for dumpsys only
3002 track->fastIndex() = -1;
3003 }
3004 sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
3005 if (chain != 0) {
3006 chain->decTrackCnt();
3007 }
3008 }
3009
3010 std::set<audio_port_handle_t> PlaybackThread::getTrackPortIds_l()
3011 {
3012 std::set<int32_t> result;
3013 for (const auto& t : mTracks) {
3014 if (t->isExternalTrack()) {
3015 result.insert(t->portId());
3016 }
3017 }
3018 return result;
3019 }
3020
3021 std::set<audio_port_handle_t> PlaybackThread::getTrackPortIds()
3022 {
3023 audio_utils::lock_guard _l(mutex());
3024 return getTrackPortIds_l();
3025 }
3026
3027 String8 PlaybackThread::getParameters(const String8& keys)
3028 {
3029 audio_utils::lock_guard _l(mutex());
3030 String8 out_s8;
3031 if (initCheck() == NO_ERROR && mOutput->stream->getParameters(keys, &out_s8) == OK) {
3032 return out_s8;
3033 }
3034 return {};
3035 }
3036
3037 status_t DirectOutputThread::selectPresentation(int presentationId, int programId) {
3038 audio_utils::lock_guard _l(mutex());
3039 if (!isStreamInitialized()) {
3040 return NO_INIT;
3041 }
3042 return mOutput->stream->selectPresentation(presentationId, programId);
3043 }
3044
3045 void PlaybackThread::ioConfigChanged_l(audio_io_config_event_t event, pid_t pid,
3046 audio_port_handle_t portId) {
3047 ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
3048 sp<AudioIoDescriptor> desc;
3049 const struct audio_patch patch = isMsdDevice() ? mDownStreamPatch : mPatch;
3050 switch (event) {
3051 case AUDIO_OUTPUT_OPENED:
3052 case AUDIO_OUTPUT_REGISTERED:
3053 case AUDIO_OUTPUT_CONFIG_CHANGED:
3054 desc = sp<AudioIoDescriptor>::make(mId, patch, false /*isInput*/,
3055 mSampleRate, mFormat, mChannelMask,
3056 // FIXME AudioFlinger::frameCount(audio_io_handle_t) instead of mNormalFrameCount?
3057 mNormalFrameCount, mFrameCount, latency_l());
3058 break;
3059 case AUDIO_CLIENT_STARTED:
3060 desc = sp<AudioIoDescriptor>::make(mId, patch, portId);
3061 break;
3062 case AUDIO_OUTPUT_CLOSED:
3063 default:
3064 desc = sp<AudioIoDescriptor>::make(mId);
3065 break;
3066 }
3067 mAfThreadCallback->ioConfigChanged_l(event, desc, pid);
3068 }
3069
3070 void PlaybackThread::onWriteReady()
3071 {
3072 mCallbackThread->resetWriteBlocked();
3073 }
3074
3075 void PlaybackThread::onDrainReady()
3076 {
3077 mCallbackThread->resetDraining();
3078 }
3079
3080 void PlaybackThread::onError(bool isHardError)
3081 {
3082 mCallbackThread->setAsyncError(isHardError);
3083 }
3084
3085 void PlaybackThread::onCodecFormatChanged(
3086 const std::vector<uint8_t>& metadataBs)
3087 {
3088 const auto weakPointerThis = wp<PlaybackThread>::fromExisting(this);
3089 std::thread([this, metadataBs, weakPointerThis]() {
3090 const sp<PlaybackThread> playbackThread = weakPointerThis.promote();
3091 if (playbackThread == nullptr) {
3092 ALOGW("PlaybackThread was destroyed, skip codec format change event");
3093 return;
3094 }
3095
3096 audio_utils::metadata::Data metadata =
3097 audio_utils::metadata::dataFromByteString(metadataBs);
3098 if (metadata.empty()) {
3099 ALOGW("Can not transform the buffer to audio metadata, %s, %d",
3100 reinterpret_cast<char*>(const_cast<uint8_t*>(metadataBs.data())),
3101 (int)metadataBs.size());
3102 return;
3103 }
3104
3105 audio_utils::metadata::ByteString metaDataStr =
3106 audio_utils::metadata::byteStringFromData(metadata);
3107 std::vector metadataVec(metaDataStr.begin(), metaDataStr.end());
3108 audio_utils::lock_guard _l(audioTrackCbMutex());
3109 for (const auto& callbackPair : mAudioTrackCallbacks) {
3110 callbackPair.second->onCodecFormatChanged(metadataVec);
3111 }
3112 }).detach();
3113 }
3114
3115 void PlaybackThread::resetWriteBlocked(uint32_t sequence)
3116 {
3117 audio_utils::lock_guard _l(mutex());
3118 // reject out of sequence requests
3119 if ((mWriteAckSequence & 1) && (sequence == mWriteAckSequence)) {
3120 mWriteAckSequence &= ~1;
3121 mWaitWorkCV.notify_one();
3122 }
3123 }
3124
3125 void PlaybackThread::resetDraining(uint32_t sequence)
3126 {
3127 audio_utils::lock_guard _l(mutex());
3128 // reject out of sequence requests
3129 if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
3130 // Register discontinuity when HW drain is completed because that can cause
3131 // the timestamp frame position to reset to 0 for direct and offload threads.
3132 // (Out of sequence requests are ignored, since the discontinuity would be handled
3133 // elsewhere, e.g. in flush).
3134 mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
3135 mDrainSequence &= ~1;
3136 mWaitWorkCV.notify_one();
3137 }
3138 }
3139
3140 void PlaybackThread::readOutputParameters_l()
3141 NO_THREAD_SAFETY_ANALYSIS
3142 // 'moveEffectChain_ll' requires holding mutex 'AudioFlinger_Mutex' exclusively
3143 {
3144 // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
3145 const audio_config_base_t audioConfig = mOutput->getAudioProperties();
3146 mSampleRate = audioConfig.sample_rate;
3147 mChannelMask = audioConfig.channel_mask;
3148 if (!audio_is_output_channel(mChannelMask)) {
3149 LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
3150 }
3151 if (hasMixer() && !isValidPcmSinkChannelMask(mChannelMask)) {
3152 LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
3153 mChannelMask);
3154 }
3155
3156 if (mMixerChannelMask == AUDIO_CHANNEL_NONE) {
3157 mMixerChannelMask = mChannelMask;
3158 }
3159
3160 mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
3161 mBalance.setChannelMask(mChannelMask);
3162
3163 uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mMixerChannelMask);
3164
3165 // Get actual HAL format.
3166 status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
3167 LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
3168 // Get format from the shim, which will be different than the HAL format
3169 // if playing compressed audio over HDMI passthrough.
3170 mFormat = audioConfig.format;
3171 if (!audio_is_valid_format(mFormat)) {
3172 LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
3173 }
3174 if (hasMixer() && !isValidPcmSinkFormat(mFormat)) {
3175 LOG_FATAL("HAL format %#x not supported for mixed output",
3176 mFormat);
3177 }
3178 mFrameSize = mOutput->getFrameSize();
3179 result = mOutput->stream->getBufferSize(&mBufferSize);
3180 LOG_ALWAYS_FATAL_IF(result != OK,
3181 "Error when retrieving output stream buffer size: %d", result);
3182 mFrameCount = mBufferSize / mFrameSize;
3183 if (hasMixer() && (mFrameCount & 15)) {
3184 ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
3185 mFrameCount);
3186 }
3187
3188 mHwSupportsPause = false;
3189 if (mOutput->flags & AUDIO_OUTPUT_FLAG_DIRECT) {
3190 bool supportsPause = false, supportsResume = false;
3191 if (mOutput->stream->supportsPauseAndResume(&supportsPause, &supportsResume) == OK) {
3192 if (supportsPause && supportsResume) {
3193 mHwSupportsPause = true;
3194 } else if (supportsPause) {
3195 ALOGW("direct output implements pause but not resume");
3196 } else if (supportsResume) {
3197 ALOGW("direct output implements resume but not pause");
3198 }
3199 }
3200 }
3201 if (!mHwSupportsPause && mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
3202 LOG_ALWAYS_FATAL("HW_AV_SYNC requested but HAL does not implement pause and resume");
3203 }
3204
3205 if (mType == DUPLICATING && mMixerBufferEnabled && mEffectBufferEnabled) {
3206 // For best precision, we use float instead of the associated output
3207 // device format (typically PCM 16 bit).
3208
3209 mFormat = AUDIO_FORMAT_PCM_FLOAT;
3210 mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
3211 mBufferSize = mFrameSize * mFrameCount;
3212
3213 // TODO: We currently use the associated output device channel mask and sample rate.
3214 // (1) Perhaps use the ORed channel mask of all downstream MixerThreads
3215 // (if a valid mask) to avoid premature downmix.
3216 // (2) Perhaps use the maximum sample rate of all downstream MixerThreads
3217 // instead of the output device sample rate to avoid loss of high frequency information.
3218 // This may need to be updated as MixerThread/OutputTracks are added and not here.
3219 }
3220
3221 // Calculate size of normal sink buffer relative to the HAL output buffer size
3222 double multiplier = 1.0;
3223 // Note: mType == SPATIALIZER does not support FastMixer.
3224 if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
3225 kUseFastMixer == FastMixer_Dynamic)) {
3226 size_t minNormalFrameCount = (kMinNormalSinkBufferSizeMs * mSampleRate) / 1000;
3227 size_t maxNormalFrameCount = (kMaxNormalSinkBufferSizeMs * mSampleRate) / 1000;
3228
3229 // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
3230 minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
3231 maxNormalFrameCount = maxNormalFrameCount & ~15;
3232 if (maxNormalFrameCount < minNormalFrameCount) {
3233 maxNormalFrameCount = minNormalFrameCount;
3234 }
3235 multiplier = (double) minNormalFrameCount / (double) mFrameCount;
3236 if (multiplier <= 1.0) {
3237 multiplier = 1.0;
3238 } else if (multiplier <= 2.0) {
3239 if (2 * mFrameCount <= maxNormalFrameCount) {
3240 multiplier = 2.0;
3241 } else {
3242 multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
3243 }
3244 } else {
3245 multiplier = floor(multiplier);
3246 }
3247 }
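// Worked example (assuming, for illustration, kMinNormalSinkBufferSizeMs = 20 and
// kMaxNormalSinkBufferSizeMs = 24): with a 480-frame HAL buffer at 48 kHz,
// minNormalFrameCount = 960 and maxNormalFrameCount = 1152 (both already multiples of 16),
// so multiplier = 960 / 480 = 2.0 and the normal sink buffer below becomes 960 frames (20 ms).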
3248 mNormalFrameCount = multiplier * mFrameCount;
3249 // round up to nearest 16 frames to satisfy AudioMixer
3250 if (hasMixer()) {
3251 mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
3252 }
3253 ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames",
3254 (size_t)mFrameCount, mNormalFrameCount);
3255
3256 // Check if we want to throttle the processing to no more than 2x normal rate
3257 mThreadThrottle = property_get_bool("af.thread.throttle", true /* default_value */);
3258 mThreadThrottleTimeMs = 0;
3259 mThreadThrottleEndMs = 0;
3260 mHalfBufferMs = mNormalFrameCount * 1000 / (2 * mSampleRate);
3261
3262 // mSinkBuffer is the sink buffer. Size is always multiple-of-16 frames.
3263 // Originally this was int16_t[] array, need to remove legacy implications.
3264 free(mSinkBuffer);
3265 mSinkBuffer = NULL;
3266
3267 // For sink buffer size, we use the frame size from the downstream sink to avoid problems
3268 // with non PCM formats for compressed music, e.g. AAC, and Offload threads.
3269 const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
3270 (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
3271
3272 // We resize the mMixerBuffer according to the requirements of the sink buffer which
3273 // drives the output.
3274 free(mMixerBuffer);
3275 mMixerBuffer = NULL;
3276 if (mMixerBufferEnabled) {
3277 mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // no longer valid: AUDIO_FORMAT_PCM_16_BIT.
3278 mMixerBufferSize = mNormalFrameCount * mixerChannelCount
3279 * audio_bytes_per_sample(mMixerBufferFormat);
3280 (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
3281 }
3282 free(mEffectBuffer);
3283 mEffectBuffer = NULL;
3284 if (mEffectBufferEnabled) {
3285 mEffectBufferFormat = AUDIO_FORMAT_PCM_FLOAT;
3286 mEffectBufferSize = mNormalFrameCount * mixerChannelCount
3287 * audio_bytes_per_sample(mEffectBufferFormat);
3288 (void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
3289 }
3290
3291 if (mType == SPATIALIZER) {
3292 free(mPostSpatializerBuffer);
3293 mPostSpatializerBuffer = nullptr;
3294 mPostSpatializerBufferSize = mNormalFrameCount * mChannelCount
3295 * audio_bytes_per_sample(mEffectBufferFormat);
3296 (void)posix_memalign(&mPostSpatializerBuffer, 32, mPostSpatializerBufferSize);
3297 }
3298
3299 mHapticChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
3300 mChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & ~mHapticChannelMask);
3301 mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
3302 mChannelCount -= mHapticChannelCount;
3303 mMixerChannelMask = static_cast<audio_channel_mask_t>(mMixerChannelMask & ~mHapticChannelMask);
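// Example (hypothetical HAL mask): for STEREO | HAPTIC_A the code above yields
// mHapticChannelMask = HAPTIC_A (mHapticChannelCount = 1) and mChannelMask = STEREO
// (mChannelCount = 2), with the haptic bits also stripped from mMixerChannelMask.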
3304
3305 // force reconfiguration of effect chains and engines to take new buffer size and audio
3306 // parameters into account
3307 // Note that mutex() is not held when readOutputParameters_l() is called from the constructor
3308 // but in this case nothing is done below, as no audio sessions have effects yet, so it doesn't
3309 // matter.
3310 // create a copy of mEffectChains as calling moveEffectChain_ll()
3311 // can reorder some effect chains
3312 Vector<sp<IAfEffectChain>> effectChains = mEffectChains;
3313 for (size_t i = 0; i < effectChains.size(); i ++) {
3314 mAfThreadCallback->moveEffectChain_ll(effectChains[i]->sessionId(),
3315 this/* srcThread */, this/* dstThread */);
3316 }
3317
3318 audio_output_flags_t flags = mOutput->flags;
3319 mediametrics::LogItem item(mThreadMetrics.getMetricsId()); // TODO: method in ThreadMetrics?
3320 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
3321 .set(AMEDIAMETRICS_PROP_ENCODING, IAfThreadBase::formatToString(mFormat).c_str())
3322 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
3323 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
3324 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
3325 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mNormalFrameCount)
3326 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
3327 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELMASK,
3328 (int32_t)mHapticChannelMask)
3329 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELCOUNT,
3330 (int32_t)mHapticChannelCount)
3331 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_ENCODING,
3332 IAfThreadBase::formatToString(mHALFormat).c_str())
3333 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_FRAMECOUNT,
3334 (int32_t)mFrameCount) // sic - added HAL
3335 ;
3336 uint32_t latencyMs;
3337 if (mOutput->stream->getLatency(&latencyMs) == NO_ERROR) {
3338 item.set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_LATENCYMS, (double)latencyMs);
3339 }
3340 item.record();
3341 }
3342
3343 ThreadBase::MetadataUpdate PlaybackThread::updateMetadata_l()
3344 {
3345 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
3346 return {}; // nothing to do
3347 }
3348 StreamOutHalInterface::SourceMetadata metadata;
3349 static const bool stereo_spatialization_property =
3350 property_get_bool("ro.audio.stereo_spatialization_enabled", false);
3351 const bool stereo_spatialization_enabled =
3352 stereo_spatialization_property && com_android_media_audio_stereo_spatialization();
3353 if (stereo_spatialization_enabled) {
3354 std::map<audio_session_t, std::vector<playback_track_metadata_v7_t> >allSessionsMetadata;
3355 for (const sp<IAfTrack>& track : mActiveTracks) {
3356 std::vector<playback_track_metadata_v7_t>& sessionMetadata =
3357 allSessionsMetadata[track->sessionId()];
3358 auto backInserter = std::back_inserter(sessionMetadata);
3359 // No track is invalid as this is called after prepareTrack_l in the same
3360 // critical section
3361 track->copyMetadataTo(backInserter);
3362 }
3363 std::vector<playback_track_metadata_v7_t> spatializedTracksMetaData;
3364 for (const auto& [session, sessionTrackMetadata] : allSessionsMetadata) {
3365 metadata.tracks.insert(metadata.tracks.end(),
3366 sessionTrackMetadata.begin(), sessionTrackMetadata.end());
3367 if (auto chain = getEffectChain_l(session) ; chain != nullptr) {
3368 chain->sendMetadata_l(sessionTrackMetadata, {});
3369 }
3370 if ((hasAudioSession_l(session) & IAfThreadBase::SPATIALIZED_SESSION) != 0) {
3371 spatializedTracksMetaData.insert(spatializedTracksMetaData.end(),
3372 sessionTrackMetadata.begin(), sessionTrackMetadata.end());
3373 }
3374 }
3375 if (auto chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); chain != nullptr) {
3376 chain->sendMetadata_l(metadata.tracks, {});
3377 }
3378 if (auto chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE); chain != nullptr) {
3379 chain->sendMetadata_l(metadata.tracks, spatializedTracksMetaData);
3380 }
3381 if (auto chain = getEffectChain_l(AUDIO_SESSION_DEVICE); chain != nullptr) {
3382 chain->sendMetadata_l(metadata.tracks, {});
3383 }
3384 } else {
3385 auto backInserter = std::back_inserter(metadata.tracks);
3386 for (const sp<IAfTrack>& track : mActiveTracks) {
3387 // No track is invalid as this is called after prepareTrack_l in the same
3388 // critical section
3389 track->copyMetadataTo(backInserter);
3390 }
3391 }
3392 sendMetadataToBackend_l(metadata);
3393 MetadataUpdate change;
3394 change.playbackMetadataUpdate = metadata.tracks;
3395 return change;
3396 }
3397
3398 void PlaybackThread::sendMetadataToBackend_l(
3399 const StreamOutHalInterface::SourceMetadata& metadata)
3400 {
3401 mOutput->stream->updateSourceMetadata(metadata);
3402 };
3403
3404 status_t PlaybackThread::getRenderPosition(
3405 uint32_t* halFrames, uint32_t* dspFrames) const
3406 {
3407 if (halFrames == NULL || dspFrames == NULL) {
3408 return BAD_VALUE;
3409 }
3410 audio_utils::lock_guard _l(mutex());
3411 if (initCheck() != NO_ERROR) {
3412 return INVALID_OPERATION;
3413 }
3414 int64_t framesWritten = mBytesWritten / mFrameSize;
3415 *halFrames = framesWritten;
3416
3417 if (isSuspended()) {
3418 // return an estimation of rendered frames when the output is suspended
3419 size_t latencyFrames = (latency_l() * mSampleRate) / 1000;
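// For example, ~40 ms of reported latency at 48 kHz gives latencyFrames = 1920, so the
// estimate below trails framesWritten by 1920 frames (clamped at 0 early in playback).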
3420 *dspFrames = (uint32_t)
3421 (framesWritten >= (int64_t)latencyFrames ? framesWritten - latencyFrames : 0);
3422 return NO_ERROR;
3423 } else {
3424 status_t status;
3425 uint64_t frames = 0;
3426 status = mOutput->getRenderPosition(&frames);
3427 *dspFrames = (uint32_t)frames;
3428 return status;
3429 }
3430 }
3431
3432 product_strategy_t PlaybackThread::getStrategyForSession_l(audio_session_t sessionId) const
3433 {
3434 // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
3435 // it is moved to correct output by audio policy manager when A2DP is connected or disconnected
3436 if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
3437 return getStrategyForStream(AUDIO_STREAM_MUSIC);
3438 }
3439 for (size_t i = 0; i < mTracks.size(); i++) {
3440 sp<IAfTrack> track = mTracks[i];
3441 if (sessionId == track->sessionId() && !track->isInvalid()) {
3442 return getStrategyForStream(track->streamType());
3443 }
3444 }
3445 return getStrategyForStream(AUDIO_STREAM_MUSIC);
3446 }
3447
3448
3449 AudioStreamOut* PlaybackThread::getOutput() const
3450 {
3451 audio_utils::lock_guard _l(mutex());
3452 return mOutput;
3453 }
3454
3455 AudioStreamOut* PlaybackThread::clearOutput()
3456 {
3457 audio_utils::lock_guard _l(mutex());
3458 AudioStreamOut *output = mOutput;
3459 mOutput = NULL;
3460 // FIXME FastMixer might also have a raw ptr to mOutputSink;
3461 // must push a NULL and wait for ack
3462 mOutputSink.clear();
3463 mPipeSink.clear();
3464 mNormalSink.clear();
3465 return output;
3466 }
3467
3468 // this method must always be called either with ThreadBase mutex() held or inside the thread loop
3469 sp<StreamHalInterface> PlaybackThread::stream() const
3470 {
3471 if (mOutput == NULL) {
3472 return NULL;
3473 }
3474 return mOutput->stream;
3475 }
3476
3477 uint32_t PlaybackThread::activeSleepTimeUs() const
3478 {
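// Duration of one normal sink buffer in microseconds, using integer math:
// e.g. 960 frames at 48 kHz -> (960 * 1000) / 48000 = 20 ms -> 20000 us.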
3479 return (uint32_t)((uint32_t)((mNormalFrameCount * 1000) / mSampleRate) * 1000);
3480 }
3481
3482 status_t PlaybackThread::setSyncEvent(const sp<SyncEvent>& event)
3483 {
3484 if (!isValidSyncEvent(event)) {
3485 return BAD_VALUE;
3486 }
3487
3488 audio_utils::lock_guard _l(mutex());
3489
3490 for (size_t i = 0; i < mTracks.size(); ++i) {
3491 sp<IAfTrack> track = mTracks[i];
3492 if (event->triggerSession() == track->sessionId()) {
3493 (void) track->setSyncEvent(event);
3494 return NO_ERROR;
3495 }
3496 }
3497
3498 return NAME_NOT_FOUND;
3499 }
3500
3501 bool PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event) const
3502 {
3503 return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE;
3504 }
3505
3506 void PlaybackThread::threadLoop_removeTracks(
3507 [[maybe_unused]] const Vector<sp<IAfTrack>>& tracksToRemove)
3508 {
3509 // Miscellaneous track cleanup when removed from the active list,
3510 // called without Thread lock but synchronized with threadLoop processing.
3511 #ifdef ADD_BATTERY_DATA
3512 for (const auto& track : tracksToRemove) {
3513 if (track->isExternalTrack()) {
3514 // to track the speaker usage
3515 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
3516 }
3517 }
3518 #endif
3519 }
3520
3521 void PlaybackThread::checkSilentMode_l()
3522 {
3523 if (!mMasterMute) {
3524 char value[PROPERTY_VALUE_MAX];
3525 if (mOutDeviceTypeAddrs.empty()) {
3526 ALOGD("ro.audio.silent is ignored since no output device is set");
3527 return;
3528 }
3529 if (isSingleDeviceType(outDeviceTypes_l(), AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) {
3530 ALOGD("ro.audio.silent will be ignored for threads on AUDIO_DEVICE_OUT_REMOTE_SUBMIX");
3531 return;
3532 }
3533 if (property_get("ro.audio.silent", value, "0") > 0) {
3534 char *endptr;
3535 unsigned long ul = strtoul(value, &endptr, 0);
3536 if (*endptr == '\0' && ul != 0) {
3537 ALOGW("%s: mute from ro.audio.silent. Silence is golden", __func__);
3538 // The setprop command will not allow a property to be changed after
3539 // the first time it is set, so we don't have to worry about un-muting.
3540 setMasterMute_l(true);
3541 }
3542 }
3543 }
3544 }
3545
3546 // shared by MIXER and DIRECT, overridden by DUPLICATING
3547 ssize_t PlaybackThread::threadLoop_write()
3548 {
3549 LOG_HIST_TS();
3550 mInWrite = true;
3551 ssize_t bytesWritten;
3552 const size_t offset = mCurrentWriteLength - mBytesRemaining;
3553
3554 // If an NBAIO sink is present, use it to write the normal mixer's submix
3555 if (mNormalSink != 0) {
3556
3557 const size_t count = mBytesRemaining / mFrameSize;
3558
3559 ATRACE_BEGIN("write");
3560 // update the setpoint when AudioFlinger::mScreenState changes
3561 const uint32_t screenState = mAfThreadCallback->getScreenState();
3562 if (screenState != mScreenState) {
3563 mScreenState = screenState;
3564 MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
3565 if (pipe != NULL) {
3566 pipe->setAvgFrames((mScreenState & 1) ?
3567 (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
3568 }
3569 }
3570 ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count);
3571 ATRACE_END();
3572
3573 if (framesWritten > 0) {
3574 bytesWritten = framesWritten * mFrameSize;
3575
3576 #ifdef TEE_SINK
3577 mTee.write((char *)mSinkBuffer + offset, framesWritten);
3578 #endif
3579 } else {
3580 bytesWritten = framesWritten;
3581 }
3582 // otherwise use the HAL / AudioStreamOut directly
3583 } else {
3584 // Direct output and offload threads
3585
3586 if (mUseAsyncWrite) {
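// mWriteAckSequence uses bit 0 as an "ack pending" flag: bump the sequence by 2 and set
// bit 0 before writing; resetWriteBlocked() clears bit 0 only when the callback reports
// this same sequence, so stale acks are ignored.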
3587 ALOGW_IF(mWriteAckSequence & 1, "threadLoop_write(): out of sequence write request");
3588 mWriteAckSequence += 2;
3589 mWriteAckSequence |= 1;
3590 ALOG_ASSERT(mCallbackThread != 0);
3591 mCallbackThread->setWriteBlocked(mWriteAckSequence);
3592 }
3593 ATRACE_BEGIN("write");
3594 // FIXME We should have an implementation of timestamps for direct output threads.
3595 // They are used e.g for multichannel PCM playback over HDMI.
3596 bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
3597 ATRACE_END();
3598
3599 if (mUseAsyncWrite &&
3600 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
3601 // do not wait for the async callback in case of an error or a full write
3602 mWriteAckSequence &= ~1;
3603 ALOG_ASSERT(mCallbackThread != 0);
3604 mCallbackThread->setWriteBlocked(mWriteAckSequence);
3605 }
3606 }
3607
3608 mNumWrites++;
3609 mInWrite = false;
3610 if (mStandby) {
3611 mThreadMetrics.logBeginInterval();
3612 mThreadSnapshot.onBegin();
3613 mStandby = false;
3614 }
3615 return bytesWritten;
3616 }
3617
3618 // startMelComputation_l() must be called with AudioFlinger::mutex() held
3619 void PlaybackThread::startMelComputation_l(
3620 const sp<audio_utils::MelProcessor>& processor)
3621 {
3622 auto outputSink = static_cast<AudioStreamOutSink*>(mOutputSink.get());
3623 if (outputSink != nullptr) {
3624 outputSink->startMelComputation(processor);
3625 }
3626 }
3627
3628 // stopMelComputation_l() must be called with AudioFlinger::mutex() held
3629 void PlaybackThread::stopMelComputation_l()
3630 {
3631 auto outputSink = static_cast<AudioStreamOutSink*>(mOutputSink.get());
3632 if (outputSink != nullptr) {
3633 outputSink->stopMelComputation();
3634 }
3635 }
3636
3637 void PlaybackThread::threadLoop_drain()
3638 {
3639 bool supportsDrain = false;
3640 if (mOutput->stream->supportsDrain(&supportsDrain) == OK && supportsDrain) {
3641 ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
3642 if (mUseAsyncWrite) {
3643 ALOGW_IF(mDrainSequence & 1, "threadLoop_drain(): out of sequence drain request");
3644 mDrainSequence |= 1;
3645 ALOG_ASSERT(mCallbackThread != 0);
3646 mCallbackThread->setDraining(mDrainSequence);
3647 }
3648 status_t result = mOutput->stream->drain(mMixerStatus == MIXER_DRAIN_TRACK);
3649 ALOGE_IF(result != OK, "Error when draining stream: %d", result);
3650 }
3651 }
3652
3653 void PlaybackThread::threadLoop_exit()
3654 {
3655 {
3656 audio_utils::lock_guard _l(mutex());
3657 for (size_t i = 0; i < mTracks.size(); i++) {
3658 sp<IAfTrack> track = mTracks[i];
3659 track->invalidate();
3660 }
3661 // Clear ActiveTracks to update BatteryNotifier in case active tracks remain.
3662 // After we exit there are no more track changes sent to BatteryNotifier
3663 // because that requires an active threadLoop.
3664 // TODO: should we decActiveTrackCnt() of the cleared track effect chain?
3665 mActiveTracks.clear();
3666 }
3667 }
3668
3669 /*
3670 The derived values that are cached:
3671 - mSinkBufferSize from frame count * frame size
3672 - mActiveSleepTimeUs from activeSleepTimeUs()
3673 - mIdleSleepTimeUs from idleSleepTimeUs()
3674 - mStandbyDelayNs from mActiveSleepTimeUs (DIRECT only) or forced to at least
3675 kDefaultStandbyTimeInNsecs when connected to an A2DP device.
3676 - maxPeriod from frame count and sample rate (MIXER only)
3677
3678 The parameters that affect these derived values are:
3679 - frame count
3680 - frame size
3681 - sample rate
3682 - device type: A2DP or not
3683 - device latency
3684 - format: PCM or not
3685 - active sleep time
3686 - idle sleep time
3687 */
3688
3689 void PlaybackThread::cacheParameters_l()
3690 {
3691 mSinkBufferSize = mNormalFrameCount * mFrameSize;
3692 mActiveSleepTimeUs = activeSleepTimeUs();
3693 mIdleSleepTimeUs = idleSleepTimeUs();
3694
3695 mStandbyDelayNs = getStandbyTimeInNanos();
3696
3697 // make sure standby delay is not too short when connected to an A2DP sink to avoid
3698 // truncating audio when going to standby.
3699 if (!Intersection(outDeviceTypes_l(), getAudioDeviceOutAllA2dpSet()).empty()) {
3700 if (mStandbyDelayNs < kDefaultStandbyTimeInNsecs) {
3701 mStandbyDelayNs = kDefaultStandbyTimeInNsecs;
3702 }
3703 }
3704 }
3705
3706 bool PlaybackThread::invalidateTracks_l(audio_stream_type_t streamType)
3707 {
3708 ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %zu",
3709 this, streamType, mTracks.size());
3710 bool trackMatch = false;
3711 size_t size = mTracks.size();
3712 for (size_t i = 0; i < size; i++) {
3713 sp<IAfTrack> t = mTracks[i];
3714 if (t->streamType() == streamType && t->isExternalTrack()) {
3715 t->invalidate();
3716 trackMatch = true;
3717 }
3718 }
3719 return trackMatch;
3720 }
3721
3722 void PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
3723 {
3724 audio_utils::lock_guard _l(mutex());
3725 invalidateTracks_l(streamType);
3726 }
3727
3728 void PlaybackThread::invalidateTracks(std::set<audio_port_handle_t>& portIds) {
3729 audio_utils::lock_guard _l(mutex());
3730 invalidateTracks_l(portIds);
3731 }
3732
3733 bool PlaybackThread::invalidateTracks_l(std::set<audio_port_handle_t>& portIds) {
3734 bool trackMatch = false;
3735 const size_t size = mTracks.size();
3736 for (size_t i = 0; i < size; i++) {
3737 sp<IAfTrack> t = mTracks[i];
3738 if (t->isExternalTrack() && portIds.find(t->portId()) != portIds.end()) {
3739 t->invalidate();
3740 portIds.erase(t->portId());
3741 trackMatch = true;
3742 }
3743 if (portIds.empty()) {
3744 break;
3745 }
3746 }
3747 return trackMatch;
3748 }
3749
3750 // getTrackById_l must be called with holding thread lock
3751 IAfTrack* PlaybackThread::getTrackById_l(
3752 audio_port_handle_t trackPortId) {
3753 for (size_t i = 0; i < mTracks.size(); i++) {
3754 if (mTracks[i]->portId() == trackPortId) {
3755 return mTracks[i].get();
3756 }
3757 }
3758 return nullptr;
3759 }
3760
3761 status_t PlaybackThread::addEffectChain_l(const sp<IAfEffectChain>& chain)
3762 {
3763 audio_session_t session = chain->sessionId();
3764 sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
3765 float *buffer = nullptr; // only used for non global sessions
3766
3767 if (mType == SPATIALIZER) {
3768 if (!audio_is_global_session(session)) {
3769 // player sessions on a spatializer output will use a dedicated input buffer and
3770 // will either output multichannel audio to mEffectBuffer if the track is spatialized
3771 // or stereo to mPostSpatializerBuffer if not spatialized.
3772 uint32_t channelMask;
3773 bool isSessionSpatialized =
3774 (hasAudioSession_l(session) & ThreadBase::SPATIALIZED_SESSION) != 0;
3775 if (isSessionSpatialized) {
3776 channelMask = mMixerChannelMask;
3777 } else {
3778 channelMask = mChannelMask;
3779 }
3780 size_t numSamples = mNormalFrameCount
3781 * (audio_channel_count_from_out_mask(channelMask) + mHapticChannelCount);
3782 status_t result = mAfThreadCallback->getEffectsFactoryHal()->allocateBuffer(
3783 numSamples * sizeof(float),
3784 &halInBuffer);
3785 if (result != OK) return result;
3786
3787 result = mAfThreadCallback->getEffectsFactoryHal()->mirrorBuffer(
3788 isSessionSpatialized ? mEffectBuffer : mPostSpatializerBuffer,
3789 isSessionSpatialized ? mEffectBufferSize : mPostSpatializerBufferSize,
3790 &halOutBuffer);
3791 if (result != OK) return result;
3792
3793 buffer = halInBuffer ? halInBuffer->audioBuffer()->f32 : buffer;
3794
3795 ALOGV("addEffectChain_l() creating new input buffer %p session %d",
3796 buffer, session);
3797 } else {
3798 // A global session on a SPATIALIZER thread is either OUTPUT_STAGE or DEVICE
3799 // - OUTPUT_STAGE session uses the mEffectBuffer as input buffer and
3800 // mPostSpatializerBuffer as output buffer
3801 // - DEVICE session uses the mPostSpatializerBuffer as input and output buffer.
3802 status_t result = mAfThreadCallback->getEffectsFactoryHal()->mirrorBuffer(
3803 mEffectBuffer, mEffectBufferSize, &halInBuffer);
3804 if (result != OK) return result;
3805 result = mAfThreadCallback->getEffectsFactoryHal()->mirrorBuffer(
3806 mPostSpatializerBuffer, mPostSpatializerBufferSize, &halOutBuffer);
3807 if (result != OK) return result;
3808
3809 if (session == AUDIO_SESSION_DEVICE) {
3810 halInBuffer = halOutBuffer;
3811 }
3812 }
3813 } else {
3814 status_t result = mAfThreadCallback->getEffectsFactoryHal()->mirrorBuffer(
3815 mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
3816 mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
3817 &halInBuffer);
3818 if (result != OK) return result;
3819 halOutBuffer = halInBuffer;
3820 ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
3821 if (!audio_is_global_session(session)) {
3822 buffer = halInBuffer ? reinterpret_cast<float*>(halInBuffer->externalData())
3823 : buffer;
3824 // Only one effect chain can be present in direct output thread and it uses
3825 // the sink buffer as input
3826 if (mType != DIRECT) {
3827 size_t numSamples = mNormalFrameCount
3828 * (audio_channel_count_from_out_mask(mMixerChannelMask)
3829 + mHapticChannelCount);
3830 const status_t allocateStatus =
3831 mAfThreadCallback->getEffectsFactoryHal()->allocateBuffer(
3832 numSamples * sizeof(float),
3833 &halInBuffer);
3834 if (allocateStatus != OK) return allocateStatus;
3835
3836 buffer = halInBuffer ? halInBuffer->audioBuffer()->f32 : buffer;
3837 ALOGV("addEffectChain_l() creating new input buffer %p session %d",
3838 buffer, session);
3839 }
3840 }
3841 }
3842
3843 if (!audio_is_global_session(session)) {
3844 // Attach all tracks with same session ID to this chain.
3845 for (size_t i = 0; i < mTracks.size(); ++i) {
3846 sp<IAfTrack> track = mTracks[i];
3847 if (session == track->sessionId()) {
3848 ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p",
3849 track.get(), buffer);
3850 track->setMainBuffer(buffer);
3851 chain->incTrackCnt();
3852 }
3853 }
3854
3855 // indicate all active tracks in the chain
3856 for (const sp<IAfTrack>& track : mActiveTracks) {
3857 if (session == track->sessionId()) {
3858 ALOGV("addEffectChain_l() activating track %p on session %d",
3859 track.get(), session);
3860 chain->incActiveTrackCnt();
3861 }
3862 }
3863 }
3864
3865 chain->setThread(this);
3866 chain->setInBuffer(halInBuffer);
3867 chain->setOutBuffer(halOutBuffer);
3868 // Effect chain for session AUDIO_SESSION_DEVICE is inserted at end of effect
3869 // chains list in order to be processed last as it contains output device effects.
3870 // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted just before to apply post
3871 // processing effects specific to an output stream before effects applied to all streams
3872 // routed to a given device.
3873 // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
3874 // session AUDIO_SESSION_OUTPUT_STAGE to be processed
3875 // after track specific effects and before output stage.
3876 // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
3877 // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX.
3878 // Effect chain for other sessions are inserted at beginning of effect
3879 // chains list to be processed before output mix effects. Relative order between other
3880 // sessions is not important.
3881 static_assert(AUDIO_SESSION_OUTPUT_MIX == 0 &&
3882 AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX &&
3883 AUDIO_SESSION_DEVICE < AUDIO_SESSION_OUTPUT_STAGE,
3884 "audio_session_t constants misdefined");
3885 size_t size = mEffectChains.size();
3886 size_t i = 0;
3887 for (i = 0; i < size; i++) {
3888 if (mEffectChains[i]->sessionId() < session) {
3889 break;
3890 }
3891 }
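// The loop above keeps mEffectChains sorted by decreasing session ID, so after insertAt()
// the processing order is: per-app sessions, then OUTPUT_MIX, OUTPUT_STAGE and finally
// DEVICE, matching the ordering described in the comment above.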
3892 mEffectChains.insertAt(chain, i);
3893 checkSuspendOnAddEffectChain_l(chain);
3894
3895 return NO_ERROR;
3896 }
3897
3898 size_t PlaybackThread::removeEffectChain_l(const sp<IAfEffectChain>& chain)
3899 {
3900 audio_session_t session = chain->sessionId();
3901
3902 ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
3903
3904 for (size_t i = 0; i < mEffectChains.size(); i++) {
3905 if (chain == mEffectChains[i]) {
3906 mEffectChains.removeAt(i);
3907 // detach all active tracks from the chain
3908 for (const sp<IAfTrack>& track : mActiveTracks) {
3909 if (session == track->sessionId()) {
3910 ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d",
3911 chain.get(), session);
3912 chain->decActiveTrackCnt();
3913 }
3914 }
3915
3916 // detach all tracks with same session ID from this chain
3917 for (size_t j = 0; j < mTracks.size(); ++j) {
3918 sp<IAfTrack> track = mTracks[j];
3919 if (session == track->sessionId()) {
3920 track->setMainBuffer(reinterpret_cast<float*>(mSinkBuffer));
3921 chain->decTrackCnt();
3922 }
3923 }
3924 break;
3925 }
3926 }
3927 return mEffectChains.size();
3928 }
3929
3930 status_t PlaybackThread::attachAuxEffect(
3931 const sp<IAfTrack>& track, int EffectId)
3932 {
3933 audio_utils::lock_guard _l(mutex());
3934 return attachAuxEffect_l(track, EffectId);
3935 }
3936
3937 status_t PlaybackThread::attachAuxEffect_l(
3938 const sp<IAfTrack>& track, int EffectId)
3939 {
3940 status_t status = NO_ERROR;
3941
3942 if (EffectId == 0) {
3943 track->setAuxBuffer(0, NULL);
3944 } else {
3945 // Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX
3946 sp<IAfEffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
3947 if (effect != 0) {
3948 if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
3949 track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer());
3950 } else {
3951 status = INVALID_OPERATION;
3952 }
3953 } else {
3954 status = BAD_VALUE;
3955 }
3956 }
3957 return status;
3958 }
3959
3960 void PlaybackThread::detachAuxEffect_l(int effectId)
3961 {
3962 for (size_t i = 0; i < mTracks.size(); ++i) {
3963 sp<IAfTrack> track = mTracks[i];
3964 if (track->auxEffectId() == effectId) {
3965 attachAuxEffect_l(track, 0);
3966 }
3967 }
3968 }
3969
3970 bool PlaybackThread::threadLoop()
3971 NO_THREAD_SAFETY_ANALYSIS // manual locking of AudioFlinger
3972 {
3973 aflog::setThreadWriter(mNBLogWriter.get());
3974
3975 if (mType == SPATIALIZER) {
3976 const pid_t tid = getTid();
3977 if (tid == -1) { // odd: we are here, we must be a running thread.
3978 ALOGW("%s: Cannot update Spatializer mixer thread priority, no tid", __func__);
3979 } else {
3980 const int priorityBoost = requestSpatializerPriority(getpid(), tid);
3981 if (priorityBoost > 0) {
3982 stream()->setHalThreadPriority(priorityBoost);
3983 }
3984 }
3985 } else if (property_get_bool("ro.boot.container", false /* default_value */)) {
3986 // In ARC experiments (b/73091832), the latency achieved with the CFS scheduler at any
3987 // priority is not low enough for PlaybackThread to process audio data in time. We request
3988 // the lowest real-time priority, SCHED_FIFO=1, for PlaybackThread in ARC. ro.boot.container
3989 // is true only on ARC.
3990 const pid_t tid = getTid();
3991 if (tid == -1) {
3992 ALOGW("%s: Cannot update PlaybackThread priority for ARC, no tid", __func__);
3993 } else {
3994 const status_t status = requestPriority(getpid(),
3995 tid,
3996 kPriorityPlaybackThreadArc,
3997 false /* isForApp */,
3998 true /* asynchronous */);
3999 if (status != OK) {
4000 ALOGW("%s: Cannot update PlaybackThread priority for ARC, status %d", __func__,
4001 status);
4002 } else {
4003 stream()->setHalThreadPriority(kPriorityPlaybackThreadArc);
4004 }
4005 }
4006 }
4007
4008 Vector<sp<IAfTrack>> tracksToRemove;
4009
4010 mStandbyTimeNs = systemTime();
4011 int64_t lastLoopCountWritten = -2; // never matches "previous" loop, when loopCount = 0.
4012
4013 // MIXER
4014 nsecs_t lastWarning = 0;
4015
4016 // DUPLICATING
4017 // FIXME could this be made local to while loop?
4018 writeFrames = 0;
4019
4020 cacheParameters_l();
4021 mSleepTimeUs = mIdleSleepTimeUs;
4022
4023 if (mType == MIXER || mType == SPATIALIZER) {
4024 sleepTimeShift = 0;
4025 }
4026
4027 CpuStats cpuStats;
4028 const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
4029
4030 acquireWakeLock();
4031
4032 // mNBLogWriter logging APIs can only be called by a single thread, typically the
4033 // thread associated with this PlaybackThread.
4034 // If you want to share the mNBLogWriter with other threads (for example, binder threads)
4035 // then all such threads must agree to hold a common mutex before logging.
4036 // So if you need to log when mutex is unlocked, set logString to a non-NULL string,
4037 // and then that string will be logged at the next convenient opportunity.
4038 // See reference to logString below.
4039 const char *logString = NULL;
4040
4041 // Estimated time for the next buffer to be written to the HAL. This is used only in
4042 // suspended mode (for now) to help schedule the wait time until the next iteration.
4043 nsecs_t timeLoopNextNs = 0;
4044
4045 checkSilentMode_l();
4046
4047 audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
4048
4049 sendCheckOutputStageEffectsEvent();
4050
4051 // loopCount is used for statistics and diagnostics.
4052 for (int64_t loopCount = 0; !exitPending(); ++loopCount)
4053 {
4054 // Log merge requests are performed during AudioFlinger binder transactions, but
4055 // that does not cover audio playback. It's requested here for that reason.
4056 mAfThreadCallback->requestLogMerge();
4057
4058 cpuStats.sample(myName);
4059
4060 Vector<sp<IAfEffectChain>> effectChains;
4061 audio_session_t activeHapticSessionId = AUDIO_SESSION_NONE;
4062 bool isHapticSessionSpatialized = false;
4063 std::vector<sp<IAfTrack>> activeTracks;
4064
4065 // If the device is AUDIO_DEVICE_OUT_BUS, check for downstream latency.
4066 //
4067 // Note: we access outDeviceTypes() outside of mutex().
4068 if (isMsdDevice() && outDeviceTypes_l().count(AUDIO_DEVICE_OUT_BUS) != 0) {
4069 // Here, we try for the AF lock, but do not block on it as the latency
4070 // is more informational.
4071 if (mAfThreadCallback->mutex().try_lock()) {
4072 std::vector<SoftwarePatch> swPatches;
4073 double latencyMs = 0.; // not required; initialized for clang-tidy
4074 status_t status = INVALID_OPERATION;
4075 audio_patch_handle_t downstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
4076 if (mAfThreadCallback->getPatchPanel()->getDownstreamSoftwarePatches(
4077 id(), &swPatches) == OK
4078 && swPatches.size() > 0) {
4079 status = swPatches[0].getLatencyMs_l(&latencyMs);
4080 downstreamPatchHandle = swPatches[0].getPatchHandle();
4081 }
4082 if (downstreamPatchHandle != lastDownstreamPatchHandle) {
4083 mDownstreamLatencyStatMs.reset();
4084 lastDownstreamPatchHandle = downstreamPatchHandle;
4085 }
4086 if (status == OK) {
4087 // verify downstream latency (we assume a max reasonable
4088 // latency of 5 seconds).
4089 const double minLatency = 0., maxLatency = 5000.;
4090 if (latencyMs >= minLatency && latencyMs <= maxLatency) {
4091 ALOGVV("new downstream latency %lf ms", latencyMs);
4092 } else {
4093 ALOGD("out of range downstream latency %lf ms", latencyMs);
4094 latencyMs = std::clamp(latencyMs, minLatency, maxLatency);
4095 }
4096 mDownstreamLatencyStatMs.add(latencyMs);
4097 }
4098 mAfThreadCallback->mutex().unlock();
4099 }
4100 } else {
4101 if (lastDownstreamPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
4102 // our device is no longer AUDIO_DEVICE_OUT_BUS, reset patch handle and stats.
4103 mDownstreamLatencyStatMs.reset();
4104 lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
4105 }
4106 }
4107
4108 if (mCheckOutputStageEffects.exchange(false)) {
4109 checkOutputStageEffects();
4110 }
4111
4112 MetadataUpdate metadataUpdate;
4113 { // scope for mutex()
4114
4115 audio_utils::unique_lock _l(mutex());
4116
4117 processConfigEvents_l();
4118 if (mCheckOutputStageEffects.load()) {
4119 continue;
4120 }
4121
4122 // See comment at declaration of logString for why this is done under mutex()
4123 if (logString != NULL) {
4124 mNBLogWriter->logTimestamp();
4125 mNBLogWriter->log(logString);
4126 logString = NULL;
4127 }
4128
4129 collectTimestamps_l();
4130
4131 saveOutputTracks();
4132 if (mSignalPending) {
4133 // A signal was raised while we were unlocked
4134 mSignalPending = false;
4135 } else if (waitingAsyncCallback_l()) {
4136 if (exitPending()) {
4137 break;
4138 }
4139 bool released = false;
4140 if (!keepWakeLock()) {
4141 releaseWakeLock_l();
4142 released = true;
4143 }
4144
4145 const int64_t waitNs = computeWaitTimeNs_l();
4146 ALOGV("wait async completion (wait time: %lld)", (long long)waitNs);
4147 std::cv_status cvstatus =
4148 mWaitWorkCV.wait_for(_l, std::chrono::nanoseconds(waitNs));
4149 if (cvstatus == std::cv_status::timeout) {
4150 mSignalPending = true; // if timeout recheck everything
4151 }
4152 ALOGV("async completion/wake");
4153 if (released) {
4154 acquireWakeLock_l();
4155 }
4156 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
4157 mSleepTimeUs = 0;
4158
4159 continue;
4160 }
4161 if ((mActiveTracks.isEmpty() && systemTime() > mStandbyTimeNs) ||
4162 isSuspended()) {
4163 // put audio hardware into standby after short delay
4164 if (shouldStandby_l()) {
4165
4166 threadLoop_standby();
4167
4168 // This is where we go into standby
4169 if (!mStandby) {
4170 LOG_AUDIO_STATE();
4171 mThreadMetrics.logEndInterval();
4172 mThreadSnapshot.onEnd();
4173 setStandby_l();
4174 }
4175 sendStatistics(false /* force */);
4176 }
4177
4178 if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
4179 // we're about to wait, flush the binder command buffer
4180 IPCThreadState::self()->flushCommands();
4181
4182 clearOutputTracks();
4183
4184 if (exitPending()) {
4185 break;
4186 }
4187
4188 releaseWakeLock_l();
4189 // wait until we have something to do...
4190 ALOGV("%s going to sleep", myName.c_str());
4191 mWaitWorkCV.wait(_l);
4192 ALOGV("%s waking up", myName.c_str());
4193 acquireWakeLock_l();
4194
4195 mMixerStatus = MIXER_IDLE;
4196 mMixerStatusIgnoringFastTracks = MIXER_IDLE;
4197 mBytesWritten = 0;
4198 mBytesRemaining = 0;
4199 checkSilentMode_l();
4200
4201 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
4202 mSleepTimeUs = mIdleSleepTimeUs;
4203 if (mType == MIXER || mType == SPATIALIZER) {
4204 sleepTimeShift = 0;
4205 }
4206
4207 continue;
4208 }
4209 }
4210 // mMixerStatusIgnoringFastTracks is also updated internally
4211 mMixerStatus = prepareTracks_l(&tracksToRemove);
4212
4213 mActiveTracks.updatePowerState_l(this);
4214
4215 metadataUpdate = updateMetadata_l();
4216
4217 // Acquire a local copy of active tracks with lock (release w/o lock).
4218 //
4219 // Control methods on the track acquire the ThreadBase lock (e.g. start()
4220 // stop(), pause(), etc.), but the threadLoop is entitled to call audio
4221 // data / buffer methods on tracks from activeTracks without the ThreadBase lock.
4222 activeTracks.insert(activeTracks.end(), mActiveTracks.begin(), mActiveTracks.end());
4223
4224 setHalLatencyMode_l();
4225
4226 // updateTeePatches_l will acquire the ThreadBase_Mutex of other threads,
4227 // so this is done before we lock our effect chains.
4228 for (const auto& track : mActiveTracks) {
4229 track->updateTeePatches_l();
4230 }
4231
4232 // signal actual start of output stream when the render position reported by
4233 // the kernel starts moving.
4234 if (!mHalStarted && ((isSuspended() && (mBytesWritten != 0)) || (!mStandby
4235 && (mKernelPositionOnStandby
4236 != mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL])))) {
4237 mHalStarted = true;
4238 mWaitHalStartCV.notify_all();
4239 }
4240
4241 // prevent any changes in effect chain list and in each effect chain
4242 // during mixing and effect process as the audio buffers could be deleted
4243 // or modified if an effect is created or deleted
4244 lockEffectChains_l(effectChains);
4245
4246 // Determine which session to pick up haptic data.
4247 // This must be done under the same lock as prepareTracks_l().
4248 // Haptic data from an effect takes priority over haptic data from a track.
4249 // TODO: Write haptic data directly to sink buffer when mixing.
4250 if (mHapticChannelCount > 0) {
4251 for (const auto& track : mActiveTracks) {
4252 sp<IAfEffectChain> effectChain = getEffectChain_l(track->sessionId());
4253 if (effectChain != nullptr
4254 && effectChain->containsHapticGeneratingEffect_l()) {
4255 activeHapticSessionId = track->sessionId();
4256 isHapticSessionSpatialized =
4257 mType == SPATIALIZER && track->isSpatialized();
4258 break;
4259 }
4260 if (activeHapticSessionId == AUDIO_SESSION_NONE
4261 && track->getHapticPlaybackEnabled()) {
4262 activeHapticSessionId = track->sessionId();
4263 isHapticSessionSpatialized =
4264 mType == SPATIALIZER && track->isSpatialized();
4265 }
4266 }
4267 }
4268 } // mutex() scope ends
4269
4270 if (mBytesRemaining == 0) {
4271 mCurrentWriteLength = 0;
4272 if (mMixerStatus == MIXER_TRACKS_READY) {
4273 // threadLoop_mix() sets mCurrentWriteLength
4274 threadLoop_mix();
4275 } else if ((mMixerStatus != MIXER_DRAIN_TRACK)
4276 && (mMixerStatus != MIXER_DRAIN_ALL)) {
4277 // threadLoop_sleepTime sets mSleepTimeUs to 0 if data
4278 // must be written to HAL
4279 threadLoop_sleepTime();
4280 if (mSleepTimeUs == 0) {
4281 mCurrentWriteLength = mSinkBufferSize;
4282
4283 // Tally underrun frames as we are inserting 0s here.
4284 for (const auto& track : activeTracks) {
4285 if (track->fillingStatus() == IAfTrack::FS_ACTIVE
4286 && !track->isStopped()
4287 && !track->isPaused()
4288 && !track->isTerminated()) {
4289 ALOGV("%s: track(%d) %s underrun due to thread sleep of %zu frames",
4290 __func__, track->id(), track->getTrackStateAsString(),
4291 mNormalFrameCount);
4292 track->audioTrackServerProxy()->tallyUnderrunFrames(
4293 mNormalFrameCount);
4294 }
4295 }
4296 }
4297 }
4298 // Either threadLoop_mix() or threadLoop_sleepTime() should have set
4299 // mMixerBuffer with data if mMixerBufferValid is true and mSleepTimeUs == 0.
4300 // Merge mMixerBuffer data into mEffectBuffer (if any effects are valid)
4301 // or mSinkBuffer (if there are no effects and there is no data already copied to
4302 // mSinkBuffer).
4303 //
4304 // This is done pre-effects computation; if effects change to
4305 // support higher precision, this needs to move.
4306 //
4307 // mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
4308 // TODO use mSleepTimeUs == 0 as an additional condition.
4309 uint32_t mixerChannelCount = mEffectBufferValid ?
4310 audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
4311 if (mMixerBufferValid && (mEffectBufferValid || !mHasDataCopiedToSinkBuffer)) {
4312 void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
4313 audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
4314
4315 // Apply mono blending and balancing if the effect buffer is not valid. Otherwise,
4316 // do these processes after effects are applied.
4317 if (!mEffectBufferValid) {
4318 // mono blend occurs for mixer threads only (not direct or offloaded)
4319 // and is handled here if we're going directly to the sink.
4320 if (requireMonoBlend()) {
4321 mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount,
4322 mNormalFrameCount, true /*limit*/);
4323 }
4324
4325 if (!hasFastMixer()) {
4326 // Balance must take effect after mono conversion.
4327 // We do it here if there is no FastMixer.
4328 // mBalance detects zero balance within the class for speed
4329 // (not needed here).
4330 mBalance.setBalance(mMasterBalance.load());
4331 mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
4332 }
4333 }
4334
4335 memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
4336 mNormalFrameCount * (mixerChannelCount + mHapticChannelCount));
4337
4338 // If we're going directly to the sink and there are haptic channels,
4339 // we should adjust channels as the sample data is partially interleaved
4340 // in this case.
4341 if (!mEffectBufferValid && mHapticChannelCount > 0) {
4342 adjust_channels_non_destructive(buffer, mChannelCount, buffer,
4343 mChannelCount + mHapticChannelCount,
4344 audio_bytes_per_sample(format),
4345 audio_bytes_per_frame(mChannelCount, format) * mNormalFrameCount);
4346 }
4347 }
4348
4349 mBytesRemaining = mCurrentWriteLength;
4350 if (isSuspended()) {
4351 // Simulate write to HAL when suspended (e.g. BT SCO phone call).
4352 mSleepTimeUs = suspendSleepTimeUs(); // assumes full buffer.
4353 const size_t framesRemaining = mBytesRemaining / mFrameSize;
4354 mBytesWritten += mBytesRemaining;
4355 mFramesWritten += framesRemaining;
4356 mSuspendedFrames += framesRemaining; // to adjust kernel HAL position
4357 mBytesRemaining = 0;
4358 }
4359
4360 // only process effects if we're going to write
4361 if (mSleepTimeUs == 0 && mType != OFFLOAD) {
4362 for (size_t i = 0; i < effectChains.size(); i ++) {
4363 effectChains[i]->process_l();
4364 // TODO: Write haptic data directly to sink buffer when mixing.
4365 if (activeHapticSessionId != AUDIO_SESSION_NONE
4366 && activeHapticSessionId == effectChains[i]->sessionId()) {
4367 // Haptic data is active in this case, copy it directly from
4368 // in buffer to out buffer.
4369 uint32_t hapticSessionChannelCount = mEffectBufferValid ?
4370 audio_channel_count_from_out_mask(mMixerChannelMask) :
4371 mChannelCount;
4372 if (mType == SPATIALIZER && !isHapticSessionSpatialized) {
4373 hapticSessionChannelCount = mChannelCount;
4374 }
4375
4376 const size_t audioBufferSize = mNormalFrameCount
4377 * audio_bytes_per_frame(hapticSessionChannelCount,
4378 AUDIO_FORMAT_PCM_FLOAT);
4379 memcpy_by_audio_format(
4380 (uint8_t*)effectChains[i]->outBuffer() + audioBufferSize,
4381 AUDIO_FORMAT_PCM_FLOAT,
4382 (const uint8_t*)effectChains[i]->inBuffer() + audioBufferSize,
4383 AUDIO_FORMAT_PCM_FLOAT, mNormalFrameCount * mHapticChannelCount);
4384 }
4385 }
4386 }
4387 }
4388         // Process effect chains for an offloaded thread even if no audio
4389         // was read from the audio track: process only updates effect state
4390         // and thus does not have to be synchronized with audio writes, but may have
4391         // to be called while waiting for the async write callback
4392 if (mType == OFFLOAD) {
4393 for (size_t i = 0; i < effectChains.size(); i ++) {
4394 effectChains[i]->process_l();
4395 }
4396 }
4397
4398     // Only if the effects buffer is enabled and it holds valid data
4399     // (mEffectBufferValid) do we need to
4400     // copy it into the sink buffer.
4401 // TODO use mSleepTimeUs == 0 as an additional condition.
4402 if (mEffectBufferValid && !mHasDataCopiedToSinkBuffer) {
4403 //ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
4404 void *effectBuffer = (mType == SPATIALIZER) ? mPostSpatializerBuffer : mEffectBuffer;
4405 if (requireMonoBlend()) {
4406 mono_blend(effectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
4407 true /*limit*/);
4408 }
4409
4410 if (!hasFastMixer()) {
4411 // Balance must take effect after mono conversion.
4412 // We do it here if there is no FastMixer.
4413 // mBalance detects zero balance within the class for speed (not needed here).
4414 mBalance.setBalance(mMasterBalance.load());
4415 mBalance.process((float *)effectBuffer, mNormalFrameCount);
4416 }
4417
4418 // for SPATIALIZER thread, Move haptics channels from mEffectBuffer to
4419 // mPostSpatializerBuffer if the haptics track is spatialized.
4420 // Otherwise, the haptics channels are already in mPostSpatializerBuffer.
4421 // For other thread types, the haptics channels are already in mEffectBuffer.
4422 if (mType == SPATIALIZER && isHapticSessionSpatialized) {
4423 const size_t srcBufferSize = mNormalFrameCount *
4424 audio_bytes_per_frame(audio_channel_count_from_out_mask(mMixerChannelMask),
4425 mEffectBufferFormat);
4426 const size_t dstBufferSize = mNormalFrameCount
4427 * audio_bytes_per_frame(mChannelCount, mEffectBufferFormat);
4428
4429 memcpy_by_audio_format((uint8_t*)mPostSpatializerBuffer + dstBufferSize,
4430 mEffectBufferFormat,
4431 (uint8_t*)mEffectBuffer + srcBufferSize,
4432 mEffectBufferFormat,
4433 mNormalFrameCount * mHapticChannelCount);
4434 }
4435 const size_t framesToCopy = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
4436 if (mFormat == AUDIO_FORMAT_PCM_FLOAT &&
4437 mEffectBufferFormat == AUDIO_FORMAT_PCM_FLOAT) {
4438 // Clamp PCM float values more than this distance from 0 to insulate
4439 // a HAL which doesn't handle NaN correctly.
4440 static constexpr float HAL_FLOAT_SAMPLE_LIMIT = 2.0f;
4441 memcpy_to_float_from_float_with_clamping(static_cast<float*>(mSinkBuffer),
4442 static_cast<const float*>(effectBuffer),
4443 framesToCopy, HAL_FLOAT_SAMPLE_LIMIT /* absMax */);
4444 } else {
4445 memcpy_by_audio_format(mSinkBuffer, mFormat,
4446 effectBuffer, mEffectBufferFormat, framesToCopy);
4447 }
4448 // The sample data is partially interleaved when haptic channels exist,
4449 // we need to adjust channels here.
4450 if (mHapticChannelCount > 0) {
4451 adjust_channels_non_destructive(mSinkBuffer, mChannelCount, mSinkBuffer,
4452 mChannelCount + mHapticChannelCount,
4453 audio_bytes_per_sample(mFormat),
4454 audio_bytes_per_frame(mChannelCount, mFormat) * mNormalFrameCount);
4455 }
4456 }
4457
4458 // enable changes in effect chain
4459 unlockEffectChains(effectChains);
4460
4461 if (!metadataUpdate.playbackMetadataUpdate.empty()) {
4462 mAfThreadCallback->getMelReporter()->updateMetadataForCsd(id(),
4463 metadataUpdate.playbackMetadataUpdate);
4464 }
4465
4466 if (!waitingAsyncCallback()) {
4467 // mSleepTimeUs == 0 means we must write to audio hardware
4468 if (mSleepTimeUs == 0) {
4469 ssize_t ret = 0;
4470 // writePeriodNs is updated >= 0 when ret > 0.
4471 int64_t writePeriodNs = -1;
4472 if (mBytesRemaining) {
4473 // FIXME rewrite to reduce number of system calls
4474 const int64_t lastIoBeginNs = systemTime();
4475 ret = threadLoop_write();
4476 const int64_t lastIoEndNs = systemTime();
4477 if (ret < 0) {
4478 mBytesRemaining = 0;
4479 } else if (ret > 0) {
4480 mBytesWritten += ret;
4481 mBytesRemaining -= ret;
4482 const int64_t frames = ret / mFrameSize;
4483 mFramesWritten += frames;
4484
4485 writePeriodNs = lastIoEndNs - mLastIoEndNs;
4486 // process information relating to write time.
4487 if (audio_has_proportional_frames(mFormat)) {
4488 // we are in a continuous mixing cycle
4489 if (mMixerStatus == MIXER_TRACKS_READY &&
4490 loopCount == lastLoopCountWritten + 1) {
4491
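                            // Jitter here is the difference (in ms) between the measured write
                            // period and the ideal duration of the frames just written at
                            // mSampleRate.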
4492 const double jitterMs =
4493 TimestampVerifier<int64_t, int64_t>::computeJitterMs(
4494 {frames, writePeriodNs},
4495 {0, 0} /* lastTimestamp */, mSampleRate);
4496 const double processMs =
4497 (lastIoBeginNs - mLastIoEndNs) * 1e-6;
4498
4499 audio_utils::lock_guard _l(mutex());
4500 mIoJitterMs.add(jitterMs);
4501 mProcessTimeMs.add(processMs);
4502
4503 if (mPipeSink.get() != nullptr) {
4504                             // Using the MonoPipe availableToWrite, we estimate the current
4505                             // pipe depth (frames still buffered in the pipe).
4506 MonoPipe* monoPipe = static_cast<MonoPipe*>(mPipeSink.get());
4507 const ssize_t
4508 availableToWrite = mPipeSink->availableToWrite();
4509 const size_t pipeFrames = monoPipe->maxFrames();
4510 const size_t
4511 remainingFrames = pipeFrames - max(availableToWrite, 0);
4512 mMonopipePipeDepthStats.add(remainingFrames);
4513 }
4514 }
4515
4516 // write blocked detection
4517 const int64_t deltaWriteNs = lastIoEndNs - lastIoBeginNs;
4518 if ((mType == MIXER || mType == SPATIALIZER)
4519 && deltaWriteNs > maxPeriod) {
4520 mNumDelayedWrites++;
4521 if ((lastIoEndNs - lastWarning) > kWarningThrottleNs) {
4522 ATRACE_NAME("underrun");
4523 ALOGW("write blocked for %lld msecs, "
4524 "%d delayed writes, thread %d",
4525 (long long)deltaWriteNs / NANOS_PER_MILLISECOND,
4526 mNumDelayedWrites, mId);
4527 lastWarning = lastIoEndNs;
4528 }
4529 }
4530 }
4531 // update timing info.
4532 mLastIoBeginNs = lastIoBeginNs;
4533 mLastIoEndNs = lastIoEndNs;
4534 lastLoopCountWritten = loopCount;
4535 }
4536 } else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
4537 (mMixerStatus == MIXER_DRAIN_ALL)) {
4538 threadLoop_drain();
4539 }
4540 if ((mType == MIXER || mType == SPATIALIZER) && !mStandby) {
4541
4542 if (mThreadThrottle
4543 && mMixerStatus == MIXER_TRACKS_READY // we are mixing (active tracks)
4544 && writePeriodNs > 0) { // we have write period info
4545 // Limit MixerThread data processing to no more than twice the
4546 // expected processing rate.
4547 //
4548 // This helps prevent underruns with NuPlayer and other applications
4549 // which may set up buffers that are close to the minimum size, or use
4550 // deep buffers, and rely on a double-buffering sleep strategy to fill.
4551 //
4552 // The throttle smooths out sudden large data drains from the device,
4553 // e.g. when it comes out of standby, which often causes problems with
4554 // (1) mixer threads without a fast mixer (which has its own warm-up)
4555 // (2) minimum buffer sized tracks (even if the track is full,
4556 // the app won't fill fast enough to handle the sudden draw).
4557 //
4558 // Total time spent in last processing cycle equals time spent in
4559 // 1. threadLoop_write, as well as time spent in
4560 // 2. threadLoop_mix (significant for heavy mixing, especially
4561 // on low tier processors)
4562
4563 // it's OK if deltaMs is an overestimate.
4564
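                    // Example (hypothetical numbers): if mHalfBufferMs is 10 and the last
                    // write period was 4 ms, then throttleMs = 10 - 4 = 6 and we sleep ~6 ms,
                    // keeping processing close to the expected real-time rate.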
4565 const int32_t deltaMs = writePeriodNs / NANOS_PER_MILLISECOND;
4566
4567 const int32_t throttleMs = (int32_t)mHalfBufferMs - deltaMs;
4568 if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
4569 mThreadMetrics.logThrottleMs((double)throttleMs);
4570
4571 usleep(throttleMs * 1000);
4572 // notify of throttle start on verbose log
4573 ALOGV_IF(mThreadThrottleEndMs == mThreadThrottleTimeMs,
4574 "mixer(%p) throttle begin:"
4575 " ret(%zd) deltaMs(%d) requires sleep %d ms",
4576 this, ret, deltaMs, throttleMs);
4577 mThreadThrottleTimeMs += throttleMs;
4578 // Throttle must be attributed to the previous mixer loop's write time
4579 // to allow back-to-back throttling.
4580 // This also ensures proper timing statistics.
4581 mLastIoEndNs = systemTime(); // we fetch the write end time again.
4582 } else {
4583 uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
4584 if (diff > 0) {
4585 // notify of throttle end on debug log
4586 // but prevent spamming for bluetooth
4587 ALOGD_IF(!isSingleDeviceType(
4588 outDeviceTypes_l(), audio_is_a2dp_out_device) &&
4589 !isSingleDeviceType(
4590 outDeviceTypes_l(),
4591 audio_is_hearing_aid_out_device),
4592 "mixer(%p) throttle end: throttle time(%u)", this, diff);
4593 mThreadThrottleEndMs = mThreadThrottleTimeMs;
4594 }
4595 }
4596 }
4597 }
4598
4599 } else {
4600 ATRACE_BEGIN("sleep");
4601 audio_utils::unique_lock _l(mutex());
4602             // The suspended state requires accurate metering of sleep time.
4603 if (isSuspended()) {
4604 // advance by expected sleepTime
4605 timeLoopNextNs += microseconds((nsecs_t)mSleepTimeUs);
4606 const nsecs_t nowNs = systemTime();
4607
4608 // compute expected next time vs current time.
4609 // (negative deltas are treated as delays).
4610 nsecs_t deltaNs = timeLoopNextNs - nowNs;
4611 if (deltaNs < -kMaxNextBufferDelayNs) {
4612 // Delays longer than the max allowed trigger a reset.
4613 ALOGV("DelayNs: %lld, resetting timeLoopNextNs", (long long) deltaNs);
4614 deltaNs = microseconds((nsecs_t)mSleepTimeUs);
4615 timeLoopNextNs = nowNs + deltaNs;
4616 } else if (deltaNs < 0) {
4617 // Delays within the max delay allowed: zero the delta/sleepTime
4618 // to help the system catch up in the next iteration(s)
4619 ALOGV("DelayNs: %lld, catching-up", (long long) deltaNs);
4620 deltaNs = 0;
4621 }
4622 // update sleep time (which is >= 0)
4623 mSleepTimeUs = deltaNs / 1000;
4624 }
4625 if (!mSignalPending && mConfigEvents.isEmpty() && !exitPending()) {
4626 mWaitWorkCV.wait_for(_l, std::chrono::microseconds(mSleepTimeUs));
4627 }
4628 ATRACE_END();
4629 }
4630 }
4631
4632 // Finally let go of removed track(s), without the lock held
4633 // since we can't guarantee the destructors won't acquire that
4634 // same lock. This will also mutate and push a new fast mixer state.
4635 threadLoop_removeTracks(tracksToRemove);
4636 tracksToRemove.clear();
4637
4638 // FIXME I don't understand the need for this here;
4639 // it was in the original code but maybe the
4640 // assignment in saveOutputTracks() makes this unnecessary?
4641 clearOutputTracks();
4642
4643 // Effect chains will be actually deleted here if they were removed from
4644 // mEffectChains list during mixing or effects processing
4645 effectChains.clear();
4646
4647 // FIXME Note that the above .clear() is no longer necessary since effectChains
4648 // is now local to this block, but will keep it for now (at least until merge done).
4649 }
4650
4651 threadLoop_exit();
4652
4653 if (!mStandby) {
4654 threadLoop_standby();
4655 setStandby();
4656 }
4657
4658 releaseWakeLock();
4659
4660 ALOGV("Thread %p type %d exiting", this, mType);
4661 return false;
4662 }
4663
4664 void PlaybackThread::collectTimestamps_l()
4665 {
4666 if (mStandby) {
4667 mTimestampVerifier.discontinuity(discontinuityForStandbyOrFlush());
4668 return;
4669 } else if (mHwPaused) {
4670 mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);
4671 return;
4672 }
4673
4674 // Gather the framesReleased counters for all active tracks,
4675 // and associate with the sink frames written out. We need
4676 // this to convert the sink timestamp to the track timestamp.
4677 bool kernelLocationUpdate = false;
4678 ExtendedTimestamp timestamp; // use private copy to fetch
4679
4680 // Always query HAL timestamp and update timestamp verifier. In standby or pause,
4681 // HAL may be draining some small duration buffered data for fade out.
4682     if (threadloop_getHalTimestamp_l(&timestamp) == OK) {
4683 mTimestampVerifier.add(timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
4684 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4685 mSampleRate);
4686
4687 if (isTimestampCorrectionEnabled_l()) {
4688 ALOGVV("TS_BEFORE: %d %lld %lld", id(),
4689 (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4690 (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
4691 auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
4692 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4693 = correctedTimestamp.mFrames;
4694 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
4695 = correctedTimestamp.mTimeNs;
4696 ALOGVV("TS_AFTER: %d %lld %lld", id(),
4697 (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4698 (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
4699
4700 // Note: Downstream latency only added if timestamp correction enabled.
4701 if (mDownstreamLatencyStatMs.getN() > 0) { // we have latency info.
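                    // Convert the mean downstream latency from milliseconds to frames at
                    // mSampleRate and subtract it from the kernel position, so the reported
                    // position reflects what has actually reached the downstream sink.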
4702 const int64_t newPosition =
4703 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4704 - int64_t(mDownstreamLatencyStatMs.getMean() * mSampleRate * 1e-3);
4705 // prevent retrograde
4706 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = max(
4707 newPosition,
4708 (mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4709 - mSuspendedFrames));
4710 }
4711 }
4712
4713 // We always fetch the timestamp here because often the downstream
4714 // sink will block while writing.
4715
4716 // We keep track of the last valid kernel position in case we are in underrun
4717 // and the normal mixer period is the same as the fast mixer period, or there
4718 // is some error from the HAL.
4719 if (mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
4720 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
4721 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
4722 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
4723 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4724
4725 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
4726 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER];
4727 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
4728 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER];
4729 }
4730
4731 if (timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
4732 kernelLocationUpdate = true;
4733 } else {
4734 ALOGVV("getTimestamp error - no valid kernel position");
4735 }
4736
4737 // copy over kernel info
4738 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
4739 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4740 + mSuspendedFrames; // add frames discarded when suspended
4741 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
4742 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4743 } else {
4744 mTimestampVerifier.error();
4745 }
4746
4747     // mFramesWritten for non-offloaded tracks is contiguous
4748 // even after standby() is called. This is useful for the track frame
4749 // to sink frame mapping.
4750 bool serverLocationUpdate = false;
4751 if (mFramesWritten != mLastFramesWritten) {
4752 serverLocationUpdate = true;
4753 mLastFramesWritten = mFramesWritten;
4754 }
4755 // Only update timestamps if there is a meaningful change.
4756 // Either the kernel timestamp must be valid or we have written something.
4757 if (kernelLocationUpdate || serverLocationUpdate) {
4758 if (serverLocationUpdate) {
4759 // use the time before we called the HAL write - it is a bit more accurate
4760 // to when the server last read data than the current time here.
4761 //
4762 // If we haven't written anything, mLastIoBeginNs will be -1
4763 // and we use systemTime().
4764 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = mFramesWritten;
4765 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastIoBeginNs == -1
4766 ? systemTime() : (int64_t)mLastIoBeginNs;
4767 }
4768
4769 for (const sp<IAfTrack>& t : mActiveTracks) {
4770 if (!t->isFastTrack()) {
4771 t->updateTrackFrameInfo(
4772 t->audioTrackServerProxy()->framesReleased(),
4773 mFramesWritten,
4774 mSampleRate,
4775 mTimestamp);
4776 }
4777 }
4778 }
4779
4780 if (audio_has_proportional_frames(mFormat)) {
4781 const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
4782 if (latencyMs != 0.) { // note 0. means timestamp is empty.
4783 mLatencyMs.add(latencyMs);
4784 }
4785 }
4786 #if 0
4787 // logFormat example
4788 if (z % 100 == 0) {
4789 timespec ts;
4790 clock_gettime(CLOCK_MONOTONIC, &ts);
4791 LOGT("This is an integer %d, this is a float %f, this is my "
4792 "pid %p %% %s %t", 42, 3.14, "and this is a timestamp", ts);
4793 LOGT("A deceptive null-terminated string %\0");
4794 }
4795 ++z;
4796 #endif
4797 }
4798
4799 // removeTracks_l() must be called with ThreadBase::mutex() held
4800 void PlaybackThread::removeTracks_l(const Vector<sp<IAfTrack>>& tracksToRemove)
4801 NO_THREAD_SAFETY_ANALYSIS // release and re-acquire mutex()
4802 {
4803 if (tracksToRemove.empty()) return;
4804
4805 // Block all incoming TrackHandle requests until we are finished with the release.
4806 setThreadBusy_l(true);
4807
4808 for (const auto& track : tracksToRemove) {
4809 ALOGV("%s(%d): removing track on session %d", __func__, track->id(), track->sessionId());
4810 sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
4811 if (chain != 0) {
4812 ALOGV("%s(%d): stopping track on chain %p for session Id: %d",
4813 __func__, track->id(), chain.get(), track->sessionId());
4814 chain->decActiveTrackCnt();
4815 }
4816
4817 // If an external client track, inform APM we're no longer active, and remove if needed.
4818 // Since the track is active, we do it here instead of TrackBase::destroy().
4819 if (track->isExternalTrack()) {
4820 mutex().unlock();
4821 AudioSystem::stopOutput(track->portId());
4822 if (track->isTerminated()) {
4823 AudioSystem::releaseOutput(track->portId());
4824 }
4825 mutex().lock();
4826 }
4827 if (mHapticChannelCount > 0 &&
4828 ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
4829 || (chain != nullptr && chain->containsHapticGeneratingEffect()))) {
4830 mutex().unlock();
4831             // Unlock because VibratorService takes its own lock for this call and
4832             // calls Track mute/unmute, which also requires the thread's lock.
4833 afutils::onExternalVibrationStop(track->getExternalVibration());
4834 mutex().lock();
4835
4836             // When the track is stopped, set the haptic intensity to MUTE
4837             // for the HapticGenerator effect.
4838 if (chain != nullptr) {
4839 chain->setHapticScale_l(track->id(), os::HapticScale::mute());
4840 }
4841 }
4842
4843 // Under lock, the track is removed from the active tracks list.
4844 //
4845 // Once the track is no longer active, the TrackHandle may directly
4846 // modify it as the threadLoop() is no longer responsible for its maintenance.
4847 // Do not modify the track from threadLoop after the mutex is unlocked
4848 // if it is not active.
4849 mActiveTracks.remove(track);
4850
4851 if (track->isTerminated()) {
4852 // remove from our tracks vector
4853 removeTrack_l(track);
4854 }
4855 }
4856
4857 // Allow incoming TrackHandle requests. We still hold the mutex,
4858 // so pending TrackHandle requests will occur after we unlock it.
4859 setThreadBusy_l(false);
4860 }
4861
4862 status_t PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
4863 {
4864 if (mNormalSink != 0) {
4865 ExtendedTimestamp ets;
4866 status_t status = mNormalSink->getTimestamp(ets);
4867 if (status == NO_ERROR) {
4868             status = ets.getBestTimestamp(&timestamp);
4869 }
4870 return status;
4871 }
4872 if ((mType == OFFLOAD || mType == DIRECT) && mOutput != NULL) {
4873 collectTimestamps_l();
4874 if (mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] <= 0) {
4875 return INVALID_OPERATION;
4876 }
4877 timestamp.mPosition = mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
4878 const int64_t timeNs = mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4879 timestamp.mTime.tv_sec = timeNs / NANOS_PER_SECOND;
4880 timestamp.mTime.tv_nsec = timeNs - (timestamp.mTime.tv_sec * NANOS_PER_SECOND);
4881 return NO_ERROR;
4882 }
4883 return INVALID_OPERATION;
4884 }
4885
4886 // For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
4887 // still applied by the mixer.
4888 // All tracks attached to a mixer with flag VOIP_RX are tied to the same
4889 // stream type STREAM_VOICE_CALL so this will only change the HAL volume once even
4890 // if more than one track is active
4891 status_t PlaybackThread::handleVoipVolume_l(float* volume)
4892 {
4893 status_t result = NO_ERROR;
4894 if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
4895 if (*volume != mLeftVolFloat) {
4896 result = mOutput->stream->setVolume(*volume, *volume);
4897 // HAL can return INVALID_OPERATION if operation is not supported.
4898 ALOGE_IF(result != OK && result != INVALID_OPERATION,
4899 "Error when setting output stream volume: %d", result);
4900 if (result == NO_ERROR) {
4901 mLeftVolFloat = *volume;
4902 }
4903 }
4904 // if stream volume was successfully sent to the HAL, mLeftVolFloat == v here and we
4905 // remove stream volume contribution from software volume.
4906 if (mLeftVolFloat == *volume) {
4907 *volume = 1.0f;
4908 }
4909 }
4910 return result;
4911 }
4912
4913 status_t MixerThread::createAudioPatch_l(const struct audio_patch* patch,
4914 audio_patch_handle_t *handle)
4915 {
4916 status_t status;
4917 if (property_get_bool("af.patch_park", false /* default_value */)) {
4918 // Park FastMixer to avoid potential DOS issues with writing to the HAL
4919 // or if HAL does not properly lock against access.
4920 AutoPark<FastMixer> park(mFastMixer);
4921 status = PlaybackThread::createAudioPatch_l(patch, handle);
4922 } else {
4923 status = PlaybackThread::createAudioPatch_l(patch, handle);
4924 }
4925
4926 updateHalSupportedLatencyModes_l();
4927 return status;
4928 }
4929
4930 status_t PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,
4931 audio_patch_handle_t *handle)
4932 {
4933 status_t status = NO_ERROR;
4934
4935 // store new device and send to effects
4936 audio_devices_t type = AUDIO_DEVICE_NONE;
4937 AudioDeviceTypeAddrVector deviceTypeAddrs;
4938 for (unsigned int i = 0; i < patch->num_sinks; i++) {
4939 LOG_ALWAYS_FATAL_IF(popcount(patch->sinks[i].ext.device.type) > 1
4940 && !mOutput->audioHwDev->supportsAudioPatches(),
4941 "Enumerated device type(%#x) must not be used "
4942 "as it does not support audio patches",
4943 patch->sinks[i].ext.device.type);
4944 type = static_cast<audio_devices_t>(type | patch->sinks[i].ext.device.type);
4945 deviceTypeAddrs.emplace_back(patch->sinks[i].ext.device.type,
4946 patch->sinks[i].ext.device.address);
4947 }
4948
4949 audio_port_handle_t sinkPortId = patch->sinks[0].id;
4950 #ifdef ADD_BATTERY_DATA
4951 // when changing the audio output device, call addBatteryData to notify
4952 // the change
4953 if (outDeviceTypes() != deviceTypes) {
4954 uint32_t params = 0;
4955 // check whether speaker is on
4956 if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER) > 0) {
4957 params |= IMediaPlayerService::kBatteryDataSpeakerOn;
4958 }
4959
4960 // check if any other device (except speaker) is on
4961 if (!isSingleDeviceType(deviceTypes, AUDIO_DEVICE_OUT_SPEAKER)) {
4962 params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
4963 }
4964
4965 if (params != 0) {
4966 addBatteryData(params);
4967 }
4968 }
4969 #endif
4970
4971 for (size_t i = 0; i < mEffectChains.size(); i++) {
4972 mEffectChains[i]->setDevices_l(deviceTypeAddrs);
4973 }
4974
4975 // mPatch.num_sinks is not set when the thread is created so that
4976 // the first patch creation triggers an ioConfigChanged callback
4977 bool configChanged = (mPatch.num_sinks == 0) ||
4978 (mPatch.sinks[0].id != sinkPortId);
4979 mPatch = *patch;
4980 mOutDeviceTypeAddrs = deviceTypeAddrs;
4981 checkSilentMode_l();
4982
4983 if (mOutput->audioHwDev->supportsAudioPatches()) {
4984 sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
4985 status = hwDevice->createAudioPatch(patch->num_sources,
4986 patch->sources,
4987 patch->num_sinks,
4988 patch->sinks,
4989 handle);
4990 } else {
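        // HALs without audio patch support fall back to the legacy routing call;
        // no patch handle is produced in that case.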
4991 status = mOutput->stream->legacyCreateAudioPatch(patch->sinks[0], std::nullopt, type);
4992 *handle = AUDIO_PATCH_HANDLE_NONE;
4993 }
4994 const std::string patchSinksAsString = patchSinksToString(patch);
4995
4996 mThreadMetrics.logEndInterval();
4997 mThreadMetrics.logCreatePatch(/* inDevices */ {}, patchSinksAsString);
4998 mThreadMetrics.logBeginInterval();
4999 // also dispatch to active AudioTracks for MediaMetrics
5000 for (const auto &track : mActiveTracks) {
5001 track->logEndInterval();
5002 track->logBeginInterval(patchSinksAsString);
5003 }
5004
5005 if (configChanged) {
5006 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
5007 }
5008 // Force metadata update after a route change
5009 mActiveTracks.setHasChanged();
5010
5011 return status;
5012 }
5013
5014 status_t MixerThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
5015 {
5016 status_t status;
5017 if (property_get_bool("af.patch_park", false /* default_value */)) {
5018 // Park FastMixer to avoid potential DOS issues with writing to the HAL
5019 // or if HAL does not properly lock against access.
5020 AutoPark<FastMixer> park(mFastMixer);
5021 status = PlaybackThread::releaseAudioPatch_l(handle);
5022 } else {
5023 status = PlaybackThread::releaseAudioPatch_l(handle);
5024 }
5025 return status;
5026 }
5027
5028 status_t PlaybackThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
5029 {
5030 status_t status = NO_ERROR;
5031
5032 mPatch = audio_patch{};
5033 mOutDeviceTypeAddrs.clear();
5034
5035 if (mOutput->audioHwDev->supportsAudioPatches()) {
5036 sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
5037 status = hwDevice->releaseAudioPatch(handle);
5038 } else {
5039 status = mOutput->stream->legacyReleaseAudioPatch();
5040 }
5041     // Force metadata update after a route change
5042 mActiveTracks.setHasChanged();
5043
5044 return status;
5045 }
5046
5047 void PlaybackThread::addPatchTrack(const sp<IAfPatchTrack>& track)
5048 {
5049 audio_utils::lock_guard _l(mutex());
5050 mTracks.add(track);
5051 }
5052
5053 void PlaybackThread::deletePatchTrack(const sp<IAfPatchTrack>& track)
5054 {
5055 audio_utils::lock_guard _l(mutex());
5056 destroyTrack_l(track);
5057 }
5058
5059 void PlaybackThread::toAudioPortConfig(struct audio_port_config* config)
5060 {
5061 ThreadBase::toAudioPortConfig(config);
5062 config->role = AUDIO_PORT_ROLE_SOURCE;
5063 config->ext.mix.hw_module = mOutput->audioHwDev->handle();
5064 config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
5065 if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
5066 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
5067 config->flags.output = mOutput->flags;
5068 }
5069 }
5070
5071 // ----------------------------------------------------------------------------
5072
5073 /* static */
5074 sp<IAfPlaybackThread> IAfPlaybackThread::createMixerThread(
5075 const sp<IAfThreadCallback>& afThreadCallback, AudioStreamOut* output,
5076 audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t* mixerConfig) {
5077 return sp<MixerThread>::make(afThreadCallback, output, id, systemReady, type, mixerConfig);
5078 }
5079
5080 MixerThread::MixerThread(const sp<IAfThreadCallback>& afThreadCallback, AudioStreamOut* output,
5081 audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t *mixerConfig)
5082 : PlaybackThread(afThreadCallback, output, id, type, systemReady, mixerConfig),
5083 // mAudioMixer below
5084 // mFastMixer below
5085 mBluetoothLatencyModesEnabled(false),
5086 mFastMixerFutex(0),
5087 mMasterMono(false)
5088 // mOutputSink below
5089 // mPipeSink below
5090 // mNormalSink below
5091 {
5092 setMasterBalance(afThreadCallback->getMasterBalance_l());
5093 ALOGV("MixerThread() id=%d type=%d", id, type);
5094 ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
5095 "mFrameCount=%zu, mNormalFrameCount=%zu",
5096 mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
5097 mNormalFrameCount);
5098 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
5099
5100 if (type == DUPLICATING) {
5101 // The Duplicating thread uses the AudioMixer and delivers data to OutputTracks
5102 // (downstream MixerThreads) in DuplicatingThread::threadLoop_write().
5103 // Do not create or use mFastMixer, mOutputSink, mPipeSink, or mNormalSink.
5104 return;
5105 }
5106 // create an NBAIO sink for the HAL output stream, and negotiate
5107 mOutputSink = new AudioStreamOutSink(output->stream);
5108 size_t numCounterOffers = 0;
5109 const NBAIO_Format offers[1] = {Format_from_SR_C(
5110 mSampleRate, mChannelCount + mHapticChannelCount, mFormat)};
5111 #if !LOG_NDEBUG
5112 ssize_t index =
5113 #else
5114 (void)
5115 #endif
5116 mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
5117 ALOG_ASSERT(index == 0);
5118
5119 // initialize fast mixer depending on configuration
5120 bool initFastMixer;
5121 if (mType == SPATIALIZER || mType == BIT_PERFECT) {
5122 initFastMixer = false;
5123 } else {
5124 switch (kUseFastMixer) {
5125 case FastMixer_Never:
5126 initFastMixer = false;
5127 break;
5128 case FastMixer_Always:
5129 initFastMixer = true;
5130 break;
5131 case FastMixer_Static:
5132 case FastMixer_Dynamic:
5133 initFastMixer = mFrameCount < mNormalFrameCount;
5134 break;
5135 }
5136 ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
5137 "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
5138 mFrameCount, mNormalFrameCount);
5139 }
5140 if (initFastMixer) {
5141 audio_format_t fastMixerFormat;
5142 if (mMixerBufferEnabled && mEffectBufferEnabled) {
5143 fastMixerFormat = AUDIO_FORMAT_PCM_FLOAT;
5144 } else {
5145 fastMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
5146 }
5147 if (mFormat != fastMixerFormat) {
5148 // change our Sink format to accept our intermediate precision
5149 mFormat = fastMixerFormat;
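            // Reallocate the sink buffer to match the new frame size (32-byte aligned).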
5150 free(mSinkBuffer);
5151 mFrameSize = audio_bytes_per_frame(mChannelCount + mHapticChannelCount, mFormat);
5152 const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
5153 (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
5154 }
5155
5156 // create a MonoPipe to connect our submix to FastMixer
5157 NBAIO_Format format = mOutputSink->format();
5158
5159 // adjust format to match that of the Fast Mixer
5160 ALOGV("format changed from %#x to %#x", format.mFormat, fastMixerFormat);
5161 format.mFormat = fastMixerFormat;
5162 format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
5163
5164 // This pipe depth compensates for scheduling latency of the normal mixer thread.
5165 // When it wakes up after a maximum latency, it runs a few cycles quickly before
5166 // finally blocking. Note the pipe implementation rounds up the request to a power of 2.
5167 MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
5168 const NBAIO_Format offersFast[1] = {format};
5169 size_t numCounterOffersFast = 0;
5170 #if !LOG_NDEBUG
5171 index =
5172 #else
5173 (void)
5174 #endif
5175 monoPipe->negotiate(offersFast, std::size(offersFast),
5176 nullptr /* counterOffers */, numCounterOffersFast);
5177 ALOG_ASSERT(index == 0);
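        // Target average pipe fill: 7/8 of the pipe capacity when the low bit of
        // mScreenState is set, otherwise two normal mixer buffers.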
5178 monoPipe->setAvgFrames((mScreenState & 1) ?
5179 (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
5180 mPipeSink = monoPipe;
5181
5182 // create fast mixer and configure it initially with just one fast track for our submix
5183 mFastMixer = new FastMixer(mId);
5184 FastMixerStateQueue *sq = mFastMixer->sq();
5185 #ifdef STATE_QUEUE_DUMP
5186 sq->setObserverDump(&mStateQueueObserverDump);
5187 sq->setMutatorDump(&mStateQueueMutatorDump);
5188 #endif
5189 FastMixerState *state = sq->begin();
5190 FastTrack *fastTrack = &state->mFastTracks[0];
5191 // wrap the source side of the MonoPipe to make it an AudioBufferProvider
5192 fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
5193 fastTrack->mVolumeProvider = NULL;
5194 fastTrack->mChannelMask = static_cast<audio_channel_mask_t>(
5195 mChannelMask | mHapticChannelMask); // mPipeSink channel mask for
5196 // audio to FastMixer
5197 fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
5198 fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
5199 fastTrack->mHapticScale = {/*level=*/os::HapticLevel::NONE };
5200 fastTrack->mHapticMaxAmplitude = NAN;
5201 fastTrack->mGeneration++;
5202 state->mFastTracksGen++;
5203 state->mTrackMask = 1;
5204 // fast mixer will use the HAL output sink
5205 state->mOutputSink = mOutputSink.get();
5206 state->mOutputSinkGen++;
5207 state->mFrameCount = mFrameCount;
5208         // Specify the sink channel mask when a haptic channel mask is present, as it cannot
5209         // be calculated directly from the channel count.
5210 state->mSinkChannelMask = mHapticChannelMask == AUDIO_CHANNEL_NONE
5211 ? AUDIO_CHANNEL_NONE
5212 : static_cast<audio_channel_mask_t>(mChannelMask | mHapticChannelMask);
5213 state->mCommand = FastMixerState::COLD_IDLE;
5214 // already done in constructor initialization list
5215 //mFastMixerFutex = 0;
5216 state->mColdFutexAddr = &mFastMixerFutex;
5217 state->mColdGen++;
5218 state->mDumpState = &mFastMixerDumpState;
5219 mFastMixerNBLogWriter = afThreadCallback->newWriter_l(kFastMixerLogSize, "FastMixer");
5220 state->mNBLogWriter = mFastMixerNBLogWriter.get();
5221 sq->end();
5222 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5223
5224 NBLog::thread_info_t info;
5225 info.id = mId;
5226 info.type = NBLog::FASTMIXER;
5227 mFastMixerNBLogWriter->log<NBLog::EVENT_THREAD_INFO>(info);
5228
5229 // start the fast mixer
5230 mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
5231 pid_t tid = mFastMixer->getTid();
5232 sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
5233 stream()->setHalThreadPriority(kPriorityFastMixer);
5234
5235 #ifdef AUDIO_WATCHDOG
5236 // create and start the watchdog
5237 mAudioWatchdog = new AudioWatchdog();
5238 mAudioWatchdog->setDump(&mAudioWatchdogDump);
5239 mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
5240 tid = mAudioWatchdog->getTid();
5241 sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
5242 #endif
5243 } else {
5244 #ifdef TEE_SINK
5245 // Only use the MixerThread tee if there is no FastMixer.
5246 mTee.set(mOutputSink->format(), NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
5247 mTee.setId(std::string("_") + std::to_string(mId) + "_M");
5248 #endif
5249 }
5250
5251 switch (kUseFastMixer) {
5252 case FastMixer_Never:
5253 case FastMixer_Dynamic:
5254 mNormalSink = mOutputSink;
5255 break;
5256 case FastMixer_Always:
5257 mNormalSink = mPipeSink;
5258 break;
5259 case FastMixer_Static:
5260 mNormalSink = initFastMixer ? mPipeSink : mOutputSink;
5261 break;
5262 }
5263 }
5264
5265 MixerThread::~MixerThread()
5266 {
5267 if (mFastMixer != 0) {
5268 FastMixerStateQueue *sq = mFastMixer->sq();
5269 FastMixerState *state = sq->begin();
5270 if (state->mCommand == FastMixerState::COLD_IDLE) {
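            // The FastMixer may be parked on the cold-idle futex; a previous value of -1
            // returned by the increment means there is a waiter that needs an explicit wake.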
5271 int32_t old = android_atomic_inc(&mFastMixerFutex);
5272 if (old == -1) {
5273 (void) syscall(__NR_futex, &mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
5274 }
5275 }
5276 state->mCommand = FastMixerState::EXIT;
5277 sq->end();
5278 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5279 mFastMixer->join();
5280         // Though the fast mixer thread has exited, its state queue is still valid.
5281         // We'll use it to extract the final state, which contains one remaining fast track
5282 // corresponding to our sub-mix.
5283 state = sq->begin();
5284 ALOG_ASSERT(state->mTrackMask == 1);
5285 FastTrack *fastTrack = &state->mFastTracks[0];
5286 ALOG_ASSERT(fastTrack->mBufferProvider != NULL);
5287 delete fastTrack->mBufferProvider;
5288 sq->end(false /*didModify*/);
5289 mFastMixer.clear();
5290 #ifdef AUDIO_WATCHDOG
5291 if (mAudioWatchdog != 0) {
5292 mAudioWatchdog->requestExit();
5293 mAudioWatchdog->requestExitAndWait();
5294 mAudioWatchdog.clear();
5295 }
5296 #endif
5297 }
5298 mAfThreadCallback->unregisterWriter(mFastMixerNBLogWriter);
5299 delete mAudioMixer;
5300 }
5301
5302 void MixerThread::onFirstRef() {
5303 PlaybackThread::onFirstRef();
5304
5305 audio_utils::lock_guard _l(mutex());
5306 if (mOutput != nullptr && mOutput->stream != nullptr) {
5307 status_t status = mOutput->stream->setLatencyModeCallback(this);
5308 if (status != INVALID_OPERATION) {
5309 updateHalSupportedLatencyModes_l();
5310 }
5311         // Default to enabled if the HAL supports it. This can be changed by AudioFlinger after
5312         // thread construction, according to AudioFlinger::mBluetoothLatencyModesEnabled.
5313 mBluetoothLatencyModesEnabled.store(
5314 mOutput->audioHwDev->supportsBluetoothVariableLatency());
5315 }
5316 }
5317
5318 uint32_t MixerThread::correctLatency_l(uint32_t latency) const
5319 {
5320 if (mFastMixer != 0) {
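        // When a FastMixer is in use, audio sits in the MonoPipe before reaching the HAL;
        // add the pipe's average fill (frames, converted to ms) to the reported latency.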
5321 MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
5322 latency += (pipe->getAvgFrames() * 1000) / mSampleRate;
5323 }
5324 return latency;
5325 }
5326
5327 ssize_t MixerThread::threadLoop_write()
5328 {
5329 // FIXME we should only do one push per cycle; confirm this is true
5330 // Start the fast mixer if it's not already running
5331 if (mFastMixer != 0) {
5332 FastMixerStateQueue *sq = mFastMixer->sq();
5333 FastMixerState *state = sq->begin();
5334 if (state->mCommand != FastMixerState::MIX_WRITE &&
5335 (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
5336 if (state->mCommand == FastMixerState::COLD_IDLE) {
5337
5338 // FIXME workaround for first HAL write being CPU bound on some devices
5339 ATRACE_BEGIN("write");
5340 mOutput->write((char *)mSinkBuffer, 0);
5341 ATRACE_END();
5342
5343 int32_t old = android_atomic_inc(&mFastMixerFutex);
5344 if (old == -1) {
5345 (void) syscall(__NR_futex, &mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
5346 }
5347 #ifdef AUDIO_WATCHDOG
5348 if (mAudioWatchdog != 0) {
5349 mAudioWatchdog->resume();
5350 }
5351 #endif
5352 }
5353 state->mCommand = FastMixerState::MIX_WRITE;
5354 #ifdef FAST_THREAD_STATISTICS
5355 mFastMixerDumpState.increaseSamplingN(mAfThreadCallback->isLowRamDevice() ?
5356 FastThreadDumpState::kSamplingNforLowRamDevice : FastThreadDumpState::kSamplingN);
5357 #endif
5358 sq->end();
5359 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5360 if (kUseFastMixer == FastMixer_Dynamic) {
5361 mNormalSink = mPipeSink;
5362 }
5363 } else {
5364 sq->end(false /*didModify*/);
5365 }
5366 }
5367 return PlaybackThread::threadLoop_write();
5368 }
5369
5370 void MixerThread::threadLoop_standby()
5371 {
5372 // Idle the fast mixer if it's currently running
5373 if (mFastMixer != 0) {
5374 FastMixerStateQueue *sq = mFastMixer->sq();
5375 FastMixerState *state = sq->begin();
5376 if (!(state->mCommand & FastMixerState::IDLE)) {
5377 // Report any frames trapped in the Monopipe
5378 MonoPipe *monoPipe = (MonoPipe *)mPipeSink.get();
5379 const long long pipeFrames = monoPipe->maxFrames() - monoPipe->availableToWrite();
5380 mLocalLog.log("threadLoop_standby: framesWritten:%lld suspendedFrames:%lld "
5381 "monoPipeWritten:%lld monoPipeLeft:%lld",
5382 (long long)mFramesWritten, (long long)mSuspendedFrames,
5383 (long long)mPipeSink->framesWritten(), pipeFrames);
5384 mLocalLog.log("threadLoop_standby: %s", mTimestamp.toString().c_str());
5385
5386 state->mCommand = FastMixerState::COLD_IDLE;
5387 state->mColdFutexAddr = &mFastMixerFutex;
5388 state->mColdGen++;
5389 mFastMixerFutex = 0;
5390 sq->end();
5391 // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
5392 sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
5393 if (kUseFastMixer == FastMixer_Dynamic) {
5394 mNormalSink = mOutputSink;
5395 }
5396 #ifdef AUDIO_WATCHDOG
5397 if (mAudioWatchdog != 0) {
5398 mAudioWatchdog->pause();
5399 }
5400 #endif
5401 } else {
5402 sq->end(false /*didModify*/);
5403 }
5404 }
5405 PlaybackThread::threadLoop_standby();
5406 }
5407
5408 bool PlaybackThread::waitingAsyncCallback_l()
5409 {
5410 return false;
5411 }
5412
5413 bool PlaybackThread::shouldStandby_l()
5414 {
5415 return !mStandby;
5416 }
5417
5418 bool PlaybackThread::waitingAsyncCallback()
5419 {
5420 audio_utils::lock_guard _l(mutex());
5421 return waitingAsyncCallback_l();
5422 }
5423
5424 // shared by MIXER and DIRECT, overridden by DUPLICATING
5425 void PlaybackThread::threadLoop_standby()
5426 {
5427 ALOGV("%s: audio hardware entering standby, mixer %p, suspend count %d",
5428 __func__, this, (int32_t)mSuspended);
5429 mOutput->standby();
5430 if (mUseAsyncWrite != 0) {
5431 // discard any pending drain or write ack by incrementing sequence
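        // The sequence counters use bit 0 as a "callback pending" flag; bumping the count
        // by 2 and clearing bit 0 invalidates any outstanding write/drain wait.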
5432 mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
5433 mDrainSequence = (mDrainSequence + 2) & ~1;
5434 ALOG_ASSERT(mCallbackThread != 0);
5435 mCallbackThread->setWriteBlocked(mWriteAckSequence);
5436 mCallbackThread->setDraining(mDrainSequence);
5437 }
5438 mHwPaused = false;
5439 setHalLatencyMode_l();
5440 }
5441
5442 void PlaybackThread::onAddNewTrack_l()
5443 {
5444 ALOGV("signal playback thread");
5445 broadcast_l();
5446 }
5447
5448 void PlaybackThread::onAsyncError(bool isHardError)
5449 {
5450 auto allTrackPortIds = getTrackPortIds();
5451 for (int i = AUDIO_STREAM_SYSTEM; i < (int)AUDIO_STREAM_CNT; i++) {
5452 invalidateTracks((audio_stream_type_t)i);
5453 }
5454 if (isHardError) {
5455 mAfThreadCallback->onHardError(allTrackPortIds);
5456 }
5457 }
5458
5459 void MixerThread::threadLoop_mix()
5460 {
5461 // mix buffers...
5462 mAudioMixer->process();
5463 mCurrentWriteLength = mSinkBufferSize;
5464     // Increase sleep time progressively when the application underrun condition clears.
5465     // Only increase sleep time if the mixer is ready for two consecutive periods, so that
5466     // a steady state of alternating ready/not-ready conditions does not keep the sleep
5467     // time at a level that would underrun the audio HAL.
5468 if ((mSleepTimeUs == 0) && (sleepTimeShift > 0)) {
5469 sleepTimeShift--;
5470 }
5471 mSleepTimeUs = 0;
5472 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
5473 //TODO: delay standby when effects have a tail
5474
5475 }
5476
5477 void MixerThread::threadLoop_sleepTime()
5478 {
5479 // If no tracks are ready, sleep once for the duration of an output
5480 // buffer size, then write 0s to the output
5481 if (mSleepTimeUs == 0) {
5482 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
5483 if (mPipeSink.get() != nullptr && mPipeSink == mNormalSink) {
5484 // Using the Monopipe availableToWrite, we estimate the
5485 // sleep time to retry for more data (before we underrun).
5486 MonoPipe *monoPipe = static_cast<MonoPipe *>(mPipeSink.get());
5487 const ssize_t availableToWrite = mPipeSink->availableToWrite();
5488 const size_t pipeFrames = monoPipe->maxFrames();
5489 const size_t framesLeft = pipeFrames - max(availableToWrite, 0);
5490 // HAL_framecount <= framesDelay ~ framesLeft / 2 <= Normal_Mixer_framecount
5491 const size_t framesDelay = std::min(
5492 mNormalFrameCount, max(framesLeft / 2, mFrameCount));
5493 ALOGV("pipeFrames:%zu framesLeft:%zu framesDelay:%zu",
5494 pipeFrames, framesLeft, framesDelay);
5495 mSleepTimeUs = framesDelay * MICROS_PER_SECOND / mSampleRate;
5496 } else {
5497 mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
5498 if (mSleepTimeUs < kMinThreadSleepTimeUs) {
5499 mSleepTimeUs = kMinThreadSleepTimeUs;
5500 }
5501 // reduce sleep time in case of consecutive application underruns to avoid
5502 // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
5503 // duration we would end up writing less data than needed by the audio HAL if
5504 // the condition persists.
5505 if (sleepTimeShift < kMaxThreadSleepTimeShift) {
5506 sleepTimeShift++;
5507 }
5508 }
5509 } else {
5510 mSleepTimeUs = mIdleSleepTimeUs;
5511 }
5512 } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
5513 // clear out mMixerBuffer or mSinkBuffer, to ensure buffers are cleared
5514 // before effects processing or output.
5515 if (mMixerBufferValid) {
5516 memset(mMixerBuffer, 0, mMixerBufferSize);
5517 if (mType == SPATIALIZER) {
5518 memset(mSinkBuffer, 0, mSinkBufferSize);
5519 }
5520 } else {
5521 memset(mSinkBuffer, 0, mSinkBufferSize);
5522 }
5523 mSleepTimeUs = 0;
5524 ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
5525 "anticipated start");
5526 }
5527 // TODO add standby time extension fct of effect tail
5528 }
5529
5530 // prepareTracks_l() must be called with ThreadBase::mutex() held
5531 PlaybackThread::mixer_state MixerThread::prepareTracks_l(
5532 Vector<sp<IAfTrack>>* tracksToRemove)
5533 {
5534 // clean up deleted track ids in AudioMixer before allocating new tracks
5535 (void)mTracks.processDeletedTrackIds([this](int trackId) {
5536 // for each trackId, destroy it in the AudioMixer
5537 if (mAudioMixer->exists(trackId)) {
5538 mAudioMixer->destroy(trackId);
5539 }
5540 });
5541 mTracks.clearDeletedTrackIds();
5542
5543 mixer_state mixerStatus = MIXER_IDLE;
5544 // find out which tracks need to be processed
5545 size_t count = mActiveTracks.size();
5546 size_t mixedTracks = 0;
5547 size_t tracksWithEffect = 0;
5548 // counts only _active_ fast tracks
5549 size_t fastTracks = 0;
5550 uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset
5551
5552 float masterVolume = mMasterVolume;
5553 bool masterMute = mMasterMute;
5554
5555 if (masterMute) {
5556 masterVolume = 0;
5557 }
5558 // Delegate master volume control to effect in output mix effect chain if needed
5559 sp<IAfEffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
5560 if (chain != 0) {
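        // The volume is handed to the effect chain as an 8.24 fixed-point value; setVolume()
        // writes back the volume still to be applied by the mixer, which is converted back
        // to float with rounding below.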
5561 uint32_t v = (uint32_t)(masterVolume * (1 << 24));
5562 chain->setVolume(&v, &v);
5563 masterVolume = (float)((v + (1 << 23)) >> 24);
5564 chain.clear();
5565 }
5566
5567 // prepare a new state to push
5568 FastMixerStateQueue *sq = NULL;
5569 FastMixerState *state = NULL;
5570 bool didModify = false;
5571 FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
5572 bool coldIdle = false;
5573 if (mFastMixer != 0) {
5574 sq = mFastMixer->sq();
5575 state = sq->begin();
5576 coldIdle = state->mCommand == FastMixerState::COLD_IDLE;
5577 }
5578
5579 mMixerBufferValid = false; // mMixerBuffer has no valid data until appropriate tracks found.
5580 mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
5581
5582 // DeferredOperations handles statistics after setting mixerStatus.
5583 class DeferredOperations {
5584 public:
5585 DeferredOperations(mixer_state *mixerStatus, ThreadMetrics *threadMetrics)
5586 : mMixerStatus(mixerStatus)
5587 , mThreadMetrics(threadMetrics) {}
5588
5589 // when leaving scope, tally frames properly.
5590 ~DeferredOperations() {
5591 // Tally underrun frames only if we are actually mixing (MIXER_TRACKS_READY)
5592 // because that is when the underrun occurs.
5593 // We do not distinguish between FastTracks and NormalTracks here.
5594 size_t maxUnderrunFrames = 0;
5595 if (*mMixerStatus == MIXER_TRACKS_READY && mUnderrunFrames.size() > 0) {
5596 for (const auto &underrun : mUnderrunFrames) {
5597 underrun.first->tallyUnderrunFrames(underrun.second);
5598 maxUnderrunFrames = max(underrun.second, maxUnderrunFrames);
5599 }
5600 }
5601 // send the max underrun frames for this mixer period
5602 mThreadMetrics->logUnderrunFrames(maxUnderrunFrames);
5603 }
5604
5605 // tallyUnderrunFrames() is called to update the track counters
5606 // with the number of underrun frames for a particular mixer period.
5607 // We defer tallying until we know the final mixer status.
5608 void tallyUnderrunFrames(const sp<IAfTrack>& track, size_t underrunFrames) {
5609 mUnderrunFrames.emplace_back(track, underrunFrames);
5610 }
5611
5612 private:
5613 const mixer_state * const mMixerStatus;
5614 ThreadMetrics * const mThreadMetrics;
5615 std::vector<std::pair<sp<IAfTrack>, size_t>> mUnderrunFrames;
5616 } deferredOperations(&mixerStatus, &mThreadMetrics);
5617 // implicit nested scope for variable capture
5618
5619 bool noFastHapticTrack = true;
5620 for (size_t i=0 ; i<count ; i++) {
5621 const sp<IAfTrack> t = mActiveTracks[i];
5622
5623 // this const just means the local variable doesn't change
5624 IAfTrack* const track = t.get();
5625
5626 // process fast tracks
5627 if (track->isFastTrack()) {
5628 LOG_ALWAYS_FATAL_IF(mFastMixer.get() == nullptr,
5629 "%s(%d): FastTrack(%d) present without FastMixer",
5630 __func__, id(), track->id());
5631
5632 if (track->getHapticPlaybackEnabled()) {
5633 noFastHapticTrack = false;
5634 }
5635
5636 // It's theoretically possible (though unlikely) for a fast track to be created
5637 // and then removed within the same normal mix cycle. This is not a problem, as
5638             // the track never becomes active so its fast mixer slot is never touched.
5639 // The converse, of removing an (active) track and then creating a new track
5640 // at the identical fast mixer slot within the same normal mix cycle,
5641 // is impossible because the slot isn't marked available until the end of each cycle.
5642 int j = track->fastIndex();
5643 ALOG_ASSERT(0 < j && j < (int)FastMixerState::sMaxFastTracks);
5644 ALOG_ASSERT(!(mFastTrackAvailMask & (1 << j)));
5645 FastTrack *fastTrack = &state->mFastTracks[j];
5646
5647 // Determine whether the track is currently in underrun condition,
5648 // and whether it had a recent underrun.
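            // The per-category deltas below are taken modulo the underrun bit-field width
            // (UNDERRUN_MASK), so counter wraparound does not produce huge values.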
5649 FastTrackDump *ftDump = &mFastMixerDumpState.mTracks[j];
5650 FastTrackUnderruns underruns = ftDump->mUnderruns;
5651 uint32_t recentFull = (underruns.mBitFields.mFull -
5652 track->fastTrackUnderruns().mBitFields.mFull) & UNDERRUN_MASK;
5653 uint32_t recentPartial = (underruns.mBitFields.mPartial -
5654 track->fastTrackUnderruns().mBitFields.mPartial) & UNDERRUN_MASK;
5655 uint32_t recentEmpty = (underruns.mBitFields.mEmpty -
5656 track->fastTrackUnderruns().mBitFields.mEmpty) & UNDERRUN_MASK;
5657 uint32_t recentUnderruns = recentPartial + recentEmpty;
5658 track->fastTrackUnderruns() = underruns;
5659 // don't count underruns that occur while stopping or pausing
5660 // or stopped which can occur when flush() is called while active
5661 size_t underrunFrames = 0;
5662 if (!(track->isStopping() || track->isPausing() || track->isStopped()) &&
5663 recentUnderruns > 0) {
5664 // FIXME fast mixer will pull & mix partial buffers, but we count as a full underrun
5665 underrunFrames = recentUnderruns * mFrameCount;
5666 }
5667 // Immediately account for FastTrack underruns.
5668 track->audioTrackServerProxy()->tallyUnderrunFrames(underrunFrames);
5669
5670 // This is similar to the state machine for normal tracks,
5671 // with a few modifications for fast tracks.
5672 bool isActive = true;
5673 switch (track->state()) {
5674 case IAfTrackBase::STOPPING_1:
5675 // track stays active in STOPPING_1 state until first underrun
5676 if (recentUnderruns > 0 || track->isTerminated()) {
5677 track->setState(IAfTrackBase::STOPPING_2);
5678 }
5679 break;
5680 case IAfTrackBase::PAUSING:
5681 // ramp down is not yet implemented
5682 track->setPaused();
5683 break;
5684 case IAfTrackBase::RESUMING:
5685 // ramp up is not yet implemented
5686 track->setState(IAfTrackBase::ACTIVE);
5687 break;
5688 case IAfTrackBase::ACTIVE:
5689 if (recentFull > 0 || recentPartial > 0) {
5690 // track has provided at least some frames recently: reset retry count
5691 track->retryCount() = kMaxTrackRetries;
5692 }
5693 if (recentUnderruns == 0) {
5694 // no recent underruns: stay active
5695 break;
5696 }
5697 // there has recently been an underrun of some kind
5698 if (track->sharedBuffer() == 0) {
5699 // were any of the recent underruns "empty" (no frames available)?
5700 if (recentEmpty == 0) {
5701 // no, then ignore the partial underruns as they are allowed indefinitely
5702 break;
5703 }
5704 // there has recently been an "empty" underrun: decrement the retry counter
5705 if (--(track->retryCount()) > 0) {
5706 break;
5707 }
5708 // indicate to client process that the track was disabled because of underrun;
5709 // it will then automatically call start() when data is available
5710 track->disable();
5711 // remove from active list, but state remains ACTIVE [confusing but true]
5712 isActive = false;
5713 break;
5714 }
5715 FALLTHROUGH_INTENDED;
5716 case IAfTrackBase::STOPPING_2:
5717 case IAfTrackBase::PAUSED:
5718 case IAfTrackBase::STOPPED:
5719 case IAfTrackBase::FLUSHED: // flush() while active
5720 // Check for presentation complete if track is inactive
5721 // We have consumed all the buffers of this track.
5722 // This would be incomplete if we auto-paused on underrun
5723 {
5724 uint32_t latency = 0;
5725 status_t result = mOutput->stream->getLatency(&latency);
5726 ALOGE_IF(result != OK,
5727 "Error when retrieving output stream latency: %d", result);
5728 size_t audioHALFrames = (latency * mSampleRate) / 1000;
5729 int64_t framesWritten = mBytesWritten / mFrameSize;
5730 if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
5731 // track stays in active list until presentation is complete
5732 break;
5733 }
5734 }
5735 if (track->isStopping_2()) {
5736 track->setState(IAfTrackBase::STOPPED);
5737 }
5738 if (track->isStopped()) {
5739 // Can't reset directly, as fast mixer is still polling this track
5740 // track->reset();
5741 // So instead mark this track as needing to be reset after push with ack
5742 resetMask |= 1 << i;
5743 }
5744 isActive = false;
5745 break;
5746 case IAfTrackBase::IDLE:
5747 default:
5748 LOG_ALWAYS_FATAL("unexpected track state %d", (int)track->state());
5749 }
5750
5751 if (isActive) {
5752 // was it previously inactive?
5753 if (!(state->mTrackMask & (1 << j))) {
5754 ExtendedAudioBufferProvider *eabp = track->asExtendedAudioBufferProvider();
5755 VolumeProvider *vp = track->asVolumeProvider();
5756 fastTrack->mBufferProvider = eabp;
5757 fastTrack->mVolumeProvider = vp;
5758 fastTrack->mChannelMask = track->channelMask();
5759 fastTrack->mFormat = track->format();
5760 fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
5761 fastTrack->mHapticScale = track->getHapticScale();
5762 fastTrack->mHapticMaxAmplitude = track->getHapticMaxAmplitude();
5763 fastTrack->mGeneration++;
5764 state->mTrackMask |= 1 << j;
5765 didModify = true;
5766 // no acknowledgement required for newly active tracks
5767 }
5768 sp<AudioTrackServerProxy> proxy = track->audioTrackServerProxy();
5769 float volume;
5770 if (track->isPlaybackRestricted() || mStreamTypes[track->streamType()].mute) {
5771 volume = 0.f;
5772 } else {
5773 volume = masterVolume * mStreamTypes[track->streamType()].volume;
5774 }
5775
5776 handleVoipVolume_l(&volume);
5777
5778 // cache the combined master volume and stream type volume for fast mixer; this
5779 // lacks any synchronization or barrier so VolumeProvider may read a stale value
5780 const float vh = track->getVolumeHandler()->getVolume(
5781 proxy->framesReleased()).first;
5782 volume *= vh;
5783 track->setCachedVolume(volume);
5784 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
5785 float vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
5786 float vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
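// Note: the client publishes its left/right gains in shared memory as a pair of 16-bit
// minifloats packed into one word; float_from_gain() converts each half back to a float
// gain where 1.0f (GAIN_FLOAT_UNITY) is unity.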
5787
5788 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
5789 /*muteState=*/{masterVolume == 0.f,
5790 mStreamTypes[track->streamType()].volume == 0.f,
5791 mStreamTypes[track->streamType()].mute,
5792 track->isPlaybackRestricted(),
5793 vlf == 0.f && vrf == 0.f,
5794 vh == 0.f});
5795
5796 vlf *= volume;
5797 vrf *= volume;
5798
5799 if (track->getInternalMute()) {
5800 vlf = 0.f;
5801 vrf = 0.f;
5802 }
5803
5804 track->setFinalVolume(vlf, vrf);
5805 ++fastTracks;
5806 } else {
5807 // was it previously active?
5808 if (state->mTrackMask & (1 << j)) {
5809 fastTrack->mBufferProvider = NULL;
5810 fastTrack->mGeneration++;
5811 state->mTrackMask &= ~(1 << j);
5812 didModify = true;
5813 // If any fast tracks were removed, we must wait for acknowledgement
5814 // because we're about to decrement the last sp<> on those tracks.
5815 block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
5816 } else {
5817 // ALOGW rather than LOG_ALWAYS_FATAL because it seems there are cases where an
5818 // AudioTrack may start (which may not be with a start() but with a write()
5819 // after underrun) and immediately paused or released. In that case the
5820 // FastTrack state hasn't had time to update.
5821 // TODO Remove the ALOGW when this theory is confirmed.
5822 ALOGW("fast track %d should have been active; "
5823 "mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
5824 j, (int)track->state(), state->mTrackMask, recentUnderruns,
5825 track->sharedBuffer() != 0);
5826 // Since the FastMixer state already has the track inactive, do nothing here.
5827 }
5828 tracksToRemove->add(track);
5829 // Avoids a misleading display in dumpsys
5830 track->fastTrackUnderruns().mBitFields.mMostRecent = UNDERRUN_FULL;
5831 }
5832 if (fastTrack->mHapticPlaybackEnabled != track->getHapticPlaybackEnabled()) {
5833 fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
5834 didModify = true;
5835 }
5836 continue;
5837 }
5838
5839 { // local variable scope to avoid goto warning
5840
5841 audio_track_cblk_t* cblk = track->cblk();
5842
5843 // The first time a track is added we wait
5844 // for all its buffers to be filled before processing it
5845 const int trackId = track->id();
5846
5847 // if an active track doesn't exist in the AudioMixer, create it.
5848 // use the trackId as the AudioMixer name.
5849 if (!mAudioMixer->exists(trackId)) {
5850 status_t status = mAudioMixer->create(
5851 trackId,
5852 track->channelMask(),
5853 track->format(),
5854 track->sessionId());
5855 if (status != OK) {
5856 ALOGW("%s(): AudioMixer cannot create track(%d)"
5857 " mask %#x, format %#x, sessionId %d",
5858 __func__, trackId,
5859 track->channelMask(), track->format(), track->sessionId());
5860 tracksToRemove->add(track);
5861 track->invalidate(); // consider it dead.
5862 continue;
5863 }
5864 }
5865
5866 // make sure that we have enough frames to mix one full buffer.
5867 // enforce this condition only once to enable draining the buffer in case the client
5868 // app does not call stop() and relies on underrun to stop:
5869 // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
5870 // during last round
5871 size_t desiredFrames;
5872 const uint32_t sampleRate = track->audioTrackServerProxy()->getSampleRate();
5873 const AudioPlaybackRate playbackRate = track->audioTrackServerProxy()->getPlaybackRate();
5874
5875 desiredFrames = sourceFramesNeededWithTimestretch(
5876 sampleRate, mNormalFrameCount, mSampleRate, playbackRate.mSpeed);
5877 // TODO: ONLY USED FOR LEGACY RESAMPLERS, remove when they are removed.
5878 // add frames already consumed but not yet released by the resampler
5879 // because mAudioTrackServerProxy->framesReady() will include these frames
5880 desiredFrames += mAudioMixer->getUnreleasedFrames(trackId);
5881
5882 uint32_t minFrames = 1;
5883 if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
5884 (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
5885 minFrames = desiredFrames;
5886 }
5887
5888 size_t framesReady = track->framesReady();
5889 if (ATRACE_ENABLED()) {
5890 // I wish we had formatted trace names
5891 std::string traceName("nRdy");
5892 traceName += std::to_string(trackId);
5893 ATRACE_INT(traceName.c_str(), framesReady);
5894 }
5895 if ((framesReady >= minFrames) && track->isReady() &&
5896 !track->isPaused() && !track->isTerminated())
5897 {
5898 ALOGVV("track(%d) s=%08x [OK] on thread %p", trackId, cblk->mServer, this);
5899
5900 mixedTracks++;
5901
5902 // track->mainBuffer() != mSinkBuffer and mMixerBuffer means
5903 // there is an effect chain connected to the track
5904 chain.clear();
5905 if (track->mainBuffer() != mSinkBuffer &&
5906 track->mainBuffer() != mMixerBuffer) {
5907 if (mEffectBufferEnabled) {
5908 mEffectBufferValid = true; // Later can set directly.
5909 }
5910 chain = getEffectChain_l(track->sessionId());
5911 // Delegate volume control to effect in track effect chain if needed
5912 if (chain != 0) {
5913 tracksWithEffect++;
5914 } else {
5915 ALOGW("prepareTracks_l(): track(%d) attached to effect but no chain found on "
5916 "session %d",
5917 trackId, track->sessionId());
5918 }
5919 }
5920
5921
5922 int param = AudioMixer::VOLUME;
5923 if (track->fillingStatus() == IAfTrack::FS_FILLED) {
5924 // no ramp for the first volume setting
5925 track->fillingStatus() = IAfTrack::FS_ACTIVE;
5926 if (track->state() == IAfTrackBase::RESUMING) {
5927 track->setState(IAfTrackBase::ACTIVE);
5928 // If a new track is paused immediately after start, do not ramp on resume.
5929 if (cblk->mServer != 0) {
5930 param = AudioMixer::RAMP_VOLUME;
5931 }
5932 }
5933 mAudioMixer->setParameter(trackId, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
5934 mLeftVolFloat = -1.0;
5935 // FIXME should not make a decision based on mServer
5936 } else if (cblk->mServer != 0) {
5937 // If the track is stopped before the first frame was mixed,
5938 // do not apply ramp
5939 param = AudioMixer::RAMP_VOLUME;
5940 }
5941
5942 // compute volume for this track
5943 uint32_t vl, vr; // in U8.24 integer format
5944 float vlf, vrf, vaf; // in [0.0, 1.0] float format
5945 // read original volumes with volume control
5946 float v = masterVolume * mStreamTypes[track->streamType()].volume;
5947 // Always fetch volumeshaper volume to ensure state is updated.
5948 const sp<AudioTrackServerProxy> proxy = track->audioTrackServerProxy();
5949 const float vh = track->getVolumeHandler()->getVolume(
5950 track->audioTrackServerProxy()->framesReleased()).first;
5951
5952 if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
5953 v = 0;
5954 }
5955
5956 handleVoipVolume_l(&v);
5957
5958 if (track->isPausing()) {
5959 vl = vr = 0;
5960 vlf = vrf = vaf = 0.;
5961 track->setPaused();
5962 } else {
5963 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
5964 vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
5965 vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
5966 // track volumes come from shared memory, so can't be trusted and must be clamped
5967 if (vlf > GAIN_FLOAT_UNITY) {
5968 ALOGV("Track left volume out of range: %.3g", vlf);
5969 vlf = GAIN_FLOAT_UNITY;
5970 }
5971 if (vrf > GAIN_FLOAT_UNITY) {
5972 ALOGV("Track right volume out of range: %.3g", vrf);
5973 vrf = GAIN_FLOAT_UNITY;
5974 }
5975
5976 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
5977 /*muteState=*/{masterVolume == 0.f,
5978 mStreamTypes[track->streamType()].volume == 0.f,
5979 mStreamTypes[track->streamType()].mute,
5980 track->isPlaybackRestricted(),
5981 vlf == 0.f && vrf == 0.f,
5982 vh == 0.f});
5983
5984 // now apply the master volume and stream type volume and shaper volume
5985 vlf *= v * vh;
5986 vrf *= v * vh;
5987 // assuming master volume and stream type volume each go up to 1.0,
5988 // then derive vl and vr as U8.24 versions for the effect chain
5989 const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT;
5990 vl = (uint32_t) (scaleto8_24 * vlf);
5991 vr = (uint32_t) (scaleto8_24 * vrf);
5992 // vl and vr are now in U8.24 format
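// (U8.24 == 8 integer bits, 24 fractional bits: unity is 1 << 24, which is what
// scaleto8_24 evaluates to -- see the 1/(1 << 24) conversion back to float further below.)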
5993 uint16_t sendLevel = proxy->getSendLevel_U4_12();
5994 // send level comes from shared memory and so may be corrupt
5995 if (sendLevel > MAX_GAIN_INT) {
5996 ALOGV("Track send level out of range: %04X", sendLevel);
5997 sendLevel = MAX_GAIN_INT;
5998 }
5999 // vaf is represented as [0.0, 1.0] float by rescaling sendLevel
6000 vaf = v * sendLevel * (1. / MAX_GAIN_INT);
6001 }
6002
6003 if (track->getInternalMute()) {
6004 vrf = 0.f;
6005 vlf = 0.f;
6006 }
6007
6008 track->setFinalVolume(vlf, vrf);
6009
6010 // Delegate volume control to effect in track effect chain if needed
6011 if (chain != 0 && chain->setVolume(&vl, &vr)) {
6012 // Do not ramp volume if volume is controlled by effect
6013 param = AudioMixer::VOLUME;
6014 // Update remaining floating point volume levels
6015 vlf = (float)vl / (1 << 24);
6016 vrf = (float)vr / (1 << 24);
6017 track->setHasVolumeController(true);
6018 } else {
6019 // force no volume ramp when volume controller was just disabled or removed
6020 // from effect chain to avoid volume spike
6021 if (track->hasVolumeController()) {
6022 param = AudioMixer::VOLUME;
6023 }
6024 track->setHasVolumeController(false);
6025 }
6026
6027 // XXX: these things DON'T need to be done each time
6028 mAudioMixer->setBufferProvider(trackId, track->asExtendedAudioBufferProvider());
6029 mAudioMixer->enable(trackId);
6030
6031 mAudioMixer->setParameter(trackId, param, AudioMixer::VOLUME0, &vlf);
6032 mAudioMixer->setParameter(trackId, param, AudioMixer::VOLUME1, &vrf);
6033 mAudioMixer->setParameter(trackId, param, AudioMixer::AUXLEVEL, &vaf);
6034 mAudioMixer->setParameter(
6035 trackId,
6036 AudioMixer::TRACK,
6037 AudioMixer::FORMAT, (void *)track->format());
6038 mAudioMixer->setParameter(
6039 trackId,
6040 AudioMixer::TRACK,
6041 AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
6042
6043 if (mType == SPATIALIZER && !track->isSpatialized()) {
6044 mAudioMixer->setParameter(
6045 trackId,
6046 AudioMixer::TRACK,
6047 AudioMixer::MIXER_CHANNEL_MASK,
6048 (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
6049 } else {
6050 mAudioMixer->setParameter(
6051 trackId,
6052 AudioMixer::TRACK,
6053 AudioMixer::MIXER_CHANNEL_MASK,
6054 (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
6055 }
6056
6057                 // limit track sample rate to AUDIO_RESAMPLER_DOWN_RATIO_MAX x the output sample rate, which changes at re-configuration
6058 uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
6059 uint32_t reqSampleRate = proxy->getSampleRate();
6060 if (reqSampleRate == 0) {
6061 reqSampleRate = mSampleRate;
6062 } else if (reqSampleRate > maxSampleRate) {
6063 reqSampleRate = maxSampleRate;
6064 }
6065 mAudioMixer->setParameter(
6066 trackId,
6067 AudioMixer::RESAMPLE,
6068 AudioMixer::SAMPLE_RATE,
6069 (void *)(uintptr_t)reqSampleRate);
6070
6071 mAudioMixer->setParameter(
6072 trackId,
6073 AudioMixer::TIMESTRETCH,
6074 AudioMixer::PLAYBACK_RATE,
6075 // cast away constness for this generic API.
6076 const_cast<void *>(reinterpret_cast<const void *>(&playbackRate)));
6077
6078 /*
6079 * Select the appropriate output buffer for the track.
6080 *
6081 * Tracks with effects go into their own effects chain buffer
6082 * and from there into either mEffectBuffer or mSinkBuffer.
6083 *
6084 * Other tracks can use mMixerBuffer for higher precision
6085 * channel accumulation. If this buffer is enabled
6086 * (mMixerBufferEnabled true), then selected tracks will accumulate
6087 * into it.
6088 *
6089 */
6090 if (mMixerBufferEnabled
6091 && (track->mainBuffer() == mSinkBuffer
6092 || track->mainBuffer() == mMixerBuffer)) {
6093 if (mType == SPATIALIZER && !track->isSpatialized()) {
6094 mAudioMixer->setParameter(
6095 trackId,
6096 AudioMixer::TRACK,
6097 AudioMixer::MIXER_FORMAT, (void *)mEffectBufferFormat);
6098 mAudioMixer->setParameter(
6099 trackId,
6100 AudioMixer::TRACK,
6101 AudioMixer::MAIN_BUFFER, (void *)mPostSpatializerBuffer);
6102 } else {
6103 mAudioMixer->setParameter(
6104 trackId,
6105 AudioMixer::TRACK,
6106 AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
6107 mAudioMixer->setParameter(
6108 trackId,
6109 AudioMixer::TRACK,
6110 AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
6111 // TODO: override track->mainBuffer()?
6112 mMixerBufferValid = true;
6113 }
6114 } else {
6115 mAudioMixer->setParameter(
6116 trackId,
6117 AudioMixer::TRACK,
6118 AudioMixer::MIXER_FORMAT, (void *)AUDIO_FORMAT_PCM_FLOAT);
6119 mAudioMixer->setParameter(
6120 trackId,
6121 AudioMixer::TRACK,
6122 AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer());
6123 }
6124 mAudioMixer->setParameter(
6125 trackId,
6126 AudioMixer::TRACK,
6127 AudioMixer::AUX_BUFFER, (void *)track->auxBuffer());
6128 mAudioMixer->setParameter(
6129 trackId,
6130 AudioMixer::TRACK,
6131 AudioMixer::HAPTIC_ENABLED, (void *)(uintptr_t)track->getHapticPlaybackEnabled());
6132 const os::HapticScale hapticScale = track->getHapticScale();
6133 mAudioMixer->setParameter(
6134 trackId,
6135 AudioMixer::TRACK,
6136 AudioMixer::HAPTIC_SCALE, (void *)&hapticScale);
6137 const float hapticMaxAmplitude = track->getHapticMaxAmplitude();
6138 mAudioMixer->setParameter(
6139 trackId,
6140 AudioMixer::TRACK,
6141 AudioMixer::HAPTIC_MAX_AMPLITUDE, (void *)&hapticMaxAmplitude);
6142
6143 // reset retry count
6144 track->retryCount() = kMaxTrackRetries;
6145
6146 // If one track is ready, set the mixer ready if:
6147 // - the mixer was not ready during previous round OR
6148 // - no other track is not ready
6149 if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY ||
6150 mixerStatus != MIXER_TRACKS_ENABLED) {
6151 mixerStatus = MIXER_TRACKS_READY;
6152 }
6153
6154 // Enable the next few lines to instrument a test for underrun log handling.
6155 // TODO: Remove when we have a better way of testing the underrun log.
6156 #if 0
6157 static int i;
6158 if ((++i & 0xf) == 0) {
6159 deferredOperations.tallyUnderrunFrames(track, 10 /* underrunFrames */);
6160 }
6161 #endif
6162 } else {
6163 size_t underrunFrames = 0;
6164 if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
6165 ALOGV("track(%d) underrun, track state %s framesReady(%zu) < framesDesired(%zd)",
6166 trackId, track->getTrackStateAsString(), framesReady, desiredFrames);
6167 underrunFrames = desiredFrames;
6168 }
6169 deferredOperations.tallyUnderrunFrames(track, underrunFrames);
6170
6171 // clear effect chain input buffer if an active track underruns to avoid sending
6172 // previous audio buffer again to effects
6173 chain = getEffectChain_l(track->sessionId());
6174 if (chain != 0) {
6175 chain->clearInputBuffer();
6176 }
6177
6178 ALOGVV("track(%d) s=%08x [NOT READY] on thread %p", trackId, cblk->mServer, this);
6179 if ((track->sharedBuffer() != 0) || track->isTerminated() ||
6180 track->isStopped() || track->isPaused()) {
6181 // We have consumed all the buffers of this track.
6182 // Remove it from the list of active tracks.
6183 // TODO: use actual buffer filling status instead of latency when available from
6184 // audio HAL
6185 size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
6186 int64_t framesWritten = mBytesWritten / mFrameSize;
6187 if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
6188 if (track->isStopped()) {
6189 track->reset();
6190 }
6191 tracksToRemove->add(track);
6192 }
6193 } else {
6194 // No buffers for this track. Give it a few chances to
6195 // fill a buffer, then remove it from active list.
6196 if (--(track->retryCount()) <= 0) {
6197 ALOGI("%s BUFFER TIMEOUT: remove track(%d) from active list due to underrun"
6198 " on thread %d", __func__, trackId, mId);
6199 tracksToRemove->add(track);
6200 // indicate to client process that the track was disabled because of underrun;
6201 // it will then automatically call start() when data is available
6202 track->disable();
6203 // If one track is not ready, mark the mixer also not ready if:
6204 // - the mixer was ready during previous round OR
6205 // - no other track is ready
6206 } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY ||
6207 mixerStatus != MIXER_TRACKS_READY) {
6208 mixerStatus = MIXER_TRACKS_ENABLED;
6209 }
6210 }
6211 mAudioMixer->disable(trackId);
6212 }
6213
6214 } // local variable scope to avoid goto warning
6215
6216 }
6217
6218 if (mHapticChannelMask != AUDIO_CHANNEL_NONE && sq != NULL) {
6219         // When no fast track is playing haptics and a FastMixer exists, enable haptic
6220         // playback on the first FastTrack, which carries the mixed data from the normal
6221         // tracks.
6222 FastTrack *fastTrack = &state->mFastTracks[0];
6223 if (fastTrack->mHapticPlaybackEnabled != noFastHapticTrack) {
6224 fastTrack->mHapticPlaybackEnabled = noFastHapticTrack;
6225 didModify = true;
6226 }
6227 }
6228
6229 // Push the new FastMixer state if necessary
6230 [[maybe_unused]] bool pauseAudioWatchdog = false;
6231 if (didModify) {
6232 state->mFastTracksGen++;
6233 // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
6234 if (kUseFastMixer == FastMixer_Dynamic &&
6235 state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
6236 state->mCommand = FastMixerState::COLD_IDLE;
6237 state->mColdFutexAddr = &mFastMixerFutex;
6238 state->mColdGen++;
6239 mFastMixerFutex = 0;
6240 if (kUseFastMixer == FastMixer_Dynamic) {
6241 mNormalSink = mOutputSink;
6242 }
6243 // If we go into cold idle, need to wait for acknowledgement
6244 // so that fast mixer stops doing I/O.
6245 block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
6246 pauseAudioWatchdog = true;
6247 }
6248 }
6249 if (sq != NULL) {
6250 sq->end(didModify);
6251 // No need to block if the FastMixer is in COLD_IDLE as the FastThread
6252 // is not active. (We BLOCK_UNTIL_ACKED when entering COLD_IDLE
6253 // when bringing the output sink into standby.)
6254 //
6255 // We will get the latest FastMixer state when we come out of COLD_IDLE.
6256 //
6257 // This occurs with BT suspend when we idle the FastMixer with
6258 // active tracks, which may be added or removed.
6259 sq->push(coldIdle ? FastMixerStateQueue::BLOCK_NEVER : block);
6260 }
6261 #ifdef AUDIO_WATCHDOG
6262 if (pauseAudioWatchdog && mAudioWatchdog != 0) {
6263 mAudioWatchdog->pause();
6264 }
6265 #endif
6266
6267 // Now perform the deferred reset on fast tracks that have stopped
6268 while (resetMask != 0) {
6269 size_t i = __builtin_ctz(resetMask);
6270 ALOG_ASSERT(i < count);
6271 resetMask &= ~(1 << i);
6272 sp<IAfTrack> track = mActiveTracks[i];
6273 ALOG_ASSERT(track->isFastTrack() && track->isStopped());
6274 track->reset();
6275 }
6276
6277 // Track destruction may occur outside of threadLoop once it is removed from active tracks.
6278 // Ensure the AudioMixer doesn't have a raw "buffer provider" pointer to the track if
6279 // it ceases to be active, to allow safe removal from the AudioMixer at the start
6280 // of prepareTracks_l(); this releases any outstanding buffer back to the track.
6281 // See also the implementation of destroyTrack_l().
6282 for (const auto &track : *tracksToRemove) {
6283 const int trackId = track->id();
6284 if (mAudioMixer->exists(trackId)) { // Normal tracks here, fast tracks in FastMixer.
6285 mAudioMixer->setBufferProvider(trackId, nullptr /* bufferProvider */);
6286 }
6287 }
6288
6289 // remove all the tracks that need to be...
6290 removeTracks_l(*tracksToRemove);
6291
6292 if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0 ||
6293 getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE) != 0) {
6294 mEffectBufferValid = true;
6295 }
6296
6297 if (mEffectBufferValid) {
6298 // as long as there are effects we should clear the effects buffer, to avoid
6299 // passing a non-clean buffer to the effect chain
6300 memset(mEffectBuffer, 0, mEffectBufferSize);
6301 if (mType == SPATIALIZER) {
6302 memset(mPostSpatializerBuffer, 0, mPostSpatializerBufferSize);
6303 }
6304 }
6305 // sink or mix buffer must be cleared if all tracks are connected to an
6306 // effect chain as in this case the mixer will not write to the sink or mix buffer
6307 // and track effects will accumulate into it
6308 // always clear sink buffer for spatializer output as the output of the spatializer
6309 // effect will be accumulated into it
6310 if ((mBytesRemaining == 0) && (((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
6311 (mixedTracks == 0 && fastTracks > 0)) || (mType == SPATIALIZER))) {
6312 // FIXME as a performance optimization, should remember previous zero status
6313 if (mMixerBufferValid) {
6314 memset(mMixerBuffer, 0, mMixerBufferSize);
6315 // TODO: In testing, mSinkBuffer below need not be cleared because
6316 // the PlaybackThread::threadLoop() copies mMixerBuffer into mSinkBuffer
6317 // after mixing.
6318 //
6319 // To enforce this guarantee:
6320 // ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
6321 // (mixedTracks == 0 && fastTracks > 0))
6322 // must imply MIXER_TRACKS_READY.
6323 // Later, we may clear buffers regardless, and skip much of this logic.
6324 }
6325 // FIXME as a performance optimization, should remember previous zero status
6326 memset(mSinkBuffer, 0, mNormalFrameCount * mFrameSize);
6327 }
6328
6329 // if any fast tracks, then status is ready
6330 mMixerStatusIgnoringFastTracks = mixerStatus;
6331 if (fastTracks > 0) {
6332 mixerStatus = MIXER_TRACKS_READY;
6333 }
6334 return mixerStatus;
6335 }
6336
6337 // trackCountForUid_l() must be called with ThreadBase::mutex() held
6338 uint32_t PlaybackThread::trackCountForUid_l(uid_t uid) const
6339 {
6340 uint32_t trackCount = 0;
6341 for (size_t i = 0; i < mTracks.size() ; i++) {
6342 if (mTracks[i]->uid() == uid) {
6343 trackCount++;
6344 }
6345 }
6346 return trackCount;
6347 }
6348
6349 bool PlaybackThread::IsTimestampAdvancing::check(AudioStreamOut* output)
6350 {
6351 // Check the timestamp to see if it's advancing once every 150ms. If we check too frequently, we
6352 // could falsely detect that the frame position has stalled due to underrun because we haven't
6353 // given the Audio HAL enough time to update.
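    // The result is latched: between checks we return the previously computed value, and
    // clear() resets the latch to "advancing" so callers start from an optimistic state.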
6354 const nsecs_t nowNs = systemTime();
6355 if (nowNs - mPreviousNs < mMinimumTimeBetweenChecksNs) {
6356 return mLatchedValue;
6357 }
6358 mPreviousNs = nowNs;
6359 mLatchedValue = false;
6360 // Determine if the presentation position is still advancing.
6361 uint64_t position = 0;
6362 struct timespec unused;
6363 const status_t ret = output->getPresentationPosition(&position, &unused);
6364 if (ret == NO_ERROR) {
6365 if (position != mPreviousPosition) {
6366 mPreviousPosition = position;
6367 mLatchedValue = true;
6368 }
6369 }
6370 return mLatchedValue;
6371 }
6372
6373 void PlaybackThread::IsTimestampAdvancing::clear()
6374 {
6375 mLatchedValue = true;
6376 mPreviousPosition = 0;
6377 mPreviousNs = 0;
6378 }
6379
6380 // isTrackAllowed_l() must be called with ThreadBase::mutex() held
6381 bool MixerThread::isTrackAllowed_l(
6382 audio_channel_mask_t channelMask, audio_format_t format,
6383 audio_session_t sessionId, uid_t uid) const
6384 {
6385 if (!PlaybackThread::isTrackAllowed_l(channelMask, format, sessionId, uid)) {
6386 return false;
6387 }
6388 // Check validity as we don't call AudioMixer::create() here.
6389 if (!mAudioMixer->isValidFormat(format)) {
6390 ALOGW("%s: invalid format: %#x", __func__, format);
6391 return false;
6392 }
6393 if (!mAudioMixer->isValidChannelMask(channelMask)) {
6394 ALOGW("%s: invalid channelMask: %#x", __func__, channelMask);
6395 return false;
6396 }
6397 return true;
6398 }
6399
6400 // checkForNewParameter_l() must be called with ThreadBase::mutex() held
6401 bool MixerThread::checkForNewParameter_l(const String8& keyValuePair,
6402 status_t& status)
6403 {
6404 bool reconfig = false;
6405 status = NO_ERROR;
6406
6407 AutoPark<FastMixer> park(mFastMixer);
6408
6409 AudioParameter param = AudioParameter(keyValuePair);
6410 int value;
6411 if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
6412 reconfig = true;
6413 }
6414 if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
6415 if (!isValidPcmSinkFormat(static_cast<audio_format_t>(value))) {
6416 status = BAD_VALUE;
6417 } else {
6418 // no need to save value, since it's constant
6419 reconfig = true;
6420 }
6421 }
6422 if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
6423 if (!isValidPcmSinkChannelMask(static_cast<audio_channel_mask_t>(value))) {
6424 status = BAD_VALUE;
6425 } else {
6426 // no need to save value, since it's constant
6427 reconfig = true;
6428 }
6429 }
6430 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
6431 // do not accept frame count changes if tracks are open as the track buffer
6432 // size depends on frame count and correct behavior would not be guaranteed
6433 // if frame count is changed after track creation
6434 if (!mTracks.isEmpty()) {
6435 status = INVALID_OPERATION;
6436 } else {
6437 reconfig = true;
6438 }
6439 }
6440 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
6441 LOG_FATAL("Should not set routing device in MixerThread");
6442 }
6443
6444 if (status == NO_ERROR) {
6445 status = mOutput->stream->setParameters(keyValuePair);
6446 if (!mStandby && status == INVALID_OPERATION) {
6447 ALOGW("%s: setParameters failed with keyValuePair %s, entering standby",
6448 __func__, keyValuePair.c_str());
6449 mOutput->standby();
6450 mThreadMetrics.logEndInterval();
6451 mThreadSnapshot.onEnd();
6452 setStandby_l();
6453 mBytesWritten = 0;
6454 status = mOutput->stream->setParameters(keyValuePair);
6455 }
6456 if (status == NO_ERROR && reconfig) {
6457 readOutputParameters_l();
6458 delete mAudioMixer;
6459 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
6460 for (const auto &track : mTracks) {
6461 const int trackId = track->id();
6462 const status_t createStatus = mAudioMixer->create(
6463 trackId,
6464 track->channelMask(),
6465 track->format(),
6466 track->sessionId());
6467 ALOGW_IF(createStatus != NO_ERROR,
6468 "%s(): AudioMixer cannot create track(%d)"
6469 " mask %#x, format %#x, sessionId %d",
6470 __func__,
6471 trackId, track->channelMask(), track->format(), track->sessionId());
6472 }
6473 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
6474 }
6475 }
6476
6477 return reconfig;
6478 }
6479
6480
6481 void MixerThread::dumpInternals_l(int fd, const Vector<String16>& args)
6482 {
6483 PlaybackThread::dumpInternals_l(fd, args);
6484 dprintf(fd, " Thread throttle time (msecs): %u\n", (uint32_t)mThreadThrottleTimeMs);
6485 dprintf(fd, " AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
6486 dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
6487 dprintf(fd, " Master balance: %f (%s)\n", mMasterBalance.load(),
6488 (hasFastMixer() ? std::to_string(mFastMixer->getMasterBalance())
6489 : mBalance.toString()).c_str());
6490 if (hasFastMixer()) {
6491 dprintf(fd, " FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
6492
6493 // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
6494 // while we are dumping it. It may be inconsistent, but it won't mutate!
6495 // This is a large object so we place it on the heap.
6496 // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
6497 const std::unique_ptr<FastMixerDumpState> copy =
6498 std::make_unique<FastMixerDumpState>(mFastMixerDumpState);
6499 copy->dump(fd);
6500
6501 #ifdef STATE_QUEUE_DUMP
6502 // Similar for state queue
6503 StateQueueObserverDump observerCopy = mStateQueueObserverDump;
6504 observerCopy.dump(fd);
6505 StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
6506 mutatorCopy.dump(fd);
6507 #endif
6508
6509 #ifdef AUDIO_WATCHDOG
6510 if (mAudioWatchdog != 0) {
6511 // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
6512 AudioWatchdogDump wdCopy = mAudioWatchdogDump;
6513 wdCopy.dump(fd);
6514 }
6515 #endif
6516
6517 } else {
6518 dprintf(fd, " No FastMixer\n");
6519 }
6520
6521 dprintf(fd, "Bluetooth latency modes are %senabled\n",
6522 mBluetoothLatencyModesEnabled ? "" : "not ");
6523 dprintf(fd, "HAL does %ssupport Bluetooth latency modes\n", mOutput != nullptr &&
6524 mOutput->audioHwDev->supportsBluetoothVariableLatency() ? "" : "not ");
6525 dprintf(fd, "Supported latency modes: %s\n", toString(mSupportedLatencyModes).c_str());
6526 }
6527
6528 uint32_t MixerThread::idleSleepTimeUs() const
6529 {
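    // Idle sleep is half of one normal mix buffer duration:
    // (mNormalFrameCount / mSampleRate) seconds converted to microseconds, divided by 2.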
6530 return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2;
6531 }
6532
6533 uint32_t MixerThread::suspendSleepTimeUs() const
6534 {
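    // While suspended, sleep for one full normal mix buffer duration (in microseconds).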
6535 return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000);
6536 }
6537
6538 void MixerThread::cacheParameters_l()
6539 {
6540 PlaybackThread::cacheParameters_l();
6541
6542 // FIXME: Relaxed timing because of a certain device that can't meet latency
6543 // Should be reduced to 2x after the vendor fixes the driver issue
6544 // increase threshold again due to low power audio mode. The way this warning
6545 // threshold is calculated and its usefulness should be reconsidered anyway.
6546 maxPeriod = seconds(mNormalFrameCount) / mSampleRate * 15;
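    // i.e. the warning threshold is 15 nominal mix buffer durations, expressed in nanoseconds.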
6547 }
6548
6549 void MixerThread::onHalLatencyModesChanged_l() {
6550 mAfThreadCallback->onSupportedLatencyModesChanged(mId, mSupportedLatencyModes);
6551 }
6552
6553 void MixerThread::setHalLatencyMode_l() {
6554 // Only handle latency mode if:
6555 // - mBluetoothLatencyModesEnabled is true
6556 // - the HAL supports latency modes
6557 // - the selected device is Bluetooth LE or A2DP
6558 if (!mBluetoothLatencyModesEnabled.load() || mSupportedLatencyModes.empty()) {
6559 return;
6560 }
6561 if (mOutDeviceTypeAddrs.size() != 1
6562 || !(audio_is_a2dp_out_device(mOutDeviceTypeAddrs[0].mType)
6563 || audio_is_ble_out_device(mOutDeviceTypeAddrs[0].mType))) {
6564 return;
6565 }
6566
6567 audio_latency_mode_t latencyMode = AUDIO_LATENCY_MODE_FREE;
6568 if (mSupportedLatencyModes.size() == 1) {
6569         // If the HAL currently supports only one latency mode, confirm the choice
6570 latencyMode = mSupportedLatencyModes[0];
6571 } else if (mSupportedLatencyModes.size() > 1) {
6572 // Request low latency if:
6573 // - At least one active track is either:
6574 // - a fast track with gaming usage or
6575         //    - a track with accessibility usage
6576 for (const auto& track : mActiveTracks) {
6577 if ((track->isFastTrack() && track->attributes().usage == AUDIO_USAGE_GAME)
6578 || track->attributes().usage == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY) {
6579 latencyMode = AUDIO_LATENCY_MODE_LOW;
6580 break;
6581 }
6582 }
6583 }
6584
6585 if (latencyMode != mSetLatencyMode) {
6586 status_t status = mOutput->stream->setLatencyMode(latencyMode);
6587 ALOGD("%s: thread(%d) setLatencyMode(%s) returned %d",
6588 __func__, mId, toString(latencyMode).c_str(), status);
6589 if (status == NO_ERROR) {
6590 mSetLatencyMode = latencyMode;
6591 }
6592 }
6593 }
6594
6595 void MixerThread::updateHalSupportedLatencyModes_l() {
6596
6597 if (mOutput == nullptr || mOutput->stream == nullptr) {
6598 return;
6599 }
6600 std::vector<audio_latency_mode_t> latencyModes;
6601 const status_t status = mOutput->stream->getRecommendedLatencyModes(&latencyModes);
6602 if (status != NO_ERROR) {
6603 latencyModes.clear();
6604 }
6605 if (latencyModes != mSupportedLatencyModes) {
6606 ALOGD("%s: thread(%d) status %d supported latency modes: %s",
6607 __func__, mId, status, toString(latencyModes).c_str());
6608 mSupportedLatencyModes.swap(latencyModes);
6609 sendHalLatencyModesChangedEvent_l();
6610 }
6611 }
6612
6613 status_t MixerThread::getSupportedLatencyModes(
6614 std::vector<audio_latency_mode_t>* modes) {
6615 if (modes == nullptr) {
6616 return BAD_VALUE;
6617 }
6618 audio_utils::lock_guard _l(mutex());
6619 *modes = mSupportedLatencyModes;
6620 return NO_ERROR;
6621 }
6622
6623 void MixerThread::onRecommendedLatencyModeChanged(
6624 std::vector<audio_latency_mode_t> modes) {
6625 audio_utils::lock_guard _l(mutex());
6626 if (modes != mSupportedLatencyModes) {
6627 ALOGD("%s: thread(%d) supported latency modes: %s",
6628 __func__, mId, toString(modes).c_str());
6629 mSupportedLatencyModes.swap(modes);
6630 sendHalLatencyModesChangedEvent_l();
6631 }
6632 }
6633
6634 status_t MixerThread::setBluetoothVariableLatencyEnabled(bool enabled) {
6635 if (mOutput == nullptr || mOutput->audioHwDev == nullptr
6636 || !mOutput->audioHwDev->supportsBluetoothVariableLatency()) {
6637 return INVALID_OPERATION;
6638 }
6639 mBluetoothLatencyModesEnabled.store(enabled);
6640 return NO_ERROR;
6641 }
6642
6643 // ----------------------------------------------------------------------------
6644
6645 /* static */
6646 sp<IAfPlaybackThread> IAfPlaybackThread::createDirectOutputThread(
6647 const sp<IAfThreadCallback>& afThreadCallback,
6648 AudioStreamOut* output, audio_io_handle_t id, bool systemReady,
6649 const audio_offload_info_t& offloadInfo) {
6650 return sp<DirectOutputThread>::make(
6651 afThreadCallback, output, id, systemReady, offloadInfo);
6652 }
6653
6654 DirectOutputThread::DirectOutputThread(const sp<IAfThreadCallback>& afThreadCallback,
6655 AudioStreamOut* output, audio_io_handle_t id, ThreadBase::type_t type, bool systemReady,
6656 const audio_offload_info_t& offloadInfo)
6657 : PlaybackThread(afThreadCallback, output, id, type, systemReady)
6658 , mOffloadInfo(offloadInfo)
6659 {
6660 setMasterBalance(afThreadCallback->getMasterBalance_l());
6661 }
6662
6663 DirectOutputThread::~DirectOutputThread()
6664 {
6665 }
6666
6667 void DirectOutputThread::dumpInternals_l(int fd, const Vector<String16>& args)
6668 {
6669 PlaybackThread::dumpInternals_l(fd, args);
6670 dprintf(fd, " Master balance: %f Left: %f Right: %f\n",
6671 mMasterBalance.load(), mMasterBalanceLeft, mMasterBalanceRight);
6672 }
6673
6674 void DirectOutputThread::setMasterBalance(float balance)
6675 {
6676 audio_utils::lock_guard _l(mutex());
6677 if (mMasterBalance != balance) {
6678 mMasterBalance.store(balance);
6679 mBalance.computeStereoBalance(balance, &mMasterBalanceLeft, &mMasterBalanceRight);
6680 broadcast_l();
6681 }
6682 }
6683
6684 void DirectOutputThread::processVolume_l(IAfTrack* track, bool lastTrack)
6685 {
6686 float left, right;
6687
6688 // Ensure volumeshaper state always advances even when muted.
6689 const sp<AudioTrackServerProxy> proxy = track->audioTrackServerProxy();
6690
6691 const int64_t frames = mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
6692 const int64_t time = mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
6693
6694 ALOGVV("%s: Direct/Offload bufferConsumed:%zu timestamp frames:%lld time:%lld",
6695 __func__, proxy->framesReleased(), (long long)frames, (long long)time);
6696
6697 const int64_t volumeShaperFrames =
6698 mMonotonicFrameCounter.updateAndGetMonotonicFrameCount(frames, time);
6699 const auto [shaperVolume, shaperActive] =
6700 track->getVolumeHandler()->getVolume(volumeShaperFrames);
6701 mVolumeShaperActive = shaperActive;
6702
6703 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
6704 left = float_from_gain(gain_minifloat_unpack_left(vlr));
6705 right = float_from_gain(gain_minifloat_unpack_right(vlr));
6706
6707 const bool clientVolumeMute = (left == 0.f && right == 0.f);
6708
6709 if (mMasterMute || mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
6710 left = right = 0;
6711 } else {
6712 float typeVolume = mStreamTypes[track->streamType()].volume;
6713 const float v = mMasterVolume * typeVolume * shaperVolume;
6714
6715 if (left > GAIN_FLOAT_UNITY) {
6716 left = GAIN_FLOAT_UNITY;
6717 }
6718 if (right > GAIN_FLOAT_UNITY) {
6719 right = GAIN_FLOAT_UNITY;
6720 }
6721 left *= v;
6722 right *= v;
6723 if (mAfThreadCallback->getMode() != AUDIO_MODE_IN_COMMUNICATION
6724 || audio_channel_count_from_out_mask(mChannelMask) > 1) {
6725 left *= mMasterBalanceLeft; // DirectOutputThread balance applied as track volume
6726 right *= mMasterBalanceRight;
6727 }
6728 }
6729
6730 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
6731 /*muteState=*/{mMasterMute,
6732 mStreamTypes[track->streamType()].volume == 0.f,
6733 mStreamTypes[track->streamType()].mute,
6734 track->isPlaybackRestricted(),
6735 clientVolumeMute,
6736 shaperVolume == 0.f});
6737
6738 if (lastTrack) {
6739 track->setFinalVolume(left, right);
6740 if (left != mLeftVolFloat || right != mRightVolFloat) {
6741 mLeftVolFloat = left;
6742 mRightVolFloat = right;
6743
6744 // Delegate volume control to effect in track effect chain if needed
6745 // only one effect chain can be present on DirectOutputThread, so if
6746 // there is one, the track is connected to it
6747 if (!mEffectChains.isEmpty()) {
6748 // if effect chain exists, volume is handled by it.
6749 // Convert volumes from float to 8.24
6750 uint32_t vl = (uint32_t)(left * (1 << 24));
6751 uint32_t vr = (uint32_t)(right * (1 << 24));
6752 // Direct/Offload effect chains set output volume in setVolume().
6753 (void)mEffectChains[0]->setVolume(&vl, &vr);
6754 } else {
6755 // otherwise we directly set the volume.
6756 setVolumeForOutput_l(left, right);
6757 }
6758 }
6759 }
6760 }
6761
6762 void DirectOutputThread::onAddNewTrack_l()
6763 {
6764 sp<IAfTrack> previousTrack = mPreviousTrack.promote();
6765 sp<IAfTrack> latestTrack = mActiveTracks.getLatest();
6766
6767 if (previousTrack != 0 && latestTrack != 0) {
6768 if (mType == DIRECT) {
6769 if (previousTrack.get() != latestTrack.get()) {
6770 mFlushPending = true;
6771 }
6772 } else /* mType == OFFLOAD */ {
6773 if (previousTrack->sessionId() != latestTrack->sessionId() ||
6774 previousTrack->isFlushPending()) {
6775 mFlushPending = true;
6776 }
6777 }
6778 } else if (previousTrack == 0) {
6779         // an old track could be added back during the track transition for direct
6780         // output, so always issue a flush to discard data of the previous track if it
6781         // was already destroyed with the HAL paused; the flush then lets playback resume
6782 mFlushPending = true;
6783 }
6784 PlaybackThread::onAddNewTrack_l();
6785 }
6786
6787 PlaybackThread::mixer_state DirectOutputThread::prepareTracks_l(
6788 Vector<sp<IAfTrack>>* tracksToRemove
6789 )
6790 {
6791 size_t count = mActiveTracks.size();
6792 mixer_state mixerStatus = MIXER_IDLE;
6793 bool doHwPause = false;
6794 bool doHwResume = false;
6795
6796 // find out which tracks need to be processed
6797 for (const sp<IAfTrack>& t : mActiveTracks) {
6798 if (t->isInvalid()) {
6799 ALOGW("An invalidated track shouldn't be in active list");
6800 tracksToRemove->add(t);
6801 continue;
6802 }
6803
6804 IAfTrack* const track = t.get();
6805 #ifdef VERY_VERY_VERBOSE_LOGGING
6806 audio_track_cblk_t* cblk = track->cblk();
6807 #endif
6808 // Only consider last track started for volume and mixer state control.
6809 // In theory an older track could underrun and restart after the new one starts
6810 // but as we only care about the transition phase between two tracks on a
6811 // direct output, it is not a problem to ignore the underrun case.
6812 sp<IAfTrack> l = mActiveTracks.getLatest();
6813 bool last = l.get() == track;
6814
6815 if (track->isPausePending()) {
6816 track->pauseAck();
6817 // It is possible a track might have been flushed or stopped.
6818 // Other operations such as flush pending might occur on the next prepare.
6819 if (track->isPausing()) {
6820 track->setPaused();
6821 }
6822 // Always perform pause, as an immediate flush will change
6823 // the pause state to be no longer isPausing().
6824 if (mHwSupportsPause && last && !mHwPaused) {
6825 doHwPause = true;
6826 mHwPaused = true;
6827 }
6828 } else if (track->isFlushPending()) {
6829 track->flushAck();
6830 if (last) {
6831 mFlushPending = true;
6832 }
6833 } else if (track->isResumePending()) {
6834 track->resumeAck();
6835 if (last) {
6836 mLeftVolFloat = mRightVolFloat = -1.0;
6837 if (mHwPaused) {
6838 doHwResume = true;
6839 mHwPaused = false;
6840 }
6841 }
6842 }
6843
6844 // The first time a track is added we wait
6845 // for all its buffers to be filled before processing it.
6846 // Allow draining the buffer in case the client
6847 // app does not call stop() and relies on underrun to stop:
6848 // hence the test on (track->retryCount() > 1).
6849 // If track->retryCount() <= 1 then track is about to be disabled, paused, removed,
6850 // so we accept any nonzero amount of data delivered by the AudioTrack (which will
6851 // reset the retry counter).
6852 // Do not use a high threshold for compressed audio.
6853
6854 // target retry count that we will use is based on the time we wait for retries.
6855 const int32_t targetRetryCount = kMaxTrackRetriesDirectMs * 1000 / mActiveSleepTimeUs;
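        // (kMaxTrackRetriesDirectMs is in milliseconds and mActiveSleepTimeUs in microseconds,
        // hence the * 1000: the retries add up to roughly kMaxTrackRetriesDirectMs of waiting.)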
6856 // the retry threshold is when we accept any size for PCM data. This is slightly
6857 // smaller than the retry count so we can push small bits of data without a glitch.
6858 const int32_t retryThreshold = targetRetryCount > 2 ? targetRetryCount - 1 : 1;
6859 uint32_t minFrames;
6860 if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()
6861 && (track->retryCount() > retryThreshold) && audio_has_proportional_frames(mFormat)) {
6862 minFrames = mNormalFrameCount;
6863 } else {
6864 minFrames = 1;
6865 }
6866
6867 const size_t framesReady = track->framesReady();
6868 const int trackId = track->id();
6869 if (ATRACE_ENABLED()) {
6870 std::string traceName("nRdy");
6871 traceName += std::to_string(trackId);
6872 ATRACE_INT(traceName.c_str(), framesReady);
6873 }
6874 if ((framesReady >= minFrames) && track->isReady() && !track->isPaused() &&
6875 !track->isStopping_2() && !track->isStopped())
6876 {
6877 ALOGVV("track(%d) s=%08x [OK]", trackId, cblk->mServer);
6878
6879 if (track->fillingStatus() == IAfTrack::FS_FILLED) {
6880 track->fillingStatus() = IAfTrack::FS_ACTIVE;
6881 if (last) {
6882 // make sure processVolume_l() will apply new volume even if 0
6883 mLeftVolFloat = mRightVolFloat = -1.0;
6884 }
6885 if (!mHwSupportsPause) {
6886 track->resumeAck();
6887 }
6888 }
6889
6890 // compute volume for this track
6891 processVolume_l(track, last);
6892 if (last) {
6893 sp<IAfTrack> previousTrack = mPreviousTrack.promote();
6894 if (previousTrack != 0) {
6895 if (track != previousTrack.get()) {
6896 // Flush any data still being written from last track
6897 mBytesRemaining = 0;
6898 // Invalidate previous track to force a seek when resuming.
6899 previousTrack->invalidate();
6900 }
6901 }
6902 mPreviousTrack = track;
6903
6904 // reset retry count
6905 track->retryCount() = targetRetryCount;
6906 mActiveTrack = t;
6907 mixerStatus = MIXER_TRACKS_READY;
6908 if (mHwPaused) {
6909 doHwResume = true;
6910 mHwPaused = false;
6911 }
6912 }
6913 } else {
6914             // clear effect chain input buffer if the last active track has started to underrun
6915 // to avoid sending previous audio buffer again to effects
6916 if (!mEffectChains.isEmpty() && last) {
6917 mEffectChains[0]->clearInputBuffer();
6918 }
6919 if (track->isStopping_1()) {
6920 track->setState(IAfTrackBase::STOPPING_2);
6921 if (last && mHwPaused) {
6922 doHwResume = true;
6923 mHwPaused = false;
6924 }
6925 }
6926 if ((track->sharedBuffer() != 0) || track->isStopped() ||
6927 track->isStopping_2() || track->isPaused()) {
6928 // We have consumed all the buffers of this track.
6929 // Remove it from the list of active tracks.
6930 bool presComplete = false;
6931 if (mStandby || !last ||
6932 (presComplete = track->presentationComplete(latency_l())) ||
6933 track->isPaused() || mHwPaused) {
6934 if (presComplete) {
6935 mOutput->presentationComplete();
6936 }
6937 if (track->isStopping_2()) {
6938 track->setState(IAfTrackBase::STOPPED);
6939 }
6940 if (track->isStopped()) {
6941 track->reset();
6942 }
6943 tracksToRemove->add(track);
6944 }
6945 } else {
6946 // No buffers for this track. Give it a few chances to
6947 // fill a buffer, then remove it from active list.
6948 // Only consider last track started for mixer state control
6949 bool isTimestampAdvancing = mIsTimestampAdvancing.check(mOutput);
6950 if (!isTunerStream() // tuner streams remain active in underrun
6951 && --(track->retryCount()) <= 0) {
6952 if (isTimestampAdvancing) { // HAL is still playing audio, give us more time.
6953 track->retryCount() = kMaxTrackRetriesOffload;
6954 } else {
6955 ALOGI("%s BUFFER TIMEOUT: remove track(%d) from active list due to"
6956 " underrun on thread %d", __func__, trackId, mId);
6957 tracksToRemove->add(track);
6958 // indicate to client process that the track was disabled because of
6959 // underrun; it will then automatically call start() when data is available
6960 track->disable();
6961 // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
6962 // unlike mixerthread, HAL can be paused for direct output
6963                         ALOGW("pause because of UNDERRUN, framesReady = %zu, "
6964 "minFrames = %u, mFormat = %#x",
6965 framesReady, minFrames, mFormat);
6966 if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
6967 doHwPause = true;
6968 mHwPaused = true;
6969 }
6970 }
6971 } else if (last) {
6972 mixerStatus = MIXER_TRACKS_ENABLED;
6973 }
6974 }
6975 }
6976 }
6977
6978 // if an active track did not command a flush, check for pending flush on stopped tracks
6979 if (!mFlushPending) {
6980 for (size_t i = 0; i < mTracks.size(); i++) {
6981 if (mTracks[i]->isFlushPending()) {
6982 mTracks[i]->flushAck();
6983 mFlushPending = true;
6984 }
6985 }
6986 }
6987
6988 // make sure the pause/flush/resume sequence is executed in the right order.
6989 // If a flush is pending and a track is active but the HW is not paused, force a HW pause
6990 // before flush and then resume HW. This can happen in case of pause/flush/resume
6991 // if resume is received before pause is executed.
6992 if (mHwSupportsPause && !mStandby &&
6993 (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
6994 status_t result = mOutput->stream->pause();
6995 ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
6996 doHwResume = !doHwPause; // resume if pause is due to flush.
6997 }
6998 if (mFlushPending) {
6999 flushHw_l();
7000 }
7001 if (mHwSupportsPause && !mStandby && doHwResume) {
7002 status_t result = mOutput->stream->resume();
7003 ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
7004 }
7005 // remove all the tracks that need to be...
7006 removeTracks_l(*tracksToRemove);
7007
7008 return mixerStatus;
7009 }
7010
7011 void DirectOutputThread::threadLoop_mix()
7012 {
7013 size_t frameCount = mFrameCount;
7014 int8_t *curBuf = (int8_t *)mSinkBuffer;
7015 // output audio to hardware
7016 while (frameCount) {
7017 AudioBufferProvider::Buffer buffer;
7018 buffer.frameCount = frameCount;
7019 status_t status = mActiveTrack->getNextBuffer(&buffer);
7020 if (status != NO_ERROR || buffer.raw == NULL) {
7021 // no need to pad with 0 for compressed audio
7022 if (audio_has_proportional_frames(mFormat)) {
7023 memset(curBuf, 0, frameCount * mFrameSize);
7024 }
7025 break;
7026 }
7027 memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
7028 frameCount -= buffer.frameCount;
7029 curBuf += buffer.frameCount * mFrameSize;
7030 mActiveTrack->releaseBuffer(&buffer);
7031 }
7032 mCurrentWriteLength = curBuf - (int8_t *)mSinkBuffer;
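    // mCurrentWriteLength is the number of bytes actually obtained from the track this cycle;
    // it excludes any zero padding written above, so it may be less than a full sink buffer.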
7033 mSleepTimeUs = 0;
7034 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
7035 mActiveTrack.clear();
7036 }
7037
7038 void DirectOutputThread::threadLoop_sleepTime()
7039 {
7040 // do not write to HAL when paused
7041 if (mHwPaused || (usesHwAvSync() && mStandby)) {
7042 mSleepTimeUs = mIdleSleepTimeUs;
7043 return;
7044 }
7045 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
7046 mSleepTimeUs = mActiveSleepTimeUs;
7047 } else {
7048 mSleepTimeUs = mIdleSleepTimeUs;
7049 }
7050 // Note: In S or later, we do not write zeroes for
7051 // linear or proportional PCM direct tracks in underrun.
7052 }
7053
7054 void DirectOutputThread::threadLoop_exit()
7055 {
7056 {
7057 audio_utils::lock_guard _l(mutex());
7058 for (size_t i = 0; i < mTracks.size(); i++) {
7059 if (mTracks[i]->isFlushPending()) {
7060 mTracks[i]->flushAck();
7061 mFlushPending = true;
7062 }
7063 }
7064 if (mFlushPending) {
7065 flushHw_l();
7066 }
7067 }
7068 PlaybackThread::threadLoop_exit();
7069 }
7070
7071 // must be called with thread mutex locked
7072 bool DirectOutputThread::shouldStandby_l()
7073 {
7074 bool trackPaused = false;
7075 bool trackStopped = false;
7076 bool trackDisabled = false;
7077
7078     // do not put the HAL in standby when paused. NuPlayer clears the offloaded AudioTrack
7079 // after a timeout and we will enter standby then.
7080 // On offload threads, do not enter standby if the main track is still underrunning.
7081 if (mTracks.size() > 0) {
7082 const auto& mainTrack = mTracks[mTracks.size() - 1];
7083
7084 trackPaused = mainTrack->isPaused();
7085 trackStopped = mainTrack->isStopped() || mainTrack->state() == IAfTrackBase::IDLE;
7086 trackDisabled = (mType == OFFLOAD) && mainTrack->isDisabled();
7087 }
7088
7089 return !mStandby && !(trackPaused || (mHwPaused && !trackStopped) || trackDisabled);
7090 }
7091
7092 // checkForNewParameter_l() must be called with ThreadBase::mutex() held
7093 bool DirectOutputThread::checkForNewParameter_l(const String8& keyValuePair,
7094 status_t& status)
7095 {
7096 bool reconfig = false;
7097 status = NO_ERROR;
7098
7099 AudioParameter param = AudioParameter(keyValuePair);
7100 int value;
7101 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
7102 LOG_FATAL("Should not set routing device in DirectOutputThread");
7103 }
7104 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
7105 // do not accept frame count changes if tracks are open as the track buffer
7106         // size depends on frame count and correct behavior would not be guaranteed
7107 // if frame count is changed after track creation
7108 if (!mTracks.isEmpty()) {
7109 status = INVALID_OPERATION;
7110 } else {
7111 reconfig = true;
7112 }
7113 }
7114 if (status == NO_ERROR) {
7115 status = mOutput->stream->setParameters(keyValuePair);
7116 if (!mStandby && status == INVALID_OPERATION) {
7117 mOutput->standby();
7118 if (!mStandby) {
7119 mThreadMetrics.logEndInterval();
7120 mThreadSnapshot.onEnd();
7121 setStandby_l();
7122 }
7123 mBytesWritten = 0;
7124 status = mOutput->stream->setParameters(keyValuePair);
7125 }
7126 if (status == NO_ERROR && reconfig) {
7127 readOutputParameters_l();
7128 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
7129 }
7130 }
7131
7132 return reconfig;
7133 }
7134
7135 uint32_t DirectOutputThread::activeSleepTimeUs() const
7136 {
7137 uint32_t time;
7138 if (audio_has_proportional_frames(mFormat)) {
7139 time = PlaybackThread::activeSleepTimeUs();
7140 } else {
7141 time = kDirectMinSleepTimeUs;
7142 }
7143 return time;
7144 }
7145
7146 uint32_t DirectOutputThread::idleSleepTimeUs() const
7147 {
7148 uint32_t time;
7149 if (audio_has_proportional_frames(mFormat)) {
7150 time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
7151 } else {
7152 time = kDirectMinSleepTimeUs;
7153 }
7154 return time;
7155 }
7156
7157 uint32_t DirectOutputThread::suspendSleepTimeUs() const
7158 {
7159 uint32_t time;
7160 if (audio_has_proportional_frames(mFormat)) {
7161 time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
7162 } else {
7163 time = kDirectMinSleepTimeUs;
7164 }
7165 return time;
7166 }
7167
7168 void DirectOutputThread::cacheParameters_l()
7169 {
7170 PlaybackThread::cacheParameters_l();
7171
7172 // use shorter standby delay as on normal output to release
7173 // hardware resources as soon as possible
7174 // no delay on outputs with HW A/V sync
7175 if (usesHwAvSync()) {
7176 mStandbyDelayNs = 0;
7177 } else if ((mType == OFFLOAD) && !audio_has_proportional_frames(mFormat)) {
7178 mStandbyDelayNs = kOffloadStandbyDelayNs;
7179 } else {
7180 mStandbyDelayNs = microseconds(mActiveSleepTimeUs*2);
7181 }
7182 }
7183
7184 void DirectOutputThread::flushHw_l()
7185 {
7186 PlaybackThread::flushHw_l();
7187 mOutput->flush();
7188 mFlushPending = false;
7189 mTimestampVerifier.discontinuity(discontinuityForStandbyOrFlush());
7190 mTimestamp.clear();
7191 mMonotonicFrameCounter.onFlush();
7192 // We do not reset mHwPaused which is hidden from the Track client.
7193 // Note: the client track in Tracks.cpp and AudioTrack.cpp
7194 // has a FLUSHED state but the DirectOutputThread does not;
7195 // those tracks will continue to show isStopped().
7196 }
7197
7198 int64_t DirectOutputThread::computeWaitTimeNs_l() const {
7199 // If a VolumeShaper is active, we must wake up periodically to update volume.
7200 const int64_t NS_PER_MS = 1000000;
7201 return mVolumeShaperActive ?
7202 kMinNormalSinkBufferSizeMs * NS_PER_MS : PlaybackThread::computeWaitTimeNs_l();
7203 }
7204
7205 // ----------------------------------------------------------------------------
7206
7207 AsyncCallbackThread::AsyncCallbackThread(
7208 const wp<PlaybackThread>& playbackThread)
7209 : Thread(false /*canCallJava*/),
7210 mPlaybackThread(playbackThread),
7211 mWriteAckSequence(0),
7212 mDrainSequence(0),
7213 mAsyncError(ASYNC_ERROR_NONE)
7214 {
7215 }
7216
7217 void AsyncCallbackThread::onFirstRef()
7218 {
7219 run("Offload Cbk", ANDROID_PRIORITY_URGENT_AUDIO);
7220 }
7221
7222 bool AsyncCallbackThread::threadLoop()
7223 {
7224 while (!exitPending()) {
7225 uint32_t writeAckSequence;
7226 uint32_t drainSequence;
7227 AsyncError asyncError;
7228
7229 {
7230 audio_utils::unique_lock _l(mutex());
7231 while (!((mWriteAckSequence & 1) ||
7232 (mDrainSequence & 1) ||
7233 mAsyncError ||
7234 exitPending())) {
7235 mWaitWorkCV.wait(_l);
7236 }
7237
7238 if (exitPending()) {
7239 break;
7240 }
7241 ALOGV("AsyncCallbackThread mWriteAckSequence %d mDrainSequence %d",
7242 mWriteAckSequence, mDrainSequence);
7243 writeAckSequence = mWriteAckSequence;
7244 mWriteAckSequence &= ~1;
7245 drainSequence = mDrainSequence;
7246 mDrainSequence &= ~1;
7247 asyncError = mAsyncError;
7248 mAsyncError = ASYNC_ERROR_NONE;
7249 }
7250 {
7251 const sp<PlaybackThread> playbackThread = mPlaybackThread.promote();
7252 if (playbackThread != 0) {
7253 if (writeAckSequence & 1) {
7254 playbackThread->resetWriteBlocked(writeAckSequence >> 1);
7255 }
7256 if (drainSequence & 1) {
7257 playbackThread->resetDraining(drainSequence >> 1);
7258 }
7259 if (asyncError != ASYNC_ERROR_NONE) {
7260 playbackThread->onAsyncError(asyncError == ASYNC_ERROR_HARD);
7261 }
7262 }
7263 }
7264 }
7265 return false;
7266 }
7267
7268 void AsyncCallbackThread::exit()
7269 {
7270 ALOGV("AsyncCallbackThread::exit");
7271 audio_utils::lock_guard _l(mutex());
7272 requestExit();
7273 mWaitWorkCV.notify_all();
7274 }
7275
7276 void AsyncCallbackThread::setWriteBlocked(uint32_t sequence)
7277 {
7278 audio_utils::lock_guard _l(mutex());
7279 // bit 0 is cleared
7280 mWriteAckSequence = sequence << 1;
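    // The upper bits carry the sequence number from the PlaybackThread; bit 0 is set by
    // resetWriteBlocked() below when the HAL write callback fires, and threadLoop() shifts
    // it back out before handing the sequence to the PlaybackThread.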
7281 }
7282
7283 void AsyncCallbackThread::resetWriteBlocked()
7284 {
7285 audio_utils::lock_guard _l(mutex());
7286 // ignore unexpected callbacks
7287 if (mWriteAckSequence & 2) {
7288 mWriteAckSequence |= 1;
7289 mWaitWorkCV.notify_one();
7290 }
7291 }
7292
7293 void AsyncCallbackThread::setDraining(uint32_t sequence)
7294 {
7295 audio_utils::lock_guard _l(mutex());
7296 // bit 0 is cleared
7297 mDrainSequence = sequence << 1;
7298 }
7299
7300 void AsyncCallbackThread::resetDraining()
7301 {
7302 audio_utils::lock_guard _l(mutex());
7303 // ignore unexpected callbacks
7304 if (mDrainSequence & 2) {
7305 mDrainSequence |= 1;
7306 mWaitWorkCV.notify_one();
7307 }
7308 }
7309
7310 void AsyncCallbackThread::setAsyncError(bool isHardError)
7311 {
7312 audio_utils::lock_guard _l(mutex());
7313 mAsyncError = isHardError ? ASYNC_ERROR_HARD : ASYNC_ERROR_SOFT;
7314 mWaitWorkCV.notify_one();
7315 }
7316
7317
7318 // ----------------------------------------------------------------------------
7319
7320 /* static */
7321 sp<IAfPlaybackThread> IAfPlaybackThread::createOffloadThread(
7322 const sp<IAfThreadCallback>& afThreadCallback,
7323 AudioStreamOut* output, audio_io_handle_t id, bool systemReady,
7324 const audio_offload_info_t& offloadInfo) {
7325 return sp<OffloadThread>::make(afThreadCallback, output, id, systemReady, offloadInfo);
7326 }
7327
7328 OffloadThread::OffloadThread(const sp<IAfThreadCallback>& afThreadCallback,
7329 AudioStreamOut* output, audio_io_handle_t id, bool systemReady,
7330 const audio_offload_info_t& offloadInfo)
7331 : DirectOutputThread(afThreadCallback, output, id, OFFLOAD, systemReady, offloadInfo),
7332 mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
7333 {
7334 //FIXME: mStandby should be set to true by the ThreadBase constructor
7335 mStandby = true;
7336 mKeepWakeLock = property_get_bool("ro.audio.offload_wakelock", true /* default_value */);
7337 }
7338
7339 void OffloadThread::threadLoop_exit()
7340 {
7341 if (mFlushPending || mHwPaused) {
7342 // If a flush is pending or track was paused, just discard buffered data
7343 audio_utils::lock_guard l(mutex());
7344 flushHw_l();
7345 } else {
7346 mMixerStatus = MIXER_DRAIN_ALL;
7347 threadLoop_drain();
7348 }
7349 if (mUseAsyncWrite) {
7350 ALOG_ASSERT(mCallbackThread != 0);
7351 mCallbackThread->exit();
7352 }
7353 PlaybackThread::threadLoop_exit();
7354 }
7355
7356 PlaybackThread::mixer_state OffloadThread::prepareTracks_l(
7357 Vector<sp<IAfTrack>>* tracksToRemove
7358 )
7359 {
7360 size_t count = mActiveTracks.size();
7361
7362 mixer_state mixerStatus = MIXER_IDLE;
7363 bool doHwPause = false;
7364 bool doHwResume = false;
7365
7366 ALOGV("OffloadThread::prepareTracks_l active tracks %zu", count);
7367
7368 // find out which tracks need to be processed
7369 for (const sp<IAfTrack>& t : mActiveTracks) {
7370 IAfTrack* const track = t.get();
7371 #ifdef VERY_VERY_VERBOSE_LOGGING
7372 audio_track_cblk_t* cblk = track->cblk();
7373 #endif
7374 // Only consider last track started for volume and mixer state control.
7375 // In theory an older track could underrun and restart after the new one starts
7376 // but as we only care about the transition phase between two tracks on a
7377 // direct output, it is not a problem to ignore the underrun case.
7378 sp<IAfTrack> l = mActiveTracks.getLatest();
7379 bool last = l.get() == track;
7380
7381 if (track->isInvalid()) {
7382 ALOGW("An invalidated track shouldn't be in active list");
7383 tracksToRemove->add(track);
7384 continue;
7385 }
7386
7387 if (track->state() == IAfTrackBase::IDLE) {
7388 ALOGW("An idle track shouldn't be in active list");
7389 continue;
7390 }
7391
7392 if (track->isPausePending()) {
7393 track->pauseAck();
7394 // It is possible a track might have been flushed or stopped.
7395 // Other operations such as flush pending might occur on the next prepare.
7396 if (track->isPausing()) {
7397 track->setPaused();
7398 }
7399 // Always perform pause if last, as an immediate flush will change
7400 // the pause state to be no longer isPausing().
7401 if (last) {
7402 if (mHwSupportsPause && !mHwPaused) {
7403 doHwPause = true;
7404 mHwPaused = true;
7405 }
7406 // If we were part way through writing the mixbuffer to
7407 // the HAL we must save this until we resume
7408 // BUG - this will be wrong if a different track is made active,
7409 // in that case we want to discard the pending data in the
7410 // mixbuffer and tell the client to present it again when the
7411 // track is resumed
7412 mPausedWriteLength = mCurrentWriteLength;
7413 mPausedBytesRemaining = mBytesRemaining;
7414 mBytesRemaining = 0; // stop writing
7415 }
7416 tracksToRemove->add(track);
7417 } else if (track->isFlushPending()) {
7418 if (track->isStopping_1()) {
7419 track->retryCount() = kMaxTrackStopRetriesOffload;
7420 } else {
7421 track->retryCount() = kMaxTrackRetriesOffload;
7422 }
7423 track->flushAck();
7424 if (last) {
7425 mFlushPending = true;
7426 }
7427 } else if (track->isResumePending()){
7428 track->resumeAck();
7429 if (last) {
7430 if (mPausedBytesRemaining) {
7431 // Need to continue write that was interrupted
7432 mCurrentWriteLength = mPausedWriteLength;
7433 mBytesRemaining = mPausedBytesRemaining;
7434 mPausedBytesRemaining = 0;
7435 }
7436 if (mHwPaused) {
7437 doHwResume = true;
7438 mHwPaused = false;
7439 // threadLoop_mix() will handle the case that we need to
7440 // resume an interrupted write
7441 }
7442 // enable write to audio HAL
7443 mSleepTimeUs = 0;
7444
7445 mLeftVolFloat = mRightVolFloat = -1.0;
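                // -1.0 is a sentinel: it marks the cached volume as stale so processVolume_l()
                // reapplies the track volume (even if it is 0) on the first mix after resume.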
7446
7447 // Do not handle new data in this iteration even if track->framesReady()
7448 mixerStatus = MIXER_TRACKS_ENABLED;
7449 }
7450 } else if (track->framesReady() && track->isReady() &&
7451 !track->isPaused() && !track->isTerminated() && !track->isStopping_2()) {
7452 ALOGVV("OffloadThread: track(%d) s=%08x [OK]", track->id(), cblk->mServer);
7453 if (track->fillingStatus() == IAfTrack::FS_FILLED) {
7454 track->fillingStatus() = IAfTrack::FS_ACTIVE;
7455 if (last) {
7456 // make sure processVolume_l() will apply new volume even if 0
7457 mLeftVolFloat = mRightVolFloat = -1.0;
7458 }
7459 }
7460
7461 if (last) {
7462 sp<IAfTrack> previousTrack = mPreviousTrack.promote();
7463 if (previousTrack != 0) {
7464 if (track != previousTrack.get()) {
7465 // Flush any data still being written from last track
7466 mBytesRemaining = 0;
7467 if (mPausedBytesRemaining) {
7468 // Last track was paused so we also need to flush saved
7469 // mixbuffer state and invalidate track so that it will
7470 // re-submit that unwritten data when it is next resumed
7471 mPausedBytesRemaining = 0;
7472 // Invalidate is a bit drastic - would be more efficient
7473 // to have a flag to tell client that some of the
7474 // previously written data was lost
7475 previousTrack->invalidate();
7476 }
7477 // flush data already sent to the DSP if changing audio session as audio
7478 // comes from a different source. Also invalidate previous track to force a
7479 // seek when resuming.
7480 if (previousTrack->sessionId() != track->sessionId()) {
7481 previousTrack->invalidate();
7482 }
7483 }
7484 }
7485 mPreviousTrack = track;
7486 // reset retry count
7487 if (track->isStopping_1()) {
7488 track->retryCount() = kMaxTrackStopRetriesOffload;
7489 } else {
7490 track->retryCount() = kMaxTrackRetriesOffload;
7491 }
7492 mActiveTrack = t;
7493 mixerStatus = MIXER_TRACKS_READY;
7494 }
7495 } else {
7496 ALOGVV("OffloadThread: track(%d) s=%08x [NOT READY]", track->id(), cblk->mServer);
7497 if (track->isStopping_1()) {
7498 if (--(track->retryCount()) <= 0) {
7499 // Hardware buffer can hold a large amount of audio so we must
7500 // wait for all current track's data to drain before we say
7501 // that the track is stopped.
7502 if (mBytesRemaining == 0) {
7503 // Only start draining when all data in mixbuffer
7504 // has been written
7505 ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
7506 track->setState(IAfTrackBase::STOPPING_2);
7507 // so presentation completes after drain;
7508 // do not drain if no data was ever sent to HAL (mStandby == true)
7509 if (last && !mStandby) {
7510 // do not modify drain sequence if we are already draining. This happens
7511 // when resuming from pause after drain.
7512 if ((mDrainSequence & 1) == 0) {
7513 mSleepTimeUs = 0;
7514 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
7515 mixerStatus = MIXER_DRAIN_TRACK;
7516 mDrainSequence += 2;
7517 }
7518 if (mHwPaused) {
7519 // It is possible to move from PAUSED to STOPPING_1 without
7520 // a resume so we must ensure hardware is running
7521 doHwResume = true;
7522 mHwPaused = false;
7523 }
7524 }
7525 }
7526 } else if (last) {
7527 ALOGV("stopping1 underrun retries left %d", track->retryCount());
7528 mixerStatus = MIXER_TRACKS_ENABLED;
7529 }
7530 } else if (track->isStopping_2()) {
7531 // Drain has completed or we are in standby, signal presentation complete
7532 if (!(mDrainSequence & 1) || !last || mStandby) {
7533 track->setState(IAfTrackBase::STOPPED);
7534 mOutput->presentationComplete();
7535 track->presentationComplete(latency_l()); // always returns true
7536 track->reset();
7537 tracksToRemove->add(track);
7538 // OFFLOADED stop resets frame counts.
7539 if (!mUseAsyncWrite) {
7540 // If we don't get explicit drain notification we must
7541 // register discontinuity regardless of whether this is
7542 // the previous (!last) or the upcoming (last) track
7543 // to avoid skipping the discontinuity.
7544 mTimestampVerifier.discontinuity(
7545 mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
7546 }
7547 }
7548 } else {
7549 // No buffers for this track. Give it a few chances to
7550 // fill a buffer, then remove it from active list.
7551 bool isTimestampAdvancing = mIsTimestampAdvancing.check(mOutput);
7552 if (!isTunerStream() // tuner streams remain active in underrun
7553 && --(track->retryCount()) <= 0) {
7554 if (isTimestampAdvancing) { // HAL is still playing audio, give us more time.
7555 track->retryCount() = kMaxTrackRetriesOffload;
7556 } else {
7557 ALOGI("%s BUFFER TIMEOUT: remove track(%d) from active list due to"
7558 " underrun on thread %d", __func__, track->id(), mId);
7559 tracksToRemove->add(track);
7560 // tell client process that the track was disabled because of underrun;
7561 // it will then automatically call start() when data is available
7562 track->disable();
7563 }
7564 } else if (last){
7565 mixerStatus = MIXER_TRACKS_ENABLED;
7566 }
7567 }
7568 }
7569 // compute volume for this track
7570 if (track->isReady()) { // check ready to prevent premature start.
7571 processVolume_l(track, last);
7572 }
7573 }
7574
7575 // make sure the pause/flush/resume sequence is executed in the right order.
7576 // If a flush is pending and a track is active but the HW is not paused, force a HW pause
7577 // before flush and then resume HW. This can happen in case of pause/flush/resume
7578 // if resume is received before pause is executed.
7579 if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
7580 status_t result = mOutput->stream->pause();
7581 ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
7582 doHwResume = !doHwPause; // resume if pause is due to flush.
7583 }
7584 if (mFlushPending) {
7585 flushHw_l();
7586 }
7587 if (!mStandby && doHwResume) {
7588 status_t result = mOutput->stream->resume();
7589 ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
7590 }
7591
7592 // remove all the tracks that need to be...
7593 removeTracks_l(*tracksToRemove);
7594
7595 return mixerStatus;
7596 }
7597
7598 // must be called with thread mutex locked
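// Returns true while an asynchronous write or drain issued to the HAL has not yet been
// acknowledged (bit 0 of the corresponding sequence is still set).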
7599 bool OffloadThread::waitingAsyncCallback_l()
7600 {
7601 ALOGVV("waitingAsyncCallback_l mWriteAckSequence %d mDrainSequence %d",
7602 mWriteAckSequence, mDrainSequence);
7603 if (mUseAsyncWrite && ((mWriteAckSequence & 1) || (mDrainSequence & 1))) {
7604 return true;
7605 }
7606 return false;
7607 }
7608
7609 bool OffloadThread::waitingAsyncCallback()
7610 {
7611 audio_utils::lock_guard _l(mutex());
7612 return waitingAsyncCallback_l();
7613 }
7614
7615 void OffloadThread::flushHw_l()
7616 {
7617 DirectOutputThread::flushHw_l();
7618 // Flush anything still waiting in the mixbuffer
7619 mCurrentWriteLength = 0;
7620 mBytesRemaining = 0;
7621 mPausedWriteLength = 0;
7622 mPausedBytesRemaining = 0;
7623 // reset bytes written count to reflect that DSP buffers are empty after flush.
7624 mBytesWritten = 0;
7625
7626 if (mUseAsyncWrite) {
7627 // discard any pending drain or write ack by incrementing sequence
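        // Adding 2 advances the counter held in the upper bits while "& ~1" clears the low
        // "waiting for callback" bit, so a late callback carrying the old sequence is ignored
        // by AsyncCallbackThread::resetWriteBlocked()/resetDraining().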
7628 mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
7629 mDrainSequence = (mDrainSequence + 2) & ~1;
7630 ALOG_ASSERT(mCallbackThread != 0);
7631 mCallbackThread->setWriteBlocked(mWriteAckSequence);
7632 mCallbackThread->setDraining(mDrainSequence);
7633 }
7634 }
7635
7636 void OffloadThread::invalidateTracks(audio_stream_type_t streamType)
7637 {
7638 audio_utils::lock_guard _l(mutex());
7639 if (PlaybackThread::invalidateTracks_l(streamType)) {
7640 mFlushPending = true;
7641 }
7642 }
7643
7644 void OffloadThread::invalidateTracks(std::set<audio_port_handle_t>& portIds) {
7645 audio_utils::lock_guard _l(mutex());
7646 if (PlaybackThread::invalidateTracks_l(portIds)) {
7647 mFlushPending = true;
7648 }
7649 }
7650
7651 // ----------------------------------------------------------------------------
7652
7653 /* static */
7654 sp<IAfDuplicatingThread> IAfDuplicatingThread::create(
7655 const sp<IAfThreadCallback>& afThreadCallback,
7656 IAfPlaybackThread* mainThread, audio_io_handle_t id, bool systemReady) {
7657 return sp<DuplicatingThread>::make(afThreadCallback, mainThread, id, systemReady);
7658 }
7659
7660 DuplicatingThread::DuplicatingThread(const sp<IAfThreadCallback>& afThreadCallback,
7661 IAfPlaybackThread* mainThread, audio_io_handle_t id, bool systemReady)
7662 : MixerThread(afThreadCallback, mainThread->getOutput(), id,
7663 systemReady, DUPLICATING),
7664 mWaitTimeMs(UINT_MAX)
7665 {
7666 addOutputTrack(mainThread);
7667 }
7668
7669 DuplicatingThread::~DuplicatingThread()
7670 {
7671 for (size_t i = 0; i < mOutputTracks.size(); i++) {
7672 mOutputTracks[i]->destroy();
7673 }
7674 }
7675
7676 void DuplicatingThread::threadLoop_mix()
7677 {
7678 // mix buffers...
7679 if (outputsReady()) {
7680 mAudioMixer->process();
7681 } else {
7682 if (mMixerBufferValid) {
7683 memset(mMixerBuffer, 0, mMixerBufferSize);
7684 } else {
7685 memset(mSinkBuffer, 0, mSinkBufferSize);
7686 }
7687 }
7688 mSleepTimeUs = 0;
7689 writeFrames = mNormalFrameCount;
7690 mCurrentWriteLength = mSinkBufferSize;
7691 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
7692 }
7693
7694 void DuplicatingThread::threadLoop_sleepTime()
7695 {
7696 if (mSleepTimeUs == 0) {
7697 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
7698 mSleepTimeUs = mActiveSleepTimeUs;
7699 } else {
7700 mSleepTimeUs = mIdleSleepTimeUs;
7701 }
7702 } else if (mBytesWritten != 0) {
7703 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
7704 writeFrames = mNormalFrameCount;
7705 memset(mSinkBuffer, 0, mSinkBufferSize);
7706 } else {
7707 // flush remaining overflow buffers in output tracks
7708 writeFrames = 0;
7709 }
7710 mSleepTimeUs = 0;
7711 }
7712 }
7713
7714 ssize_t DuplicatingThread::threadLoop_write()
7715 {
7716 for (size_t i = 0; i < outputTracks.size(); i++) {
7717 const ssize_t actualWritten = outputTracks[i]->write(mSinkBuffer, writeFrames);
7718
7719 // Consider the first OutputTrack for timestamp and frame counting.
7720
7721 // The threadLoop() generally assumes writing a full sink buffer size at a time.
7722 // Here, we correct for writeFrames of 0 (a stop) or underruns because
7723 // we always claim success.
7724 if (i == 0) {
7725 const ssize_t correction = mSinkBufferSize / mFrameSize - actualWritten;
7726 ALOGD_IF(correction != 0 && writeFrames != 0,
7727 "%s: writeFrames:%u actualWritten:%zd correction:%zd mFramesWritten:%lld",
7728 __func__, writeFrames, actualWritten, correction, (long long)mFramesWritten);
7729 mFramesWritten -= correction;
7730 }
7731
7732 // TODO: Report correction for the other output tracks and show in the dump.
7733 }
7734 if (mStandby) {
7735 mThreadMetrics.logBeginInterval();
7736 mThreadSnapshot.onBegin();
7737 mStandby = false;
7738 }
7739 return (ssize_t)mSinkBufferSize;
7740 }
7741
7742 void DuplicatingThread::threadLoop_standby()
7743 {
7744 // DuplicatingThread implements standby by stopping all tracks
7745 for (size_t i = 0; i < outputTracks.size(); i++) {
7746 outputTracks[i]->stop();
7747 }
7748 }
7749
7750 void DuplicatingThread::threadLoop_exit()
7751 {
7752 // Prevent calling the OutputTrack dtor in the DuplicatingThread dtor
7753 // where other mutexes (i.e. AudioPolicyService_Mutex) may be held.
7754 // Do so here in the threadLoop_exit().
7755
7756 SortedVector <sp<IAfOutputTrack>> localTracks;
7757 {
7758 audio_utils::lock_guard l(mutex());
7759 localTracks = std::move(mOutputTracks);
7760 mOutputTracks.clear();
7761 }
7762 localTracks.clear();
7763 outputTracks.clear();
7764 PlaybackThread::threadLoop_exit();
7765 }
7766
7767 void DuplicatingThread::dumpInternals_l(int fd, const Vector<String16>& args)
7768 {
7769 MixerThread::dumpInternals_l(fd, args);
7770
7771 std::stringstream ss;
7772 const size_t numTracks = mOutputTracks.size();
7773 ss << " " << numTracks << " OutputTracks";
7774 if (numTracks > 0) {
7775 ss << ":";
7776 for (const auto &track : mOutputTracks) {
7777 const auto thread = track->thread().promote();
7778 ss << " (" << track->id() << " : ";
7779 if (thread.get() != nullptr) {
7780 ss << thread.get() << ", " << thread->id();
7781 } else {
7782 ss << "null";
7783 }
7784 ss << ")";
7785 }
7786 }
7787 ss << "\n";
7788 std::string result = ss.str();
7789 write(fd, result.c_str(), result.size());
7790 }
7791
7792 void DuplicatingThread::saveOutputTracks()
7793 {
7794 outputTracks = mOutputTracks;
7795 }
7796
7797 void DuplicatingThread::clearOutputTracks()
7798 {
7799 outputTracks.clear();
7800 }
7801
7802 void DuplicatingThread::addOutputTrack(IAfPlaybackThread* thread)
7803 {
7804 audio_utils::lock_guard _l(mutex());
7805 // The downstream MixerThread consumes thread->frameCount() amount of frames per mix pass.
7806 // Adjust for thread->sampleRate() to determine minimum buffer frame count.
7807 // Then triple buffer because Threads do not run synchronously and may not be clock locked.
7808 const size_t frameCount =
7809 3 * sourceFramesNeeded(mSampleRate, thread->frameCount(), thread->sampleRate());
7810 // TODO: Consider asynchronous sample rate conversion to handle clock disparity
7811 // from different OutputTracks and their associated MixerThreads (e.g. one may be
7812 // nearly empty and the other may be dropping data).
7813
7814 // TODO b/182392769: use attribution source util, move to server edge
7815 AttributionSourceState attributionSource = AttributionSourceState();
7816 attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
7817 IPCThreadState::self()->getCallingUid()));
7818 attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
7819 IPCThreadState::self()->getCallingPid()));
7820 attributionSource.token = sp<BBinder>::make();
7821 sp<IAfOutputTrack> outputTrack = IAfOutputTrack::create(thread,
7822 this,
7823 mSampleRate,
7824 mFormat,
7825 mChannelMask,
7826 frameCount,
7827 attributionSource);
7828 status_t status = outputTrack != 0 ? outputTrack->initCheck() : (status_t) NO_MEMORY;
7829 if (status != NO_ERROR) {
7830 ALOGE("addOutputTrack() initCheck failed %d", status);
7831 return;
7832 }
7833 thread->setStreamVolume(AUDIO_STREAM_PATCH, 1.0f);
7834 mOutputTracks.add(outputTrack);
7835 ALOGV("addOutputTrack() track %p, on thread %p", outputTrack.get(), thread);
7836 updateWaitTime_l();
7837 }
7838
7839 void DuplicatingThread::removeOutputTrack(IAfPlaybackThread* thread)
7840 {
7841 audio_utils::lock_guard _l(mutex());
7842 for (size_t i = 0; i < mOutputTracks.size(); i++) {
7843 if (mOutputTracks[i]->thread() == thread) {
7844 mOutputTracks[i]->destroy();
7845 mOutputTracks.removeAt(i);
7846 updateWaitTime_l();
7847 // NO_THREAD_SAFETY_ANALYSIS
7848 // Lambda workaround: as thread != this
7849 // we can safely call the remote thread getOutput.
7850 const bool equalOutput =
7851 [&](){ return thread->getOutput() == mOutput; }();
7852 if (equalOutput) {
7853 mOutput = nullptr;
7854 }
7855 return;
7856 }
7857 }
7858 ALOGV("removeOutputTrack(): unknown thread: %p", thread);
7859 }
7860
7861 // caller must hold mutex()
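// The wait time is the smallest downstream buffer duration among all OutputTracks,
// expressed as two mix periods in milliseconds: frameCount * 2 * 1000 / sampleRate.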
7862 void DuplicatingThread::updateWaitTime_l()
7863 {
7864 mWaitTimeMs = UINT_MAX;
7865 for (size_t i = 0; i < mOutputTracks.size(); i++) {
7866 const auto strong = mOutputTracks[i]->thread().promote();
7867 if (strong != 0) {
7868 uint32_t waitTimeMs = (strong->frameCount() * 2 * 1000) / strong->sampleRate();
7869 if (waitTimeMs < mWaitTimeMs) {
7870 mWaitTimeMs = waitTimeMs;
7871 }
7872 }
7873 }
7874 }
7875
7876 bool DuplicatingThread::outputsReady()
7877 {
7878 for (size_t i = 0; i < outputTracks.size(); i++) {
7879 const auto thread = outputTracks[i]->thread().promote();
7880 if (thread == 0) {
7881 ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p",
7882 outputTracks[i].get());
7883 return false;
7884 }
7885 IAfPlaybackThread* const playbackThread = thread->asIAfPlaybackThread().get();
7886 // see note at standby() declaration
7887 if (playbackThread->inStandby() && !playbackThread->isSuspended()) {
7888 ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(),
7889 thread.get());
7890 return false;
7891 }
7892 }
7893 return true;
7894 }
7895
7896 void DuplicatingThread::sendMetadataToBackend_l(
7897 const StreamOutHalInterface::SourceMetadata& metadata)
7898 {
7899 for (auto& outputTrack : outputTracks) { // not mOutputTracks
7900 outputTrack->setMetadatas(metadata.tracks);
7901 }
7902 }
7903
7904 uint32_t DuplicatingThread::activeSleepTimeUs() const
7905 {
7906 // return half the wait time in microseconds.
7907 return std::min(mWaitTimeMs * 500ULL, (unsigned long long)UINT32_MAX); // prevent overflow.
7908 }
7909
7910 void DuplicatingThread::cacheParameters_l()
7911 {
7912 // updateWaitTime_l() sets mWaitTimeMs, which affects activeSleepTimeUs(), so call it first
7913 updateWaitTime_l();
7914
7915 MixerThread::cacheParameters_l();
7916 }
7917
7918 // ----------------------------------------------------------------------------
7919
7920 /* static */
7921 sp<IAfPlaybackThread> IAfPlaybackThread::createSpatializerThread(
7922 const sp<IAfThreadCallback>& afThreadCallback,
7923 AudioStreamOut* output,
7924 audio_io_handle_t id,
7925 bool systemReady,
7926 audio_config_base_t* mixerConfig) {
7927 return sp<SpatializerThread>::make(afThreadCallback, output, id, systemReady, mixerConfig);
7928 }
7929
7930 SpatializerThread::SpatializerThread(const sp<IAfThreadCallback>& afThreadCallback,
7931 AudioStreamOut* output,
7932 audio_io_handle_t id,
7933 bool systemReady,
7934 audio_config_base_t *mixerConfig)
7935 : MixerThread(afThreadCallback, output, id, systemReady, SPATIALIZER, mixerConfig)
7936 {
7937 }
7938
7939 void SpatializerThread::setHalLatencyMode_l() {
7940 // if mSupportedLatencyModes is empty, the HAL stream does not support
7941 // latency mode control and we can exit.
7942 if (mSupportedLatencyModes.empty()) {
7943 return;
7944 }
7945 // Do not update the HAL latency mode if no track is active
7946 if (mActiveTracks.isEmpty()) {
7947 return;
7948 }
7949
7950 audio_latency_mode_t latencyMode = AUDIO_LATENCY_MODE_FREE;
7951 if (mSupportedLatencyModes.size() == 1) {
7952 // If the HAL currently supports only one latency mode, confirm the choice
7953 latencyMode = mSupportedLatencyModes[0];
7954 } else if (mSupportedLatencyModes.size() > 1) {
7955 // Request low latency if:
7956 // - The low latency mode is requested by the spatializer controller
7957 // (mRequestedLatencyMode = AUDIO_LATENCY_MODE_LOW)
7958 // AND
7959 // - At least one active track is spatialized
7960 for (const auto& track : mActiveTracks) {
7961 if (track->isSpatialized()) {
7962 latencyMode = mRequestedLatencyMode;
7963 break;
7964 }
7965 }
7966 }
7967
7968 if (latencyMode != mSetLatencyMode) {
7969 status_t status = mOutput->stream->setLatencyMode(latencyMode);
7970 ALOGD("%s: thread(%d) setLatencyMode(%s) returned %d",
7971 __func__, mId, toString(latencyMode).c_str(), status);
7972 if (status == NO_ERROR) {
7973 mSetLatencyMode = latencyMode;
7974 }
7975 }
7976 }
7977
7978 status_t SpatializerThread::setRequestedLatencyMode(audio_latency_mode_t mode) {
7979 if (mode < 0 || mode >= AUDIO_LATENCY_MODE_CNT) {
7980 return BAD_VALUE;
7981 }
7982 audio_utils::lock_guard _l(mutex());
7983 mRequestedLatencyMode = mode;
7984 return NO_ERROR;
7985 }
7986
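// Keeps the output stage consistent for spatialization: when a spatializer/virtualizer
// effect is present, the internal downmixer (mFinalDownMixer) is disabled and released;
// otherwise, if the chain has no downmix effect yet, one is created and enabled.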
7987 void SpatializerThread::checkOutputStageEffects()
7988 NO_THREAD_SAFETY_ANALYSIS
7989 // 'createEffect_l' requires holding mutex 'AudioFlinger_Mutex' exclusively
7990 {
7991 bool hasVirtualizer = false;
7992 bool hasDownMixer = false;
7993 sp<IAfEffectHandle> finalDownMixer;
7994 {
7995 audio_utils::lock_guard _l(mutex());
7996 sp<IAfEffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
7997 if (chain != 0) {
7998 hasVirtualizer = chain->getEffectFromType_l(FX_IID_SPATIALIZER) != nullptr;
7999 hasDownMixer = chain->getEffectFromType_l(EFFECT_UIID_DOWNMIX) != nullptr;
8000 }
8001
8002 finalDownMixer = mFinalDownMixer;
8003 mFinalDownMixer.clear();
8004 }
8005
8006 if (hasVirtualizer) {
8007 if (finalDownMixer != nullptr) {
8008 int32_t ret;
8009 finalDownMixer->asIEffect()->disable(&ret);
8010 }
8011 finalDownMixer.clear();
8012 } else if (!hasDownMixer) {
8013 std::vector<effect_descriptor_t> descriptors;
8014 status_t status = mAfThreadCallback->getEffectsFactoryHal()->getDescriptors(
8015 EFFECT_UIID_DOWNMIX, &descriptors);
8016 if (status != NO_ERROR) {
8017 return;
8018 }
8019 ALOG_ASSERT(!descriptors.empty(),
8020 "%s getDescriptors() returned no error but empty list", __func__);
8021
8022 finalDownMixer = createEffect_l(nullptr /*client*/, nullptr /*effectClient*/,
8023 0 /*priority*/, AUDIO_SESSION_OUTPUT_STAGE, &descriptors[0], nullptr /*enabled*/,
8024 &status, false /*pinned*/, false /*probe*/, false /*notifyFramesProcessed*/);
8025
8026 if (finalDownMixer == nullptr || (status != NO_ERROR && status != ALREADY_EXISTS)) {
8027 ALOGW("%s error creating downmixer %d", __func__, status);
8028 finalDownMixer.clear();
8029 } else {
8030 int32_t ret;
8031 finalDownMixer->asIEffect()->enable(&ret);
8032 }
8033 }
8034
8035 {
8036 audio_utils::lock_guard _l(mutex());
8037 mFinalDownMixer = finalDownMixer;
8038 }
8039 }
8040
8041 void SpatializerThread::threadLoop_exit()
8042 {
8043 // The Spatializer EffectHandle must be released on the PlaybackThread
8044 // threadLoop() to prevent lock inversion in the SpatializerThread dtor.
8045 mFinalDownMixer.clear();
8046
8047 PlaybackThread::threadLoop_exit();
8048 }
8049
8050 // ----------------------------------------------------------------------------
8051 // Record
8052 // ----------------------------------------------------------------------------
8053
8054 sp<IAfRecordThread> IAfRecordThread::create(const sp<IAfThreadCallback>& afThreadCallback,
8055 AudioStreamIn* input,
8056 audio_io_handle_t id,
8057 bool systemReady) {
8058 return sp<RecordThread>::make(afThreadCallback, input, id, systemReady);
8059 }
8060
8061 RecordThread::RecordThread(const sp<IAfThreadCallback>& afThreadCallback,
8062 AudioStreamIn *input,
8063 audio_io_handle_t id,
8064 bool systemReady
8065 ) :
8066 ThreadBase(afThreadCallback, id, RECORD, systemReady, false /* isOut */),
8067 mInput(input),
8068 mSource(mInput),
8069 mActiveTracks(&this->mLocalLog),
8070 mRsmpInBuffer(NULL),
8071 // mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
8072 mRsmpInRear(0)
8073 , mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize,
8074 "RecordThreadRO", MemoryHeapBase::READ_ONLY))
8075 // mFastCapture below
8076 , mFastCaptureFutex(0)
8077 // mInputSource
8078 // mPipeSink
8079 // mPipeSource
8080 , mPipeFramesP2(0)
8081 // mPipeMemory
8082 // mFastCaptureNBLogWriter
8083 , mFastTrackAvail(false)
8084 , mBtNrecSuspended(false)
8085 {
8086 snprintf(mThreadName, kThreadNameLength, "AudioIn_%X", id);
8087 mNBLogWriter = afThreadCallback->newWriter_l(kLogSize, mThreadName);
8088
8089 if (mInput->audioHwDev != nullptr) {
8090 mIsMsdDevice = strcmp(
8091 mInput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
8092 }
8093
8094 readInputParameters_l();
8095
8096 // TODO: We may also match on address as well as device type for
8097 // AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_IN_REMOTE_SUBMIX
8098 // TODO: This property should ensure that it contains only a single device type.
8099 mTimestampCorrectedDevice = (audio_devices_t)property_get_int64(
8100 "audio.timestamp.corrected_input_device",
8101 (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_IN_BUS // turn on by default for MSD
8102 : AUDIO_DEVICE_NONE));
8103
8104 // create an NBAIO source for the HAL input stream, and negotiate
8105 mInputSource = new AudioStreamInSource(input->stream);
8106 size_t numCounterOffers = 0;
8107 const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
8108 #if !LOG_NDEBUG
8109 [[maybe_unused]] ssize_t index =
8110 #else
8111 (void)
8112 #endif
8113 mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
8114 ALOG_ASSERT(index == 0);
8115
8116 // initialize fast capture depending on configuration
8117 bool initFastCapture;
8118 switch (kUseFastCapture) {
8119 case FastCapture_Never:
8120 initFastCapture = false;
8121 ALOGV("%p kUseFastCapture = Never, initFastCapture = false", this);
8122 break;
8123 case FastCapture_Always:
8124 initFastCapture = true;
8125 ALOGV("%p kUseFastCapture = Always, initFastCapture = true", this);
8126 break;
8127 case FastCapture_Static:
8128 initFastCapture = !mIsMsdDevice // Disable fast capture for MSD BUS devices.
8129 && audio_is_linear_pcm(mFormat)
8130 && (mFrameCount * 1000) / mSampleRate < kMinNormalCaptureBufferSizeMs;
8131 ALOGV("%p kUseFastCapture = Static, format = 0x%x, (%lld * 1000) / %u vs %u, "
8132 "initFastCapture = %d, mIsMsdDevice = %d", this, mFormat, (long long)mFrameCount,
8133 mSampleRate, kMinNormalCaptureBufferSizeMs, initFastCapture, mIsMsdDevice);
8134 break;
8135 // case FastCapture_Dynamic:
8136 }
8137
8138 if (initFastCapture) {
8139 // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
8140 NBAIO_Format format = mInputSource->format();
8141 // quadruple-buffering of 20 ms each; this ensures we can sleep for 20ms in RecordThread
8142 size_t pipeFramesP2 = roundup(4 * FMS_20 * mSampleRate / 1000);
8143 size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
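        // Rough sizing example (assuming FMS_20 is 20 ms and roundup() rounds up to a power
        // of two): at 48 kHz this is roundup(4 * 20 * 48) = roundup(3840) = 4096 frames,
        // i.e. about 85 ms of capture.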
8144 void *pipeBuffer = nullptr;
8145 const sp<MemoryDealer> roHeap(readOnlyHeap());
8146 sp<IMemory> pipeMemory;
8147 if ((roHeap == 0) ||
8148 (pipeMemory = roHeap->allocate(pipeSize)) == 0 ||
8149 (pipeBuffer = pipeMemory->unsecurePointer()) == nullptr) {
8150 ALOGE("not enough memory for pipe buffer size=%zu; "
8151 "roHeap=%p, pipeMemory=%p, pipeBuffer=%p; roHeapSize: %lld",
8152 pipeSize, roHeap.get(), pipeMemory.get(), pipeBuffer,
8153 (long long)kRecordThreadReadOnlyHeapSize);
8154 goto failed;
8155 }
8156 // pipe will be shared directly with fast clients, so clear to avoid leaking old information
8157 memset(pipeBuffer, 0, pipeSize);
8158 Pipe *pipe = new Pipe(pipeFramesP2, format, pipeBuffer);
8159 const NBAIO_Format offersFast[1] = {format};
8160 size_t numCounterOffersFast = 0;
8161 [[maybe_unused]] ssize_t index2 = pipe->negotiate(offersFast, std::size(offersFast),
8162 nullptr /* counterOffers */, numCounterOffersFast);
8163 ALOG_ASSERT(index2 == 0);
8164 mPipeSink = pipe;
8165 PipeReader *pipeReader = new PipeReader(*pipe);
8166 numCounterOffersFast = 0;
8167 index2 = pipeReader->negotiate(offersFast, std::size(offersFast),
8168 nullptr /* counterOffers */, numCounterOffersFast);
8169 ALOG_ASSERT(index2 == 0);
8170 mPipeSource = pipeReader;
8171 mPipeFramesP2 = pipeFramesP2;
8172 mPipeMemory = pipeMemory;
8173
8174 // create fast capture
8175 mFastCapture = new FastCapture();
8176 FastCaptureStateQueue *sq = mFastCapture->sq();
8177 #ifdef STATE_QUEUE_DUMP
8178 // FIXME
8179 #endif
8180 FastCaptureState *state = sq->begin();
8181 state->mCblk = NULL;
8182 state->mInputSource = mInputSource.get();
8183 state->mInputSourceGen++;
8184 state->mPipeSink = pipe;
8185 state->mPipeSinkGen++;
8186 state->mFrameCount = mFrameCount;
8187 state->mCommand = FastCaptureState::COLD_IDLE;
8188 // already done in constructor initialization list
8189 //mFastCaptureFutex = 0;
8190 state->mColdFutexAddr = &mFastCaptureFutex;
8191 state->mColdGen++;
8192 state->mDumpState = &mFastCaptureDumpState;
8193 #ifdef TEE_SINK
8194 // FIXME
8195 #endif
8196 mFastCaptureNBLogWriter =
8197 afThreadCallback->newWriter_l(kFastCaptureLogSize, "FastCapture");
8198 state->mNBLogWriter = mFastCaptureNBLogWriter.get();
8199 sq->end();
8200 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED);
8201
8202 // start the fast capture
8203 mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
8204 pid_t tid = mFastCapture->getTid();
8205 sendPrioConfigEvent(getpid(), tid, kPriorityFastCapture, false /*forApp*/);
8206 stream()->setHalThreadPriority(kPriorityFastCapture);
8207 #ifdef AUDIO_WATCHDOG
8208 // FIXME
8209 #endif
8210
8211 mFastTrackAvail = true;
8212 }
8213 #ifdef TEE_SINK
8214 mTee.set(mInputSource->format(), NBAIO_Tee::TEE_FLAG_INPUT_THREAD);
8215 mTee.setId(std::string("_") + std::to_string(mId) + "_C");
8216 #endif
8217 failed: ;
8218
8219 // FIXME mNormalSource
8220 }
8221
8222 RecordThread::~RecordThread()
8223 {
8224 if (mFastCapture != 0) {
8225 FastCaptureStateQueue *sq = mFastCapture->sq();
8226 FastCaptureState *state = sq->begin();
8227 if (state->mCommand == FastCaptureState::COLD_IDLE) {
8228 int32_t old = android_atomic_inc(&mFastCaptureFutex);
8229 if (old == -1) {
8230 (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
8231 }
8232 }
8233 state->mCommand = FastCaptureState::EXIT;
8234 sq->end();
8235 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED);
8236 mFastCapture->join();
8237 mFastCapture.clear();
8238 }
8239 mAfThreadCallback->unregisterWriter(mFastCaptureNBLogWriter);
8240 mAfThreadCallback->unregisterWriter(mNBLogWriter);
8241 free(mRsmpInBuffer);
8242 }
8243
8244 void RecordThread::onFirstRef()
8245 {
8246 run(mThreadName, PRIORITY_URGENT_AUDIO);
8247 }
8248
8249 void RecordThread::preExit()
8250 {
8251 ALOGV(" preExit()");
8252 audio_utils::lock_guard _l(mutex());
8253 for (size_t i = 0; i < mTracks.size(); i++) {
8254 sp<IAfRecordTrack> track = mTracks[i];
8255 track->invalidate();
8256 }
8257 mActiveTracks.clear();
8258 mStartStopCV.notify_all();
8259 }
8260
8261 bool RecordThread::threadLoop()
8262 {
8263 nsecs_t lastWarning = 0;
8264
8265 inputStandBy();
8266
8267 reacquire_wakelock:
8268 {
8269 audio_utils::lock_guard _l(mutex());
8270 acquireWakeLock_l();
8271 }
8272
8273 // used to request a deferred sleep, to be executed later while mutex is unlocked
8274 uint32_t sleepUs = 0;
8275
8276 // timestamp correction enable is determined under lock, used in processing step.
8277 bool timestampCorrectionEnabled = false;
8278
8279 int64_t lastLoopCountRead = -2; // never matches "previous" loop, when loopCount = 0.
8280
8281 // loop while there is work to do
8282 for (int64_t loopCount = 0;; ++loopCount) { // loopCount used for statistics tracking
8283 // Note: these sp<> are released at the end of the for loop outside of the mutex() lock.
8284 sp<IAfRecordTrack> activeTrack;
8285 std::vector<sp<IAfRecordTrack>> oldActiveTracks;
8286 Vector<sp<IAfEffectChain>> effectChains;
8287
8288 // activeTracks accumulates a copy of a subset of mActiveTracks
8289 Vector<sp<IAfRecordTrack>> activeTracks;
8290
8291 // reference to the (first and only) active fast track
8292 sp<IAfRecordTrack> fastTrack;
8293
8294 // reference to a fast track which is about to be removed
8295 sp<IAfRecordTrack> fastTrackToRemove;
8296
8297 bool silenceFastCapture = false;
8298
8299 { // scope for mutex()
8300 audio_utils::unique_lock _l(mutex());
8301
8302 processConfigEvents_l();
8303
8304 // check exitPending here because processConfigEvents_l() can
8305 // temporarily release mutex()
8306 if (exitPending()) {
8307 break;
8308 }
8309
8310 // sleep with mutex unlocked
8311 if (sleepUs > 0) {
8312 ATRACE_BEGIN("sleepC");
8313 (void)mWaitWorkCV.wait_for(_l, std::chrono::microseconds(sleepUs));
8314 ATRACE_END();
8315 sleepUs = 0;
8316 continue;
8317 }
8318
8319 // if no active track(s), then standby and release wakelock
8320 size_t size = mActiveTracks.size();
8321 if (size == 0) {
8322 standbyIfNotAlreadyInStandby();
8323 // exitPending() can't become true here
8324 releaseWakeLock_l();
8325 ALOGV("RecordThread: loop stopping");
8326 // go to sleep
8327 mWaitWorkCV.wait(_l);
8328 ALOGV("RecordThread: loop starting");
8329 goto reacquire_wakelock;
8330 }
8331
8332 bool doBroadcast = false;
8333 bool allStopped = true;
8334 for (size_t i = 0; i < size; ) {
8335 if (activeTrack) { // ensure track release is outside lock.
8336 oldActiveTracks.emplace_back(std::move(activeTrack));
8337 }
8338 activeTrack = mActiveTracks[i];
8339 if (activeTrack->isTerminated()) {
8340 if (activeTrack->isFastTrack()) {
8341 ALOG_ASSERT(fastTrackToRemove == 0);
8342 fastTrackToRemove = activeTrack;
8343 }
8344 removeTrack_l(activeTrack);
8345 mActiveTracks.remove(activeTrack);
8346 size--;
8347 continue;
8348 }
8349
8350 IAfTrackBase::track_state activeTrackState = activeTrack->state();
8351 switch (activeTrackState) {
8352
8353 case IAfTrackBase::PAUSING:
8354 mActiveTracks.remove(activeTrack);
8355 activeTrack->setState(IAfTrackBase::PAUSED);
8356 if (activeTrack->isFastTrack()) {
8357 ALOGV("%s fast track is paused, thus removed from active list", __func__);
8358 // Keep a ref on fast track to wait for FastCapture thread to get updated
8359 // state before potential track removal
8360 fastTrackToRemove = activeTrack;
8361 }
8362 doBroadcast = true;
8363 size--;
8364 continue;
8365
8366 case IAfTrackBase::STARTING_1:
8367 sleepUs = 10000;
8368 i++;
8369 allStopped = false;
8370 continue;
8371
8372 case IAfTrackBase::STARTING_2:
8373 doBroadcast = true;
8374 if (mStandby) {
8375 mThreadMetrics.logBeginInterval();
8376 mThreadSnapshot.onBegin();
8377 mStandby = false;
8378 }
8379 activeTrack->setState(IAfTrackBase::ACTIVE);
8380 allStopped = false;
8381 break;
8382
8383 case IAfTrackBase::ACTIVE:
8384 allStopped = false;
8385 break;
8386
8387 case IAfTrackBase::IDLE: // cannot be on ActiveTracks if idle
8388 case IAfTrackBase::PAUSED: // cannot be on ActiveTracks if paused
8389 case IAfTrackBase::STOPPED: // cannot be on ActiveTracks if destroyed/terminated
8390 default:
8391 LOG_ALWAYS_FATAL("%s: Unexpected active track state:%d, id:%d, tracks:%zu",
8392 __func__, activeTrackState, activeTrack->id(), size);
8393 }
8394
8395 if (activeTrack->isFastTrack()) {
8396 ALOG_ASSERT(!mFastTrackAvail);
8397 ALOG_ASSERT(fastTrack == 0);
8398 // if the active fast track is silenced either:
8399 // 1) silence the whole capture from fast capture buffer if this is
8400 // the only active track
8401 // 2) invalidate this track: this will cause the client to reconnect and possibly
8402 // be invalidated again until unsilenced
8403 bool invalidate = false;
8404 if (activeTrack->isSilenced()) {
8405 if (size > 1) {
8406 invalidate = true;
8407 } else {
8408 silenceFastCapture = true;
8409 }
8410 }
8411 // Invalidate fast tracks if access to audio history is required as this is not
8412 // possible with fast tracks. Once the fast track has been invalidated, no new
8413 // fast track will be created until mMaxSharedAudioHistoryMs is cleared.
8414 if (mMaxSharedAudioHistoryMs != 0) {
8415 invalidate = true;
8416 }
8417 if (invalidate) {
8418 activeTrack->invalidate();
8419 ALOG_ASSERT(fastTrackToRemove == 0);
8420 fastTrackToRemove = activeTrack;
8421 removeTrack_l(activeTrack);
8422 mActiveTracks.remove(activeTrack);
8423 size--;
8424 continue;
8425 }
8426 fastTrack = activeTrack;
8427 }
8428
8429 activeTracks.add(activeTrack);
8430 i++;
8431
8432 }
8433
8434 mActiveTracks.updatePowerState_l(this);
8435
8436 updateMetadata_l();
8437
8438 if (allStopped) {
8439 standbyIfNotAlreadyInStandby();
8440 }
8441 if (doBroadcast) {
8442 mStartStopCV.notify_all();
8443 }
8444
8445 // sleep if there are no active tracks to process
8446 if (activeTracks.isEmpty()) {
8447 if (sleepUs == 0) {
8448 sleepUs = kRecordThreadSleepUs;
8449 }
8450 continue;
8451 }
8452 sleepUs = 0;
8453
8454 timestampCorrectionEnabled = isTimestampCorrectionEnabled_l();
8455 lockEffectChains_l(effectChains);
8456 }
8457
8458 // thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0
8459
8460 size_t size = effectChains.size();
8461 for (size_t i = 0; i < size; i++) {
8462 // thread mutex is not locked, but effect chain is locked
8463 effectChains[i]->process_l();
8464 }
8465
8466 // Push a new fast capture state if fast capture is not already running, or cblk change
8467 if (mFastCapture != 0) {
8468 FastCaptureStateQueue *sq = mFastCapture->sq();
8469 FastCaptureState *state = sq->begin();
8470 bool didModify = false;
8471 FastCaptureStateQueue::block_t block = FastCaptureStateQueue::BLOCK_UNTIL_PUSHED;
8472 if (state->mCommand != FastCaptureState::READ_WRITE /* FIXME &&
8473 (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)*/) {
8474 if (state->mCommand == FastCaptureState::COLD_IDLE) {
8475 int32_t old = android_atomic_inc(&mFastCaptureFutex);
8476 if (old == -1) {
8477 (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
8478 }
8479 }
8480 state->mCommand = FastCaptureState::READ_WRITE;
8481 #if 0 // FIXME
8482 mFastCaptureDumpState.increaseSamplingN(mAfThreadCallback->isLowRamDevice() ?
8483 FastThreadDumpState::kSamplingNforLowRamDevice :
8484 FastThreadDumpState::kSamplingN);
8485 #endif
8486 didModify = true;
8487 }
8488 audio_track_cblk_t *cblkOld = state->mCblk;
8489 audio_track_cblk_t *cblkNew = fastTrack != 0 ? fastTrack->cblk() : NULL;
8490 if (cblkNew != cblkOld) {
8491 state->mCblk = cblkNew;
8492 // block until acked if removing a fast track
8493 if (cblkOld != NULL) {
8494 block = FastCaptureStateQueue::BLOCK_UNTIL_ACKED;
8495 }
8496 didModify = true;
8497 }
8498 AudioBufferProvider* abp = (fastTrack != 0 && fastTrack->isPatchTrack()) ?
8499 reinterpret_cast<AudioBufferProvider*>(fastTrack.get()) : nullptr;
8500 if (state->mFastPatchRecordBufferProvider != abp) {
8501 state->mFastPatchRecordBufferProvider = abp;
8502 state->mFastPatchRecordFormat = fastTrack == 0 ?
8503 AUDIO_FORMAT_INVALID : fastTrack->format();
8504 didModify = true;
8505 }
8506 if (state->mSilenceCapture != silenceFastCapture) {
8507 state->mSilenceCapture = silenceFastCapture;
8508 didModify = true;
8509 }
8510 sq->end(didModify);
8511 if (didModify) {
8512 sq->push(block);
8513 #if 0
8514 if (kUseFastCapture == FastCapture_Dynamic) {
8515 mNormalSource = mPipeSource;
8516 }
8517 #endif
8518 }
8519 }
8520
8521 // now run the fast track destructor with thread mutex unlocked
8522 fastTrackToRemove.clear();
8523
8524 // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one.
8525 // Only the client(s) that are too slow will overrun. But if even the fastest client is too
8526 // slow, then this RecordThread will overrun by not calling HAL read often enough.
8527 // If destination is non-contiguous, first read past the nominal end of buffer, then
8528 // copy to the right place. Permitted because mRsmpInBuffer was over-allocated.
8529
8530 int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
8531 ssize_t framesRead = 0; // not needed, remove clang-tidy warning.
8532 const int64_t lastIoBeginNs = systemTime(); // start IO timing
8533
8534 // If an NBAIO source is present, use it to read the normal capture's data
8535 if (mPipeSource != 0) {
8536 size_t framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
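            // Read at most half of the power-of-two resampler buffer per pass, and never more
            // than fits before the end of the over-allocated buffer (mRsmpInFramesOA - rear).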
8537
8538 // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
8539 // to the full buffer point (clearing the overflow condition). Upon OVERRUN error,
8540 // we immediately retry the read() to get data and prevent another overflow.
8541 for (int retries = 0; retries <= 2; ++retries) {
8542 ALOGW_IF(retries > 0, "overrun on read from pipe, retry #%d", retries);
8543 framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
8544 framesToRead);
8545 if (framesRead != OVERRUN) break;
8546 }
8547
8548 const ssize_t availableToRead = mPipeSource->availableToRead();
8549 if (availableToRead >= 0) {
8550 mMonopipePipeDepthStats.add(availableToRead);
8551 // PipeSource is the primary clock. It is up to the AudioRecord client to keep up.
8552 LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
8553 "more frames to read than fifo size, %zd > %zu",
8554 availableToRead, mPipeFramesP2);
8555 const size_t pipeFramesFree = mPipeFramesP2 - availableToRead;
8556 const size_t sleepFrames = min(pipeFramesFree, mRsmpInFramesP2) / 2;
8557 ALOGVV("mPipeFramesP2:%zu mRsmpInFramesP2:%zu sleepFrames:%zu availableToRead:%zd",
8558 mPipeFramesP2, mRsmpInFramesP2, sleepFrames, availableToRead);
8559 sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
8560 }
8561 if (framesRead < 0) {
8562 status_t status = (status_t) framesRead;
8563 switch (status) {
8564 case OVERRUN:
8565 ALOGW("overrun on read from pipe");
8566 framesRead = 0;
8567 break;
8568 case NEGOTIATE:
8569 ALOGE("re-negotiation is needed");
8570 framesRead = -1; // Will cause an attempt to recover.
8571 break;
8572 default:
8573 ALOGE("unknown error %d on read from pipe", status);
8574 break;
8575 }
8576 }
8577 // otherwise use the HAL / AudioStreamIn directly
8578 } else {
8579 ATRACE_BEGIN("read");
8580 size_t bytesRead;
8581 status_t result = mSource->read(
8582 (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize, &bytesRead);
8583 ATRACE_END();
8584 if (result < 0) {
8585 framesRead = result;
8586 } else {
8587 framesRead = bytesRead / mFrameSize;
8588 }
8589 }
8590
8591 const int64_t lastIoEndNs = systemTime(); // end IO timing
8592
8593 // Update server timestamp with server stats
8594 // systemTime() is optional if the hardware supports timestamps.
8595 if (framesRead >= 0) {
8596 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
8597 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
8598 }
8599
8600 // Update server timestamp with kernel stats
8601 if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
8602 int64_t position, time;
8603 if (mStandby) {
8604 mTimestampVerifier.discontinuity(audio_is_linear_pcm(mFormat) ?
8605 mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS :
8606 mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
8607 } else if (mSource->getCapturePosition(&position, &time) == NO_ERROR
8608 && time > mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]) {
8609
8610 mTimestampVerifier.add(position, time, mSampleRate);
8611 if (timestampCorrectionEnabled) {
8612 ALOGVV("TS_BEFORE: %d %lld %lld",
8613 id(), (long long)time, (long long)position);
8614 auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
8615 position = correctedTimestamp.mFrames;
8616 time = correctedTimestamp.mTimeNs;
8617 ALOGVV("TS_AFTER: %d %lld %lld",
8618 id(), (long long)time, (long long)position);
8619 }
8620
8621 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
8622 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
8623 // Note: In general record buffers should tend to be empty in
8624 // a properly running pipeline.
8625 //
8626 // Also, it is not advantageous to call get_presentation_position during the read
8627 // as the read obtains a lock, preventing the timestamp call from executing.
8628 } else {
8629 mTimestampVerifier.error();
8630 }
8631 }
8632
8633 // From the timestamp, input read latency is negative output write latency.
8634 const audio_input_flags_t flags = mInput != NULL ? mInput->flags : AUDIO_INPUT_FLAG_NONE;
8635 const double latencyMs = IAfRecordTrack::checkServerLatencySupported(mFormat, flags)
8636 ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
8637 if (latencyMs != 0.) { // note 0. means timestamp is empty.
8638 mLatencyMs.add(latencyMs);
8639 }
8640
8641 // Use this to track timestamp information
8642 // ALOGD("%s", mTimestamp.toString().c_str());
8643
8644 if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) {
8645 ALOGE("read failed: framesRead=%zd", framesRead);
8646 // Force input into standby so that it tries to recover at next read attempt
8647 inputStandBy();
8648 sleepUs = kRecordThreadSleepUs;
8649 }
8650 if (framesRead <= 0) {
8651 goto unlock;
8652 }
8653 ALOG_ASSERT(framesRead > 0);
8654 mFramesRead += framesRead;
8655
8656 #ifdef TEE_SINK
8657 (void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
8658 #endif
8659 // If destination is non-contiguous, we now correct for reading past end of buffer.
8660 {
8661 size_t part1 = mRsmpInFramesP2 - rear;
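            // Any frames written past the nominal end of the power-of-two buffer (the
            // over-read area starting at mRsmpInFramesP2) are wrapped back to the start
            // of mRsmpInBuffer.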
8662 if ((size_t) framesRead > part1) {
8663 memcpy(mRsmpInBuffer, (uint8_t*)mRsmpInBuffer + mRsmpInFramesP2 * mFrameSize,
8664 (framesRead - part1) * mFrameSize);
8665 }
8666 }
8667 mRsmpInRear = audio_utils::safe_add_overflow(mRsmpInRear, (int32_t)framesRead);
8668
8669 size = activeTracks.size();
8670
8671 // loop over each active track
8672 for (size_t i = 0; i < size; i++) {
8673 activeTrack = activeTracks[i];
8674
8675 // skip fast tracks, as those are handled directly by FastCapture
8676 if (activeTrack->isFastTrack()) {
8677 continue;
8678 }
8679
8680 // TODO: This code probably should be moved to RecordTrack.
8681 // TODO: Update the activeTrack buffer converter in case of reconfigure.
8682
8683 enum {
8684 OVERRUN_UNKNOWN,
8685 OVERRUN_TRUE,
8686 OVERRUN_FALSE
8687 } overrun = OVERRUN_UNKNOWN;
8688
8689 // loop over getNextBuffer to handle circular sink
8690 for (;;) {
8691
8692 activeTrack->sinkBuffer().frameCount = ~0;
8693 status_t status = activeTrack->getNextBuffer(&activeTrack->sinkBuffer());
8694 size_t framesOut = activeTrack->sinkBuffer().frameCount;
8695 LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));
8696
8697 // check available frames and handle overrun conditions
8698 // if the record track isn't draining fast enough.
8699 bool hasOverrun;
8700 size_t framesIn;
8701 activeTrack->resamplerBufferProvider()->sync(&framesIn, &hasOverrun);
8702 if (hasOverrun) {
8703 overrun = OVERRUN_TRUE;
8704 }
8705 if (framesOut == 0 || framesIn == 0) {
8706 break;
8707 }
8708
8709 // Don't allow framesOut to be larger than what is possible with resampling
8710 // from framesIn.
8711 // This isn't strictly necessary but helps limit buffer resizing in
8712 // RecordBufferConverter. TODO: remove when no longer needed.
8713 if (audio_is_linear_pcm(activeTrack->format())) {
8714 framesOut = min(framesOut,
8715 destinationFramesPossible(
8716 framesIn, mSampleRate, activeTrack->sampleRate()));
8717 }
8718
8719 if (activeTrack->isDirect()) {
8720 // No RecordBufferConverter used for direct streams. Pass
8721 // straight from RecordThread buffer to RecordTrack buffer.
8722 AudioBufferProvider::Buffer buffer;
8723 buffer.frameCount = framesOut;
8724 const status_t getNextBufferStatus =
8725 activeTrack->resamplerBufferProvider()->getNextBuffer(&buffer);
8726 if (getNextBufferStatus == OK && buffer.frameCount != 0) {
8727 ALOGV_IF(buffer.frameCount != framesOut,
8728 "%s() read less than expected (%zu vs %zu)",
8729 __func__, buffer.frameCount, framesOut);
8730 framesOut = buffer.frameCount;
8731 memcpy(activeTrack->sinkBuffer().raw,
8732 buffer.raw, buffer.frameCount * mFrameSize);
8733 activeTrack->resamplerBufferProvider()->releaseBuffer(&buffer);
8734 } else {
8735 framesOut = 0;
8736 ALOGE("%s() cannot fill request, status: %d, frameCount: %zu",
8737 __func__, getNextBufferStatus, buffer.frameCount);
8738 }
8739 } else {
8740 // process frames from the RecordThread buffer provider to the RecordTrack
8741 // buffer
8742 framesOut = activeTrack->recordBufferConverter()->convert(
8743 activeTrack->sinkBuffer().raw,
8744 activeTrack->resamplerBufferProvider(),
8745 framesOut);
8746 }
8747
8748 if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
8749 overrun = OVERRUN_FALSE;
8750 }
8751
8752 // MediaSyncEvent handling: Synchronize AudioRecord to AudioTrack completion.
8753 const ssize_t framesToDrop =
8754 activeTrack->synchronizedRecordState().updateRecordFrames(framesOut);
8755 if (framesToDrop == 0) {
8756 // no sync event, process normally, otherwise ignore.
8757 if (framesOut > 0) {
8758 activeTrack->sinkBuffer().frameCount = framesOut;
8759 // Sanitize before releasing if the track has no access to the source data
8760 // An idle UID receives silence from non virtual devices until active
8761 if (activeTrack->isSilenced()) {
8762 memset(activeTrack->sinkBuffer().raw,
8763 0, framesOut * activeTrack->frameSize());
8764 }
8765 activeTrack->releaseBuffer(&activeTrack->sinkBuffer());
8766 }
8767 }
8768 if (framesOut == 0) {
8769 break;
8770 }
8771 }
8772
8773 switch (overrun) {
8774 case OVERRUN_TRUE:
8775 // client isn't retrieving buffers fast enough
8776 if (!activeTrack->setOverflow()) {
8777 nsecs_t now = systemTime();
8778 // FIXME should lastWarning be tracked per track?
8779 if ((now - lastWarning) > kWarningThrottleNs) {
8780 ALOGW("RecordThread: buffer overflow");
8781 lastWarning = now;
8782 }
8783 }
8784 break;
8785 case OVERRUN_FALSE:
8786 activeTrack->clearOverflow();
8787 break;
8788 case OVERRUN_UNKNOWN:
8789 break;
8790 }
8791
8792 // update frame information and push timestamp out
8793 activeTrack->updateTrackFrameInfo(
8794 activeTrack->serverProxy()->framesReleased(),
8795 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER],
8796 mSampleRate, mTimestamp);
8797 }
8798
8799 unlock:
8800 // enable changes in effect chain
8801 unlockEffectChains(effectChains);
8802 // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
8803 if (audio_has_proportional_frames(mFormat)
8804 && loopCount == lastLoopCountRead + 1) {
8805 const int64_t readPeriodNs = lastIoEndNs - mLastIoEndNs;
8806 const double jitterMs =
8807 TimestampVerifier<int64_t, int64_t>::computeJitterMs(
8808 {framesRead, readPeriodNs},
8809 {0, 0} /* lastTimestamp */, mSampleRate);
8810 const double processMs = (lastIoBeginNs - mLastIoEndNs) * 1e-6;
8811
8812 audio_utils::lock_guard _l(mutex());
8813 mIoJitterMs.add(jitterMs);
8814 mProcessTimeMs.add(processMs);
8815 }
8816 // update timing info.
8817 mLastIoBeginNs = lastIoBeginNs;
8818 mLastIoEndNs = lastIoEndNs;
8819 lastLoopCountRead = loopCount;
8820 }
8821
8822 standbyIfNotAlreadyInStandby();
8823
8824 {
8825 audio_utils::lock_guard _l(mutex());
8826 for (size_t i = 0; i < mTracks.size(); i++) {
8827 sp<IAfRecordTrack> track = mTracks[i];
8828 track->invalidate();
8829 }
8830 mActiveTracks.clear();
8831 mStartStopCV.notify_all();
8832 }
8833
8834 releaseWakeLock();
8835
8836 ALOGV("RecordThread %p exiting", this);
8837 return false;
8838 }
8839
8840 void RecordThread::standbyIfNotAlreadyInStandby()
8841 {
8842 if (!mStandby) {
8843 inputStandBy();
8844 mThreadMetrics.logEndInterval();
8845 mThreadSnapshot.onEnd();
8846 mStandby = true;
8847 }
8848 }
8849
8850 void RecordThread::inputStandBy()
8851 {
8852 // Idle the fast capture if it's currently running
8853 if (mFastCapture != 0) {
8854 FastCaptureStateQueue *sq = mFastCapture->sq();
8855 FastCaptureState *state = sq->begin();
8856 if (!(state->mCommand & FastCaptureState::IDLE)) {
8857 state->mCommand = FastCaptureState::COLD_IDLE;
8858 state->mColdFutexAddr = &mFastCaptureFutex;
8859 state->mColdGen++;
8860 mFastCaptureFutex = 0;
8861 sq->end();
8862 // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
8863 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_ACKED);
8864 #if 0
8865 if (kUseFastCapture == FastCapture_Dynamic) {
8866 // FIXME
8867 }
8868 #endif
8869 #ifdef AUDIO_WATCHDOG
8870 // FIXME
8871 #endif
8872 } else {
8873 sq->end(false /*didModify*/);
8874 }
8875 }
8876 status_t result = mSource->standby();
8877 ALOGE_IF(result != OK, "Error when putting input stream into standby: %d", result);
8878
8879 // If going into standby, flush the pipe source.
8880 if (mPipeSource.get() != nullptr) {
8881 const ssize_t flushed = mPipeSource->flush();
8882 if (flushed > 0) {
8883 ALOGV("Input standby flushed PipeSource %zd frames", flushed);
8884 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += flushed;
8885 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
8886 }
8887 }
8888 }
8889
8890 // RecordThread::createRecordTrack_l() must be called with AudioFlinger::mutex() held
8891 sp<IAfRecordTrack> RecordThread::createRecordTrack_l(
8892 const sp<Client>& client,
8893 const audio_attributes_t& attr,
8894 uint32_t *pSampleRate,
8895 audio_format_t format,
8896 audio_channel_mask_t channelMask,
8897 size_t *pFrameCount,
8898 audio_session_t sessionId,
8899 size_t *pNotificationFrameCount,
8900 pid_t creatorPid,
8901 const AttributionSourceState& attributionSource,
8902 audio_input_flags_t *flags,
8903 pid_t tid,
8904 status_t *status,
8905 audio_port_handle_t portId,
8906 int32_t maxSharedAudioHistoryMs)
8907 {
8908 size_t frameCount = *pFrameCount;
8909 size_t notificationFrameCount = *pNotificationFrameCount;
8910 sp<IAfRecordTrack> track;
8911 status_t lStatus;
8912 audio_input_flags_t inputFlags = mInput->flags;
8913 audio_input_flags_t requestedFlags = *flags;
8914 uint32_t sampleRate;
8915
8916 lStatus = initCheck();
8917 if (lStatus != NO_ERROR) {
8918 ALOGE("createRecordTrack_l() audio driver not initialized");
8919 goto Exit;
8920 }
8921
8922 if (!audio_is_linear_pcm(mFormat) && (*flags & AUDIO_INPUT_FLAG_DIRECT) == 0) {
8923 ALOGE("createRecordTrack_l() on an encoded stream requires AUDIO_INPUT_FLAG_DIRECT");
8924 lStatus = BAD_VALUE;
8925 goto Exit;
8926 }
8927
8928 if (maxSharedAudioHistoryMs != 0) {
8929 if (!captureHotwordAllowed(attributionSource)) {
8930 lStatus = PERMISSION_DENIED;
8931 goto Exit;
8932 }
8933 if (maxSharedAudioHistoryMs < 0
8934 || maxSharedAudioHistoryMs > kMaxSharedAudioHistoryMs) {
8935 lStatus = BAD_VALUE;
8936 goto Exit;
8937 }
8938 }
8939 if (*pSampleRate == 0) {
8940 *pSampleRate = mSampleRate;
8941 }
8942 sampleRate = *pSampleRate;
8943
8944 // Special case: the FAST flag is considered OK if fast capture is present and access to
8945 // audio history is not required
8946 if (hasFastCapture() && mMaxSharedAudioHistoryMs == 0) {
8947 inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
8948 }
8949
8950 // Check if requested flags are compatible with input stream flags
8951 if ((*flags & inputFlags) != *flags) {
8952 ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
8953 " input flags (%08x)",
8954 *flags, inputFlags);
8955 *flags = (audio_input_flags_t)(*flags & inputFlags);
8956 }
8957
8958 // client expresses a preference for FAST and no access to audio history,
8959 // but we get the final say
8960 if (*flags & AUDIO_INPUT_FLAG_FAST && maxSharedAudioHistoryMs == 0) {
8961 if (
8962 // we formerly checked for a callback handler (non-0 tid),
8963 // but that is no longer required for TRANSFER_OBTAIN mode
8964 // No need to match hardware format, format conversion will be done in client side.
8965 //
8966 // Frame count is not specified (0), or is less than or equal to the pipe depth.
8967 // It is OK to provide a higher capacity than requested.
8968 // We will force it to mPipeFramesP2 below.
8969 (frameCount <= mPipeFramesP2) &&
8970 // PCM data
8971 audio_is_linear_pcm(format) &&
8972 // hardware channel mask
8973 (channelMask == mChannelMask) &&
8974 // hardware sample rate
8975 (sampleRate == mSampleRate) &&
8976 // record thread has an associated fast capture
8977 hasFastCapture() &&
8978 // there are sufficient fast track slots available
8979 mFastTrackAvail
8980 ) {
8981 // check compatibility with audio effects.
8982 audio_utils::lock_guard _l(mutex());
8983 // Do not accept FAST flag if the session has software effects
8984 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
8985 if (chain != 0) {
8986 audio_input_flags_t old = *flags;
8987 chain->checkInputFlagCompatibility(flags);
8988 if (old != *flags) {
8989 ALOGV("%p AUDIO_INPUT_FLAGS denied by effect old=%#x new=%#x",
8990 this, (int)old, (int)*flags);
8991 }
8992 }
8993 ALOGV_IF((*flags & AUDIO_INPUT_FLAG_FAST) != 0,
8994 "%p AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
8995 this, frameCount, mFrameCount);
8996 } else {
8997 ALOGV("%p AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
8998 "format=%#x isLinear=%d mFormat=%#x channelMask=%#x sampleRate=%u mSampleRate=%u "
8999 "hasFastCapture=%d tid=%d mFastTrackAvail=%d",
9000 this, frameCount, mFrameCount, mPipeFramesP2,
9001 format, audio_is_linear_pcm(format), mFormat, channelMask, sampleRate, mSampleRate,
9002 hasFastCapture(), tid, mFastTrackAvail);
9003 *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
9004 }
9005 }
9006
9007 // If FAST or RAW flags were corrected, ask caller to request new input from audio policy
9008 if ((*flags & AUDIO_INPUT_FLAG_FAST) !=
9009 (requestedFlags & AUDIO_INPUT_FLAG_FAST)) {
9010 *flags = (audio_input_flags_t) (*flags & ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
9011 lStatus = BAD_TYPE;
9012 goto Exit;
9013 }
9014
9015 // compute track buffer size in frames, and suggest the notification frame count
9016 if (*flags & AUDIO_INPUT_FLAG_FAST) {
9017 // fast track: frame count is exactly the pipe depth
9018 frameCount = mPipeFramesP2;
9019 // ignore requested notificationFrames, and always notify exactly once every HAL buffer
9020 notificationFrameCount = mFrameCount;
9021 } else {
9022 // not fast track: max notification period is resampled equivalent of one HAL buffer time
9023 // or 20 ms if there is a fast capture
9024 // TODO This could be a roundupRatio inline, and const
9025 size_t maxNotificationFrames = ((int64_t) (hasFastCapture() ? mSampleRate/50 : mFrameCount)
9026 * sampleRate + mSampleRate - 1) / mSampleRate;
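// A minimal sketch of the roundupRatio helper mentioned in the TODO above (hypothetical, not
// defined in this file):
//   static inline size_t roundupRatio(size_t numerator, size_t denominator) {
//       return (numerator + denominator - 1) / denominator;
//   }
// with which the expression above would read:
//   maxNotificationFrames = roundupRatio(
//           (hasFastCapture() ? mSampleRate / 50 : mFrameCount) * sampleRate, mSampleRate);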
9027 // minimum number of notification periods is at least kMinNotifications,
9028 // and at least kMinMs rounded up to a whole notification period (minNotificationsByMs)
9029 static const size_t kMinNotifications = 3;
9030 static const uint32_t kMinMs = 30;
9031 // TODO This could be a roundupRatio inline
9032 const size_t minFramesByMs = (sampleRate * kMinMs + 1000 - 1) / 1000;
9033 // TODO This could be a roundupRatio inline
9034 const size_t minNotificationsByMs = (minFramesByMs + maxNotificationFrames - 1) /
9035 maxNotificationFrames;
9036 const size_t minFrameCount = maxNotificationFrames *
9037 max(kMinNotifications, minNotificationsByMs);
9038 frameCount = max(frameCount, minFrameCount);
9039 if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
9040 notificationFrameCount = maxNotificationFrames;
9041 }
9042 }
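// Worked example with illustrative numbers only (48 kHz HAL, 20 ms HAL buffer of 960 frames,
// no fast capture, client at the HAL rate):
//   maxNotificationFrames = (960 * 48000 + 47999) / 48000 = 960
//   minFramesByMs         = (48000 * 30 + 999) / 1000     = 1440
//   minNotificationsByMs  = (1440 + 959) / 960             = 2
//   minFrameCount         = 960 * max(3, 2)                = 2880
// so a client requesting fewer than 2880 frames gets its buffer raised to 2880.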
9043 *pFrameCount = frameCount;
9044 *pNotificationFrameCount = notificationFrameCount;
9045
9046 { // scope for mutex()
9047 audio_utils::lock_guard _l(mutex());
9048 int32_t startFrames = -1;
9049 if (!mSharedAudioPackageName.empty()
9050 && mSharedAudioPackageName == attributionSource.packageName
9051 && mSharedAudioSessionId == sessionId
9052 && captureHotwordAllowed(attributionSource)) {
9053 startFrames = mSharedAudioStartFrames;
9054 }
9055
9056 track = IAfRecordTrack::create(this, client, attr, sampleRate,
9057 format, channelMask, frameCount,
9058 nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid,
9059 attributionSource, *flags, IAfTrackBase::TYPE_DEFAULT, portId,
9060 startFrames);
9061
9062 lStatus = track->initCheck();
9063 if (lStatus != NO_ERROR) {
9064 ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus);
9065 // track must be cleared from the caller as the caller has the AF lock
9066 goto Exit;
9067 }
9068 mTracks.add(track);
9069
9070 if ((*flags & AUDIO_INPUT_FLAG_FAST) && (tid != -1)) {
9071 pid_t callingPid = IPCThreadState::self()->getCallingPid();
9072 // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
9073 // so ask activity manager to do this on our behalf
9074 sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
9075 }
9076
9077 if (maxSharedAudioHistoryMs != 0) {
9078 sendResizeBufferConfigEvent_l(maxSharedAudioHistoryMs);
9079 }
9080 }
9081
9082 lStatus = NO_ERROR;
9083
9084 Exit:
9085 *status = lStatus;
9086 return track;
9087 }
9088
9089 status_t RecordThread::start(IAfRecordTrack* recordTrack,
9090 AudioSystem::sync_event_t event,
9091 audio_session_t triggerSession)
9092 {
9093 ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
9094 sp<ThreadBase> strongMe = this;
9095 status_t status = NO_ERROR;
9096
9097 if (event == AudioSystem::SYNC_EVENT_NONE) {
9098 recordTrack->clearSyncStartEvent();
9099 } else if (event != AudioSystem::SYNC_EVENT_SAME) {
9100 recordTrack->synchronizedRecordState().startRecording(
9101 mAfThreadCallback->createSyncEvent(
9102 event, triggerSession,
9103 recordTrack->sessionId(), syncStartEventCallback, recordTrack));
9104 }
9105
9106 {
9107 // This section is a rendezvous between binder thread executing start() and RecordThread
9108 audio_utils::lock_guard lock(mutex());
9109 if (recordTrack->isInvalid()) {
9110 recordTrack->clearSyncStartEvent();
9111 ALOGW("%s track %d: invalidated before startInput", __func__, recordTrack->portId());
9112 return DEAD_OBJECT;
9113 }
9114 if (mActiveTracks.indexOf(recordTrack) >= 0) {
9115 if (recordTrack->state() == IAfTrackBase::PAUSING) {
9116 // We haven't stopped yet (moved to PAUSED and not in mActiveTracks)
9117 // so no need to startInput().
9118 ALOGV("active record track PAUSING -> ACTIVE");
9119 recordTrack->setState(IAfTrackBase::ACTIVE);
9120 } else {
9121 ALOGV("active record track state %d", (int)recordTrack->state());
9122 }
9123 return status;
9124 }
9125
9126 // TODO consider other ways of handling this, such as changing the state to STARTING and
9127 // adding the track to mActiveTracks after returning from AudioSystem::startInput(),
9128 // or using a separate command thread
9129 recordTrack->setState(IAfTrackBase::STARTING_1);
9130 mActiveTracks.add(recordTrack);
9131 if (recordTrack->isExternalTrack()) {
9132 mutex().unlock();
9133 status = AudioSystem::startInput(recordTrack->portId());
9134 mutex().lock();
9135 if (recordTrack->isInvalid()) {
9136 recordTrack->clearSyncStartEvent();
9137 if (status == NO_ERROR && recordTrack->state() == IAfTrackBase::STARTING_1) {
9138 recordTrack->setState(IAfTrackBase::STARTING_2);
9139 // STARTING_2 forces destroy to call stopInput.
9140 }
9141 ALOGW("%s track %d: invalidated after startInput", __func__, recordTrack->portId());
9142 return DEAD_OBJECT;
9143 }
9144 if (recordTrack->state() != IAfTrackBase::STARTING_1) {
9145 ALOGW("%s(%d): unsynchronized mState:%d change",
9146 __func__, recordTrack->id(), (int)recordTrack->state());
9147 // Someone else has changed state, let them take over,
9148 // leave mState in the new state.
9149 recordTrack->clearSyncStartEvent();
9150 return INVALID_OPERATION;
9151 }
9152 // we're ok, but perhaps startInput has failed
9153 if (status != NO_ERROR) {
9154 ALOGW("%s(%d): startInput failed, status %d",
9155 __func__, recordTrack->id(), status);
9156 // We are in ActiveTracks if STARTING_1 and valid, so remove from ActiveTracks,
9157 // leave in STARTING_1, so destroy() will not call stopInput.
9158 mActiveTracks.remove(recordTrack);
9159 recordTrack->clearSyncStartEvent();
9160 return status;
9161 }
9162 sendIoConfigEvent_l(
9163 AUDIO_CLIENT_STARTED, recordTrack->creatorPid(), recordTrack->portId());
9164 }
9165
9166 recordTrack->logBeginInterval(patchSourcesToString(&mPatch)); // log to MediaMetrics
9167
9168 // Catch up with current buffer indices if thread is already running.
9169 // This is what makes a new client discard all buffered data. If the track's mRsmpInFront
9170 // was initialized to some value closer to the thread's mRsmpInFront, then the track could
9171 // see previously buffered data before it called start(), but with greater risk of overrun.
9172
9173 recordTrack->resamplerBufferProvider()->reset();
9174 if (!recordTrack->isDirect()) {
9175 // clear any converter state as new data will be discontinuous
9176 recordTrack->recordBufferConverter()->reset();
9177 }
9178 recordTrack->setState(IAfTrackBase::STARTING_2);
9179 // signal thread to start
9180 mWaitWorkCV.notify_all();
9181 return status;
9182 }
9183 }
9184
9185 void RecordThread::syncStartEventCallback(const wp<SyncEvent>& event)
9186 {
9187 const sp<SyncEvent> strongEvent = event.promote();
9188
9189 if (strongEvent != 0) {
9190 sp<IAfTrackBase> ptr =
9191 std::any_cast<const wp<IAfTrackBase>>(strongEvent->cookie()).promote();
9192 if (ptr != nullptr) {
9193 // TODO(b/291317898) handleSyncStartEvent is in IAfTrackBase not IAfRecordTrack.
9194 ptr->handleSyncStartEvent(strongEvent);
9195 }
9196 }
9197 }
9198
9199 bool RecordThread::stop(IAfRecordTrack* recordTrack) {
9200 ALOGV("RecordThread::stop");
9201 audio_utils::unique_lock _l(mutex());
9202 // if we're invalid, we can't be on the ActiveTracks.
9203 if (mActiveTracks.indexOf(recordTrack) < 0 || recordTrack->state() == IAfTrackBase::PAUSING) {
9204 return false;
9205 }
9206 // note that threadLoop may still be processing the track at this point [without lock]
9207 recordTrack->setState(IAfTrackBase::PAUSING);
9208
9209 // NOTE: Waiting here is important to keep stop synchronous.
9210 // This is needed for proper patchRecord peer release.
9211 while (recordTrack->state() == IAfTrackBase::PAUSING && !recordTrack->isInvalid()) {
9212 mWaitWorkCV.notify_all(); // signal thread to stop
9213 mStartStopCV.wait(_l, getTid());
9214 }
9215
9216 if (recordTrack->state() == IAfTrackBase::PAUSED) { // successful stop
9217 ALOGV("Record stopped OK");
9218 return true;
9219 }
9220
9221 // don't handle anything - we've been invalidated or restarted and in a different state
9222 ALOGW("%s(%d): unsynchronized stop, state: %d",
9223 __func__, recordTrack->id(), recordTrack->state());
9224 return false;
9225 }
9226
9227 bool RecordThread::isValidSyncEvent(const sp<SyncEvent>& /* event */) const
9228 {
9229 return false;
9230 }
9231
9232 status_t RecordThread::setSyncEvent(const sp<SyncEvent>& /* event */)
9233 {
9234 #if 0 // This branch is currently dead code, but is preserved in case it will be needed in future
9235 if (!isValidSyncEvent(event)) {
9236 return BAD_VALUE;
9237 }
9238
9239 audio_session_t eventSession = event->triggerSession();
9240 status_t ret = NAME_NOT_FOUND;
9241
9242 audio_utils::lock_guard _l(mutex());
9243
9244 for (size_t i = 0; i < mTracks.size(); i++) {
9245 sp<IAfRecordTrack> track = mTracks[i];
9246 if (eventSession == track->sessionId()) {
9247 (void) track->setSyncEvent(event);
9248 ret = NO_ERROR;
9249 }
9250 }
9251 return ret;
9252 #else
9253 return BAD_VALUE;
9254 #endif
9255 }
9256
9257 status_t RecordThread::getActiveMicrophones(
9258 std::vector<media::MicrophoneInfoFw>* activeMicrophones) const
9259 {
9260 ALOGV("RecordThread::getActiveMicrophones");
9261 audio_utils::lock_guard _l(mutex());
9262 if (!isStreamInitialized()) {
9263 return NO_INIT;
9264 }
9265 status_t status = mInput->stream->getActiveMicrophones(activeMicrophones);
9266 return status;
9267 }
9268
9269 status_t RecordThread::setPreferredMicrophoneDirection(
9270 audio_microphone_direction_t direction)
9271 {
9272 ALOGV("setPreferredMicrophoneDirection(%d)", direction);
9273 audio_utils::lock_guard _l(mutex());
9274 if (!isStreamInitialized()) {
9275 return NO_INIT;
9276 }
9277 return mInput->stream->setPreferredMicrophoneDirection(direction);
9278 }
9279
9280 status_t RecordThread::setPreferredMicrophoneFieldDimension(float zoom)
9281 {
9282 ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
9283 audio_utils::lock_guard _l(mutex());
9284 if (!isStreamInitialized()) {
9285 return NO_INIT;
9286 }
9287 return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
9288 }
9289
9290 status_t RecordThread::shareAudioHistory(
9291 const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
9292 int64_t sharedAudioStartMs) {
9293 audio_utils::lock_guard _l(mutex());
9294 return shareAudioHistory_l(sharedAudioPackageName, sharedSessionId, sharedAudioStartMs);
9295 }
9296
9297 status_t RecordThread::shareAudioHistory_l(
9298 const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
9299 int64_t sharedAudioStartMs) {
9300
9301 if ((hasAudioSession_l(sharedSessionId) & ThreadBase::TRACK_SESSION) == 0) {
9302 return BAD_VALUE;
9303 }
9304
9305 if (sharedAudioStartMs < 0
9306 || sharedAudioStartMs > INT64_MAX / mSampleRate) {
9307 return BAD_VALUE;
9308 }
9309
9310 // The current implementation of the input resampling buffer wraps around indexes at 32 bits.
9311 // As we cannot detect more than one wraparound, only accept values up to the current write
9312 // position after one wraparound.
9313 // We assume only recent wraparounds of mRsmpInRear, given it is unlikely that the requesting
9314 // app waits several hours after the start time was computed.
9315 int64_t sharedAudioStartFrames = sharedAudioStartMs * mSampleRate / 1000;
9316 const int32_t sharedOffset = audio_utils::safe_sub_overflow(mRsmpInRear,
9317 (int32_t)sharedAudioStartFrames);
9318 // Bring the start frame position within the input buffer to match the documented
9319 // "best effort" behavior of the API.
9320 if (sharedOffset < 0) {
9321 sharedAudioStartFrames = mRsmpInRear;
9322 } else if (sharedOffset > static_cast<signed>(mRsmpInFrames)) {
9323 sharedAudioStartFrames =
9324 audio_utils::safe_sub_overflow(mRsmpInRear, (int32_t)mRsmpInFrames);
9325 }
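// Illustrative numbers only (assumed, not from a real trace): with mSampleRate = 48000 and
// sharedAudioStartMs = 2000, sharedAudioStartFrames = 96000. If mRsmpInRear = 100000 and
// mRsmpInFrames = 16384, then sharedOffset = 4000, which lies inside the buffer, so the
// requested start position is kept. A negative sharedOffset (start ahead of the rear) snaps
// the start to mRsmpInRear; an offset larger than mRsmpInFrames (start too old) snaps it to
// mRsmpInRear - mRsmpInFrames.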
9326
9327 mSharedAudioPackageName = sharedAudioPackageName;
9328 if (mSharedAudioPackageName.empty()) {
9329 resetAudioHistory_l();
9330 } else {
9331 mSharedAudioSessionId = sharedSessionId;
9332 mSharedAudioStartFrames = (int32_t)sharedAudioStartFrames;
9333 }
9334 return NO_ERROR;
9335 }
9336
9337 void RecordThread::resetAudioHistory_l() {
9338 mSharedAudioSessionId = AUDIO_SESSION_NONE;
9339 mSharedAudioStartFrames = -1;
9340 mSharedAudioPackageName = "";
9341 }
9342
9343 ThreadBase::MetadataUpdate RecordThread::updateMetadata_l()
9344 {
9345 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
9346 return {}; // nothing to do
9347 }
9348 StreamInHalInterface::SinkMetadata metadata;
9349 auto backInserter = std::back_inserter(metadata.tracks);
9350 for (const sp<IAfRecordTrack>& track : mActiveTracks) {
9351 track->copyMetadataTo(backInserter);
9352 }
9353 mInput->stream->updateSinkMetadata(metadata);
9354 MetadataUpdate change;
9355 change.recordMetadataUpdate = metadata.tracks;
9356 return change;
9357 }
9358
9359 // destroyTrack_l() must be called with ThreadBase::mutex() held
9360 void RecordThread::destroyTrack_l(const sp<IAfRecordTrack>& track)
9361 {
9362 track->terminate();
9363 track->setState(IAfTrackBase::STOPPED);
9364
9365 // active tracks are removed by threadLoop()
9366 if (mActiveTracks.indexOf(track) < 0) {
9367 removeTrack_l(track);
9368 }
9369 }
9370
9371 void RecordThread::removeTrack_l(const sp<IAfRecordTrack>& track)
9372 {
9373 String8 result;
9374 track->appendDump(result, false /* active */);
9375 mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.c_str());
9376
9377 mTracks.remove(track);
9378 // need anything related to effects here?
9379 if (track->isFastTrack()) {
9380 ALOG_ASSERT(!mFastTrackAvail);
9381 mFastTrackAvail = true;
9382 }
9383 }
9384
9385 void RecordThread::dumpInternals_l(int fd, const Vector<String16>& /* args */)
9386 {
9387 AudioStreamIn *input = mInput;
9388 audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
9389 dprintf(fd, " AudioStreamIn: %p flags %#x (%s)\n",
9390 input, flags, toString(flags).c_str());
9391 dprintf(fd, " Frames read: %lld\n", (long long)mFramesRead);
9392 if (mActiveTracks.isEmpty()) {
9393 dprintf(fd, " No active record clients\n");
9394 }
9395
9396 if (input != nullptr) {
9397 dprintf(fd, " Hal stream dump:\n");
9398 (void)input->stream->dump(fd);
9399 }
9400
9401 dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
9402 dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
9403
9404 // Make a non-atomic copy of fast capture dump state so it won't change underneath us
9405 // while we are dumping it. It may be inconsistent, but it won't mutate!
9406 // This is a large object so we place it on the heap.
9407 // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
9408 const std::unique_ptr<FastCaptureDumpState> copy =
9409 std::make_unique<FastCaptureDumpState>(mFastCaptureDumpState);
9410 copy->dump(fd);
9411 }
9412
9413 void RecordThread::dumpTracks_l(int fd, const Vector<String16>& /* args */)
9414 {
9415 String8 result;
9416 size_t numtracks = mTracks.size();
9417 size_t numactive = mActiveTracks.size();
9418 size_t numactiveseen = 0;
9419 dprintf(fd, " %zu Tracks", numtracks);
9420 const char *prefix = " ";
9421 if (numtracks) {
9422 dprintf(fd, " of which %zu are active\n", numactive);
9423 result.append(prefix);
9424 mTracks[0]->appendDumpHeader(result);
9425 for (size_t i = 0; i < numtracks ; ++i) {
9426 sp<IAfRecordTrack> track = mTracks[i];
9427 if (track != 0) {
9428 bool active = mActiveTracks.indexOf(track) >= 0;
9429 if (active) {
9430 numactiveseen++;
9431 }
9432 result.append(prefix);
9433 track->appendDump(result, active);
9434 }
9435 }
9436 } else {
9437 dprintf(fd, "\n");
9438 }
9439
9440 if (numactiveseen != numactive) {
9441 result.append(" The following tracks are in the active list but"
9442 " not in the track list\n");
9443 result.append(prefix);
9444 mActiveTracks[0]->appendDumpHeader(result);
9445 for (size_t i = 0; i < numactive; ++i) {
9446 sp<IAfRecordTrack> track = mActiveTracks[i];
9447 if (mTracks.indexOf(track) < 0) {
9448 result.append(prefix);
9449 track->appendDump(result, true /* active */);
9450 }
9451 }
9452
9453 }
9454 write(fd, result.c_str(), result.size());
9455 }
9456
9457 void RecordThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
9458 {
9459 audio_utils::lock_guard _l(mutex());
9460 for (size_t i = 0; i < mTracks.size() ; i++) {
9461 sp<IAfRecordTrack> track = mTracks[i];
9462 if (track != 0 && track->portId() == portId) {
9463 track->setSilenced(silenced);
9464 }
9465 }
9466 }
9467
9468 void ResamplerBufferProvider::reset()
9469 {
9470 const auto threadBase = mRecordTrack->thread().promote();
9471 auto* const recordThread = static_cast<RecordThread *>(threadBase->asIAfRecordThread().get());
9472 mRsmpInUnrel = 0;
9473 const int32_t rear = recordThread->mRsmpInRear;
9474 ssize_t deltaFrames = 0;
9475 if (mRecordTrack->startFrames() >= 0) {
9476 int32_t startFrames = mRecordTrack->startFrames();
9477 // Accept a recent wraparound of mRsmpInRear
9478 if (startFrames <= rear) {
9479 deltaFrames = rear - startFrames;
9480 } else {
9481 deltaFrames = (int32_t)((int64_t)rear + UINT32_MAX + 1 - startFrames);
9482 }
9483 // start frame cannot be further in the past than start of resampling buffer
9484 if ((size_t) deltaFrames > recordThread->mRsmpInFrames) {
9485 deltaFrames = recordThread->mRsmpInFrames;
9486 }
9487 }
9488 mRsmpInFront = audio_utils::safe_sub_overflow(rear, static_cast<int32_t>(deltaFrames));
9489 }
9490
9491 void ResamplerBufferProvider::sync(
9492 size_t *framesAvailable, bool *hasOverrun)
9493 {
9494 const auto threadBase = mRecordTrack->thread().promote();
9495 auto* const recordThread = static_cast<RecordThread *>(threadBase->asIAfRecordThread().get());
9496 const int32_t rear = recordThread->mRsmpInRear;
9497 const int32_t front = mRsmpInFront;
9498 const ssize_t filled = audio_utils::safe_sub_overflow(rear, front);
9499
9500 size_t framesIn;
9501 bool overrun = false;
9502 if (filled < 0) {
9503 // should not happen, but treat like a massive overrun and re-sync
9504 framesIn = 0;
9505 mRsmpInFront = rear;
9506 overrun = true;
9507 } else if ((size_t) filled <= recordThread->mRsmpInFrames) {
9508 framesIn = (size_t) filled;
9509 } else {
9510 // client is not keeping up with server, but give it latest data
9511 framesIn = recordThread->mRsmpInFrames;
9512 mRsmpInFront = /* front = */ audio_utils::safe_sub_overflow(
9513 rear, static_cast<int32_t>(framesIn));
9514 overrun = true;
9515 }
9516 if (framesAvailable != NULL) {
9517 *framesAvailable = framesIn;
9518 }
9519 if (hasOverrun != NULL) {
9520 *hasOverrun = overrun;
9521 }
9522 }
9523
9524 // AudioBufferProvider interface
9525 status_t ResamplerBufferProvider::getNextBuffer(
9526 AudioBufferProvider::Buffer* buffer)
9527 {
9528 const auto threadBase = mRecordTrack->thread().promote();
9529 if (threadBase == 0) {
9530 buffer->frameCount = 0;
9531 buffer->raw = NULL;
9532 return NOT_ENOUGH_DATA;
9533 }
9534 auto* const recordThread = static_cast<RecordThread *>(threadBase->asIAfRecordThread().get());
9535 int32_t rear = recordThread->mRsmpInRear;
9536 int32_t front = mRsmpInFront;
9537 ssize_t filled = audio_utils::safe_sub_overflow(rear, front);
9538 // FIXME should not be P2 (don't want to increase latency)
9539 // FIXME if client not keeping up, discard
9540 LOG_ALWAYS_FATAL_IF(!(0 <= filled && (size_t) filled <= recordThread->mRsmpInFrames));
9541 // 'filled' may be non-contiguous, so return only the first contiguous chunk
9542
9543 front &= recordThread->mRsmpInFramesP2 - 1;
9544 size_t part1 = recordThread->mRsmpInFramesP2 - front;
9545 if (part1 > (size_t) filled) {
9546 part1 = filled;
9547 }
9548 size_t ask = buffer->frameCount;
9549 ALOG_ASSERT(ask > 0);
9550 if (part1 > ask) {
9551 part1 = ask;
9552 }
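// Example with assumed sizes: mRsmpInFramesP2 = 2048, masked front = 2040, filled = 100 and
// ask >= 8: part1 becomes 2048 - 2040 = 8, so only the 8 frames up to the end of the ring are
// returned now; the remaining 92 frames are returned by a subsequent getNextBuffer() call.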
9553 if (part1 == 0) {
9554 // out of data is fine since the resampler will return a short-count.
9555 buffer->raw = NULL;
9556 buffer->frameCount = 0;
9557 mRsmpInUnrel = 0;
9558 return NOT_ENOUGH_DATA;
9559 }
9560
9561 buffer->raw = (uint8_t*)recordThread->mRsmpInBuffer + front * recordThread->mFrameSize;
9562 buffer->frameCount = part1;
9563 mRsmpInUnrel = part1;
9564 return NO_ERROR;
9565 }
9566
9567 // AudioBufferProvider interface
9568 void ResamplerBufferProvider::releaseBuffer(
9569 AudioBufferProvider::Buffer* buffer)
9570 {
9571 int32_t stepCount = static_cast<int32_t>(buffer->frameCount);
9572 if (stepCount == 0) {
9573 return;
9574 }
9575 ALOG_ASSERT(stepCount <= (int32_t)mRsmpInUnrel);
9576 mRsmpInUnrel -= stepCount;
9577 mRsmpInFront = audio_utils::safe_add_overflow(mRsmpInFront, stepCount);
9578 buffer->raw = NULL;
9579 buffer->frameCount = 0;
9580 }
9581
9582 void RecordThread::checkBtNrec()
9583 {
9584 audio_utils::lock_guard _l(mutex());
9585 checkBtNrec_l();
9586 }
9587
9588 void RecordThread::checkBtNrec_l()
9589 {
9590 // disable AEC and NS if the device is a BT SCO headset supporting those
9591 // pre processings
9592 bool suspend = audio_is_bluetooth_sco_device(inDeviceType_l()) &&
9593 mAfThreadCallback->btNrecIsOff();
9594 if (mBtNrecSuspended.exchange(suspend) != suspend) {
9595 for (size_t i = 0; i < mEffectChains.size(); i++) {
9596 setEffectSuspended_l(FX_IID_AEC, suspend, mEffectChains[i]->sessionId());
9597 setEffectSuspended_l(FX_IID_NS, suspend, mEffectChains[i]->sessionId());
9598 }
9599 }
9600 }
9601
9602
9603 bool RecordThread::checkForNewParameter_l(const String8& keyValuePair,
9604 status_t& status)
9605 {
9606 bool reconfig = false;
9607
9608 status = NO_ERROR;
9609
9610 audio_format_t reqFormat = mFormat;
9611 uint32_t samplingRate = mSampleRate;
9612 // TODO this may change if we want to support capture from HDMI PCM multi-channel (e.g. on TVs).
9613 [[maybe_unused]] audio_channel_mask_t channelMask =
9614 audio_channel_in_mask_from_count(mChannelCount);
9615
9616 AudioParameter param = AudioParameter(keyValuePair);
9617 int value;
9618
9619 // scope for AutoPark extends to end of method
9620 AutoPark<FastCapture> park(mFastCapture);
9621
9622 // TODO Investigate when this code runs. Check with audio policy when a sample rate and
9623 // channel count change can be requested. Do we mandate the first client defines the
9624 // HAL sampling rate and channel count or do we allow changes on the fly?
9625 if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
9626 samplingRate = value;
9627 reconfig = true;
9628 }
9629 if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
9630 if (!audio_is_linear_pcm((audio_format_t) value)) {
9631 status = BAD_VALUE;
9632 } else {
9633 reqFormat = (audio_format_t) value;
9634 reconfig = true;
9635 }
9636 }
9637 if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
9638 audio_channel_mask_t mask = (audio_channel_mask_t) value;
9639 if (!audio_is_input_channel(mask) ||
9640 audio_channel_count_from_in_mask(mask) > FCC_LIMIT) {
9641 status = BAD_VALUE;
9642 } else {
9643 channelMask = mask;
9644 reconfig = true;
9645 }
9646 }
9647 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
9648 // do not accept frame count changes if tracks are open as the track buffer
9649 // size depends on frame count and correct behavior would not be guaranteed
9650 // if frame count is changed after track creation
9651 if (mActiveTracks.size() > 0) {
9652 status = INVALID_OPERATION;
9653 } else {
9654 reconfig = true;
9655 }
9656 }
9657 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
9658 LOG_FATAL("Should not set routing device in RecordThread");
9659 }
9660 if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR &&
9661 mAudioSource != (audio_source_t)value) {
9662 LOG_FATAL("Should not set audio source in RecordThread");
9663 }
9664
9665 if (status == NO_ERROR) {
9666 status = mInput->stream->setParameters(keyValuePair);
9667 if (status == INVALID_OPERATION) {
9668 inputStandBy();
9669 status = mInput->stream->setParameters(keyValuePair);
9670 }
9671 if (reconfig) {
9672 if (status == BAD_VALUE) {
9673 audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
9674 if (mInput->stream->getAudioProperties(&config) == OK &&
9675 audio_is_linear_pcm(config.format) && audio_is_linear_pcm(reqFormat) &&
9676 config.sample_rate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
9677 audio_channel_count_from_in_mask(config.channel_mask) <= FCC_LIMIT) {
9678 status = NO_ERROR;
9679 }
9680 }
9681 if (status == NO_ERROR) {
9682 readInputParameters_l();
9683 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
9684 }
9685 }
9686 }
9687
9688 return reconfig;
9689 }
9690
9691 String8 RecordThread::getParameters(const String8& keys)
9692 {
9693 audio_utils::lock_guard _l(mutex());
9694 if (initCheck() == NO_ERROR) {
9695 String8 out_s8;
9696 if (mInput->stream->getParameters(keys, &out_s8) == OK) {
9697 return out_s8;
9698 }
9699 }
9700 return {};
9701 }
9702
9703 void RecordThread::ioConfigChanged_l(audio_io_config_event_t event, pid_t pid,
9704 audio_port_handle_t portId) {
9705 sp<AudioIoDescriptor> desc;
9706 switch (event) {
9707 case AUDIO_INPUT_OPENED:
9708 case AUDIO_INPUT_REGISTERED:
9709 case AUDIO_INPUT_CONFIG_CHANGED:
9710 desc = sp<AudioIoDescriptor>::make(mId, mPatch, true /*isInput*/,
9711 mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
9712 break;
9713 case AUDIO_CLIENT_STARTED:
9714 desc = sp<AudioIoDescriptor>::make(mId, mPatch, portId);
9715 break;
9716 case AUDIO_INPUT_CLOSED:
9717 default:
9718 desc = sp<AudioIoDescriptor>::make(mId);
9719 break;
9720 }
9721 mAfThreadCallback->ioConfigChanged_l(event, desc, pid);
9722 }
9723
9724 void RecordThread::readInputParameters_l()
9725 {
9726 const audio_config_base_t audioConfig = mInput->getAudioProperties();
9727 mSampleRate = audioConfig.sample_rate;
9728 mChannelMask = audioConfig.channel_mask;
9729 if (!audio_is_input_channel(mChannelMask)) {
9730 LOG_ALWAYS_FATAL("Channel mask %#x not valid for input", mChannelMask);
9731 }
9732
9733 mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
9734
9735 // Get actual HAL format.
9736 status_t result = mInput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
9737 LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving input stream format: %d", result);
9738 // Get format from the shim, which will be different than the HAL format
9739 // if recording compressed audio from IEC61937 wrapped sources.
9740 mFormat = audioConfig.format;
9741 if (!audio_is_valid_format(mFormat)) {
9742 LOG_ALWAYS_FATAL("Format %#x not valid for input", mFormat);
9743 }
9744 if (audio_is_linear_pcm(mFormat)) {
9745 LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_LIMIT, "HAL channel count %d > %d",
9746 mChannelCount, FCC_LIMIT);
9747 } else {
9748 // Can have more than FCC_LIMIT channels in encoded streams.
9749 ALOGI("HAL format %#x is not linear pcm", mFormat);
9750 }
9751 mFrameSize = mInput->getFrameSize();
9752 LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
9753 mFrameSize);
9754 result = mInput->stream->getBufferSize(&mBufferSize);
9755 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
9756 mFrameCount = mBufferSize / mFrameSize;
9757 ALOGV("%p RecordThread params: mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
9758 "mBufferSize=%zu, mFrameCount=%zu",
9759 this, mChannelCount, mFormat, mFrameSize, mBufferSize, mFrameCount);
9760
9761 // mRsmpInFrames must be 0 before calling resizeInputBuffer_l for the first time
9762 mRsmpInFrames = 0;
9763 resizeInputBuffer_l(0 /*maxSharedAudioHistoryMs*/);
9764
9765 // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
9766 // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
9767
9768 audio_input_flags_t flags = mInput->flags;
9769 mediametrics::LogItem item(mThreadMetrics.getMetricsId());
9770 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
9771 .set(AMEDIAMETRICS_PROP_ENCODING, IAfThreadBase::formatToString(mFormat).c_str())
9772 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
9773 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
9774 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
9775 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
9776 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
9777 .record();
9778 }
9779
9780 uint32_t RecordThread::getInputFramesLost() const
9781 {
9782 audio_utils::lock_guard _l(mutex());
9783 uint32_t result;
9784 if (initCheck() == NO_ERROR && mInput->stream->getInputFramesLost(&result) == OK) {
9785 return result;
9786 }
9787 return 0;
9788 }
9789
9790 KeyedVector<audio_session_t, bool> RecordThread::sessionIds() const
9791 {
9792 KeyedVector<audio_session_t, bool> ids;
9793 audio_utils::lock_guard _l(mutex());
9794 for (size_t j = 0; j < mTracks.size(); ++j) {
9795 sp<IAfRecordTrack> track = mTracks[j];
9796 audio_session_t sessionId = track->sessionId();
9797 if (ids.indexOfKey(sessionId) < 0) {
9798 ids.add(sessionId, true);
9799 }
9800 }
9801 return ids;
9802 }
9803
9804 AudioStreamIn* RecordThread::clearInput()
9805 {
9806 audio_utils::lock_guard _l(mutex());
9807 AudioStreamIn *input = mInput;
9808 mInput = NULL;
9809 mInputSource.clear();
9810 return input;
9811 }
9812
9813 // this method must always be called either with ThreadBase mutex() held or inside the thread loop
9814 sp<StreamHalInterface> RecordThread::stream() const
9815 {
9816 if (mInput == NULL) {
9817 return NULL;
9818 }
9819 return mInput->stream;
9820 }
9821
9822 status_t RecordThread::addEffectChain_l(const sp<IAfEffectChain>& chain)
9823 {
9824 ALOGV("addEffectChain_l() %p on thread %p", chain.get(), this);
9825 chain->setThread(this);
9826 chain->setInBuffer(NULL);
9827 chain->setOutBuffer(NULL);
9828
9829 checkSuspendOnAddEffectChain_l(chain);
9830
9831 // make sure enabled pre processing effects state is communicated to the HAL as we
9832 // just moved them to a new input stream.
9833 chain->syncHalEffectsState_l();
9834
9835 mEffectChains.add(chain);
9836
9837 return NO_ERROR;
9838 }
9839
9840 size_t RecordThread::removeEffectChain_l(const sp<IAfEffectChain>& chain)
9841 {
9842 ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
9843
9844 for (size_t i = 0; i < mEffectChains.size(); i++) {
9845 if (chain == mEffectChains[i]) {
9846 mEffectChains.removeAt(i);
9847 break;
9848 }
9849 }
9850 return mEffectChains.size();
9851 }
9852
9853 status_t RecordThread::createAudioPatch_l(const struct audio_patch* patch,
9854 audio_patch_handle_t *handle)
9855 {
9856 status_t status = NO_ERROR;
9857
9858 // store new device and send to effects
9859 mInDeviceTypeAddr.mType = patch->sources[0].ext.device.type;
9860 mInDeviceTypeAddr.setAddress(patch->sources[0].ext.device.address);
9861 audio_port_handle_t deviceId = patch->sources[0].id;
9862 for (size_t i = 0; i < mEffectChains.size(); i++) {
9863 mEffectChains[i]->setInputDevice_l(inDeviceTypeAddr());
9864 }
9865
9866 checkBtNrec_l();
9867
9868 // store new source and send to effects
9869 if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
9870 mAudioSource = patch->sinks[0].ext.mix.usecase.source;
9871 for (size_t i = 0; i < mEffectChains.size(); i++) {
9872 mEffectChains[i]->setAudioSource_l(mAudioSource);
9873 }
9874 }
9875
9876 if (mInput->audioHwDev->supportsAudioPatches()) {
9877 sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
9878 status = hwDevice->createAudioPatch(patch->num_sources,
9879 patch->sources,
9880 patch->num_sinks,
9881 patch->sinks,
9882 handle);
9883 } else {
9884 status = mInput->stream->legacyCreateAudioPatch(patch->sources[0],
9885 patch->sinks[0].ext.mix.usecase.source,
9886 patch->sources[0].ext.device.type);
9887 *handle = AUDIO_PATCH_HANDLE_NONE;
9888 }
9889
9890 if ((mPatch.num_sources == 0) || (mPatch.sources[0].id != deviceId)) {
9891 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
9892 mPatch = *patch;
9893 }
9894
9895 const std::string patchSourcesAsString = patchSourcesToString(patch);
9896 mThreadMetrics.logEndInterval();
9897 mThreadMetrics.logCreatePatch(patchSourcesAsString, /* outDevices */ {});
9898 mThreadMetrics.logBeginInterval();
9899 // also dispatch to active AudioRecords
9900 for (const auto &track : mActiveTracks) {
9901 track->logEndInterval();
9902 track->logBeginInterval(patchSourcesAsString);
9903 }
9904 // Force metadata update after a route change
9905 mActiveTracks.setHasChanged();
9906
9907 return status;
9908 }
9909
9910 status_t RecordThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
9911 {
9912 status_t status = NO_ERROR;
9913
9914 mPatch = audio_patch{};
9915 mInDeviceTypeAddr.reset();
9916
9917 if (mInput->audioHwDev->supportsAudioPatches()) {
9918 sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
9919 status = hwDevice->releaseAudioPatch(handle);
9920 } else {
9921 status = mInput->stream->legacyReleaseAudioPatch();
9922 }
9923 // Force metadata update after a route change
9924 mActiveTracks.setHasChanged();
9925
9926 return status;
9927 }
9928
9929 void RecordThread::updateOutDevices(const DeviceDescriptorBaseVector& outDevices)
9930 {
9931 audio_utils::lock_guard _l(mutex());
9932 mOutDevices = outDevices;
9933 mOutDeviceTypeAddrs = deviceTypeAddrsFromDescriptors(mOutDevices);
9934 for (size_t i = 0; i < mEffectChains.size(); i++) {
9935 mEffectChains[i]->setDevices_l(outDeviceTypeAddrs());
9936 }
9937 }
9938
9939 int32_t RecordThread::getOldestFront_l()
9940 {
9941 if (mTracks.size() == 0) {
9942 return mRsmpInRear;
9943 }
9944 int32_t oldestFront = mRsmpInRear;
9945 int32_t maxFilled = 0;
9946 for (size_t i = 0; i < mTracks.size(); i++) {
9947 int32_t front = mTracks[i]->resamplerBufferProvider()->getFront();
9948 int32_t filled;
9949 (void)__builtin_sub_overflow(mRsmpInRear, front, &filled);
9950 if (filled > maxFilled) {
9951 oldestFront = front;
9952 maxFilled = filled;
9953 }
9954 }
9955 if (maxFilled > static_cast<signed>(mRsmpInFrames)) {
9956 (void)__builtin_sub_overflow(mRsmpInRear, mRsmpInFrames, &oldestFront);
9957 }
9958 return oldestFront;
9959 }
9960
9961 void RecordThread::updateFronts_l(int32_t offset)
9962 {
9963 if (offset == 0) {
9964 return;
9965 }
9966 for (size_t i = 0; i < mTracks.size(); i++) {
9967 int32_t front = mTracks[i]->resamplerBufferProvider()->getFront();
9968 front = audio_utils::safe_sub_overflow(front, offset);
9969 mTracks[i]->resamplerBufferProvider()->setFront(front);
9970 }
9971 }
9972
9973 void RecordThread::resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs)
9974 {
9975 // This is the formula for calculating the temporary buffer size.
9976 // With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
9977 // 1 full output buffer, regardless of the alignment of the available input.
9978 // The value is somewhat arbitrary, and could probably be even larger.
9979 // A larger value should allow more old data to be read after a track calls start(),
9980 // without increasing latency.
9981 //
9982 // Note this is independent of the maximum downsampling ratio permitted for capture.
9983 size_t minRsmpInFrames = mFrameCount * 7;
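// Example with an assumed typical HAL configuration: mFrameCount = 240 (5 ms at 48 kHz) gives
// minRsmpInFrames = 1680, which rounds up to mRsmpInFramesP2 = 2048 below, i.e. roughly 42 ms
// of capture history.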
9984
9985 // maxSharedAudioHistoryMs != 0 indicates a request to possibly make some part of the audio
9986 // capture history available to another client using the same session ID:
9987 // dimension the resampler input buffer accordingly.
9988
9989 // Get oldest client read position: getOldestFront_l() must be called before altering
9990 // mRsmpInRear, or mRsmpInFrames
9991 int32_t previousFront = getOldestFront_l();
9992 size_t previousRsmpInFramesP2 = mRsmpInFramesP2;
9993 int32_t previousRear = mRsmpInRear;
9994 mRsmpInRear = 0;
9995
9996 ALOG_ASSERT(maxSharedAudioHistoryMs >= 0
9997 && maxSharedAudioHistoryMs <= kMaxSharedAudioHistoryMs,
9998 "resizeInputBuffer_l() called with invalid max shared history %d",
9999 maxSharedAudioHistoryMs);
10000 if (maxSharedAudioHistoryMs != 0) {
10001 // resizeInputBuffer_l should never be called with a non zero shared history if the
10002 // buffer was not already allocated
10003 ALOG_ASSERT(mRsmpInBuffer != nullptr && mRsmpInFrames != 0,
10004 "resizeInputBuffer_l() called with shared history and unallocated buffer");
10005 size_t rsmpInFrames = (size_t)maxSharedAudioHistoryMs * mSampleRate / 1000;
10006 // never reduce resampler input buffer size
10007 if (rsmpInFrames <= mRsmpInFrames) {
10008 return;
10009 }
10010 mRsmpInFrames = rsmpInFrames;
10011 }
10012 mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;
10013 // Note: mRsmpInFrames is 0 when called with maxSharedAudioHistoryMs equal to 0, so it is always
10014 // initialized
10015 if (mRsmpInFrames < minRsmpInFrames) {
10016 mRsmpInFrames = minRsmpInFrames;
10017 }
10018 mRsmpInFramesP2 = roundup(mRsmpInFrames);
10019
10020 // TODO optimize audio capture buffer sizes ...
10021 // Here we calculate the size of the sliding buffer used as a source
10022 // for resampling. mRsmpInFramesP2 is currently roundup(mFrameCount * 7).
10023 // For current HAL frame counts, this is usually 2048 = 40 ms. It would
10024 // be better to have it derived from the pipe depth in the long term.
10025 // The current value is higher than necessary. However it should not add to latency.
10026
10027 // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
10028 mRsmpInFramesOA = mRsmpInFramesP2 + mFrameCount - 1;
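// Continuing the example above (assumed values): mRsmpInFramesOA = 2048 + 240 - 1 = 2287, so a
// HAL read of up to one full buffer that starts near the end of the ring cannot run past the
// allocation.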
10029
10030 void *rsmpInBuffer;
10031 (void)posix_memalign(&rsmpInBuffer, 32, mRsmpInFramesOA * mFrameSize);
10032 // if posix_memalign fails, will segv here.
10033 memset(rsmpInBuffer, 0, mRsmpInFramesOA * mFrameSize);
10034
10035 // Copy audio history if any from old buffer before freeing it
10036 if (previousRear != 0) {
10037 ALOG_ASSERT(mRsmpInBuffer != nullptr,
10038 "resizeInputBuffer_l() called with null buffer but frames already read from HAL");
10039
10040 ssize_t unread = audio_utils::safe_sub_overflow(previousRear, previousFront);
10041 previousFront &= previousRsmpInFramesP2 - 1;
10042 size_t part1 = previousRsmpInFramesP2 - previousFront;
10043 if (part1 > (size_t) unread) {
10044 part1 = unread;
10045 }
10046 if (part1 != 0) {
10047 memcpy(rsmpInBuffer, (const uint8_t*)mRsmpInBuffer + previousFront * mFrameSize,
10048 part1 * mFrameSize);
10049 mRsmpInRear = part1;
10050 part1 = unread - part1;
10051 if (part1 != 0) {
10052 memcpy((uint8_t*)rsmpInBuffer + mRsmpInRear * mFrameSize,
10053 (const uint8_t*)mRsmpInBuffer, part1 * mFrameSize);
10054 mRsmpInRear += part1;
10055 }
10056 }
10057 // Update front for all clients according to new rear
10058 updateFronts_l(audio_utils::safe_sub_overflow(previousRear, mRsmpInRear));
10059 } else {
10060 mRsmpInRear = 0;
10061 }
10062 free(mRsmpInBuffer);
10063 mRsmpInBuffer = rsmpInBuffer;
10064 }
10065
10066 void RecordThread::addPatchTrack(const sp<IAfPatchRecord>& record)
10067 {
10068 audio_utils::lock_guard _l(mutex());
10069 mTracks.add(record);
10070 if (record->getSource()) {
10071 mSource = record->getSource();
10072 }
10073 }
10074
10075 void RecordThread::deletePatchTrack(const sp<IAfPatchRecord>& record)
10076 {
10077 audio_utils::lock_guard _l(mutex());
10078 if (mSource == record->getSource()) {
10079 mSource = mInput;
10080 }
10081 destroyTrack_l(record);
10082 }
10083
10084 void RecordThread::toAudioPortConfig(struct audio_port_config* config)
10085 {
10086 ThreadBase::toAudioPortConfig(config);
10087 config->role = AUDIO_PORT_ROLE_SINK;
10088 config->ext.mix.hw_module = mInput->audioHwDev->handle();
10089 config->ext.mix.usecase.source = mAudioSource;
10090 if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
10091 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
10092 config->flags.input = mInput->flags;
10093 }
10094 }
10095
10096 // ----------------------------------------------------------------------------
10097 // Mmap
10098 // ----------------------------------------------------------------------------
10099
10100 // Mmap stream control interface implementation. Each MmapThreadHandle controls one
10101 // MmapPlaybackThread or MmapCaptureThread instance.
10102 class MmapThreadHandle : public MmapStreamInterface {
10103 public:
10104 explicit MmapThreadHandle(const sp<IAfMmapThread>& thread);
10105 ~MmapThreadHandle() override;
10106
10107 // MmapStreamInterface virtuals
10108 status_t createMmapBuffer(int32_t minSizeFrames,
10109 struct audio_mmap_buffer_info* info) final;
10110 status_t getMmapPosition(struct audio_mmap_position* position) final;
10111 status_t getExternalPosition(uint64_t* position, int64_t* timeNanos) final;
10112 status_t start(const AudioClient& client,
10113 const audio_attributes_t* attr, audio_port_handle_t* handle) final;
10114 status_t stop(audio_port_handle_t handle) final;
10115 status_t standby() final;
10116 status_t reportData(const void* buffer, size_t frameCount) final;
10117 private:
10118 const sp<IAfMmapThread> mThread;
10119 };
10120
10121 /* static */
10122 sp<MmapStreamInterface> IAfMmapThread::createMmapStreamInterfaceAdapter(
10123 const sp<IAfMmapThread>& mmapThread) {
10124 return sp<MmapThreadHandle>::make(mmapThread);
10125 }
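// Minimal usage sketch (assumptions: 'mmapThread' is a valid sp<IAfMmapThread> and kMinFrames is
// a caller-chosen minimum buffer size; error handling omitted):
//   sp<MmapStreamInterface> itf = IAfMmapThread::createMmapStreamInterfaceAdapter(mmapThread);
//   audio_mmap_buffer_info info;
//   if (itf->createMmapBuffer(kMinFrames, &info) == NO_ERROR) {
//       // map the returned shared memory, then call itf->start()/stop() per client
//   }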
10126
10127 MmapThreadHandle::MmapThreadHandle(const sp<IAfMmapThread>& thread)
10128 : mThread(thread)
10129 {
10130 assert(thread != 0); // thread must start non-null and stay non-null
10131 }
10132
10133 // MmapStreamInterface could be directly implemented by MmapThread except for this
10134 // special handling in the adapter dtor.
10135 MmapThreadHandle::~MmapThreadHandle()
10136 {
10137 mThread->disconnect();
10138 }
10139
10140 status_t MmapThreadHandle::createMmapBuffer(int32_t minSizeFrames,
10141 struct audio_mmap_buffer_info *info)
10142 {
10143 return mThread->createMmapBuffer(minSizeFrames, info);
10144 }
10145
10146 status_t MmapThreadHandle::getMmapPosition(struct audio_mmap_position* position)
10147 {
10148 return mThread->getMmapPosition(position);
10149 }
10150
10151 status_t MmapThreadHandle::getExternalPosition(uint64_t* position,
10152 int64_t *timeNanos) {
10153 return mThread->getExternalPosition(position, timeNanos);
10154 }
10155
10156 status_t MmapThreadHandle::start(const AudioClient& client,
10157 const audio_attributes_t *attr, audio_port_handle_t *handle)
10158 {
10159 return mThread->start(client, attr, handle);
10160 }
10161
10162 status_t MmapThreadHandle::stop(audio_port_handle_t handle)
10163 {
10164 return mThread->stop(handle);
10165 }
10166
10167 status_t MmapThreadHandle::standby()
10168 {
10169 return mThread->standby();
10170 }
10171
10172 status_t MmapThreadHandle::reportData(const void* buffer, size_t frameCount)
10173 {
10174 return mThread->reportData(buffer, frameCount);
10175 }
10176
10177
10178 MmapThread::MmapThread(
10179 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
10180 AudioHwDevice *hwDev, const sp<StreamHalInterface>& stream, bool systemReady, bool isOut)
10181 : ThreadBase(afThreadCallback, id, (isOut ? MMAP_PLAYBACK : MMAP_CAPTURE), systemReady, isOut),
10182 mSessionId(AUDIO_SESSION_NONE),
10183 mPortId(AUDIO_PORT_HANDLE_NONE),
10184 mHalStream(stream), mHalDevice(hwDev->hwDevice()), mAudioHwDev(hwDev),
10185 mActiveTracks(&this->mLocalLog),
10186 mHalVolFloat(-1.0f), // Initialize to illegal value so it always gets set properly later.
10187 mNoCallbackWarningCount(0)
10188 {
10189 mStandby = true;
10190 readHalParameters_l();
10191 }
10192
10193 void MmapThread::onFirstRef()
10194 {
10195 run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
10196 }
10197
10198 void MmapThread::disconnect()
10199 {
10200 ActiveTracks<IAfMmapTrack> activeTracks;
10201 audio_port_handle_t localPortId;
10202 {
10203 audio_utils::lock_guard _l(mutex());
10204 for (const sp<IAfMmapTrack>& t : mActiveTracks) {
10205 activeTracks.add(t);
10206 }
10207 localPortId = mPortId;
10208 }
10209 for (const sp<IAfMmapTrack>& t : activeTracks) {
10210 stop(t->portId());
10211 }
10212 // This will decrement references and may cause the destruction of this thread.
10213 if (isOutput()) {
10214 AudioSystem::releaseOutput(localPortId);
10215 } else {
10216 AudioSystem::releaseInput(localPortId);
10217 }
10218 }
10219
10220
10221 void MmapThread::configure_l(const audio_attributes_t* attr,
10222 audio_stream_type_t streamType __unused,
10223 audio_session_t sessionId,
10224 const sp<MmapStreamCallback>& callback,
10225 audio_port_handle_t deviceId,
10226 audio_port_handle_t portId)
10227 {
10228 mAttr = *attr;
10229 mSessionId = sessionId;
10230 mCallback = callback;
10231 mDeviceId = deviceId;
10232 mPortId = portId;
10233 }
10234
10235 status_t MmapThread::createMmapBuffer(int32_t minSizeFrames,
10236 struct audio_mmap_buffer_info *info)
10237 {
10238 audio_utils::lock_guard l(mutex());
10239 if (mHalStream == 0) {
10240 return NO_INIT;
10241 }
10242 mStandby = true;
10243 return mHalStream->createMmapBuffer(minSizeFrames, info);
10244 }
10245
10246 status_t MmapThread::getMmapPosition(struct audio_mmap_position* position) const
10247 {
10248 audio_utils::lock_guard l(mutex());
10249 if (mHalStream == 0) {
10250 return NO_INIT;
10251 }
10252 return mHalStream->getMmapPosition(position);
10253 }
10254
10255 status_t MmapThread::exitStandby_l()
10256 {
10257 // The HAL must receive track metadata before starting the stream
10258 updateMetadata_l();
10259 status_t ret = mHalStream->start();
10260 if (ret != NO_ERROR) {
10261 ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
10262 return ret;
10263 }
10264 if (mStandby) {
10265 mThreadMetrics.logBeginInterval();
10266 mThreadSnapshot.onBegin();
10267 mStandby = false;
10268 }
10269 return NO_ERROR;
10270 }
10271
10272 status_t MmapThread::start(const AudioClient& client,
10273 const audio_attributes_t *attr,
10274 audio_port_handle_t *handle)
10275 {
10276 audio_utils::lock_guard l(mutex());
10277 ALOGV("%s clientUid %d mStandby %d mPortId %d *handle %d", __FUNCTION__,
10278 client.attributionSource.uid, mStandby, mPortId, *handle);
10279 if (mHalStream == 0) {
10280 return NO_INIT;
10281 }
10282
10283 status_t ret;
10284
10285 // For the first track, reuse portId and session allocated when the stream was opened.
10286 if (*handle == mPortId) {
10287 acquireWakeLock_l();
10288 return NO_ERROR;
10289 }
10290
10291 audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
10292
10293 audio_io_handle_t io = mId;
10294 const AttributionSourceState adjAttributionSource = afutils::checkAttributionSourcePackage(
10295 client.attributionSource);
10296
10297 const auto localSessionId = mSessionId;
10298 auto localAttr = mAttr;
10299 if (isOutput()) {
10300 audio_config_t config = AUDIO_CONFIG_INITIALIZER;
10301 config.sample_rate = mSampleRate;
10302 config.channel_mask = mChannelMask;
10303 config.format = mFormat;
10304 audio_stream_type_t stream = streamType_l();
10305 audio_output_flags_t flags =
10306 (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
10307 audio_port_handle_t deviceId = mDeviceId;
10308 std::vector<audio_io_handle_t> secondaryOutputs;
10309 bool isSpatialized;
10310 bool isBitPerfect;
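    // Release the thread mutex across the binder call into the audio policy service.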
10311 mutex().unlock();
10312 ret = AudioSystem::getOutputForAttr(&localAttr, &io,
10313 localSessionId,
10314 &stream,
10315 adjAttributionSource,
10316 &config,
10317 flags,
10318 &deviceId,
10319 &portId,
10320 &secondaryOutputs,
10321 &isSpatialized,
10322 &isBitPerfect);
10323 mutex().lock();
10324 mAttr = localAttr;
10325 ALOGD_IF(!secondaryOutputs.empty(),
10326 "MmapThread::start does not support secondary outputs, ignoring them");
10327 } else {
10328 audio_config_base_t config;
10329 config.sample_rate = mSampleRate;
10330 config.channel_mask = mChannelMask;
10331 config.format = mFormat;
10332 audio_port_handle_t deviceId = mDeviceId;
10333 mutex().unlock();
10334 ret = AudioSystem::getInputForAttr(&localAttr, &io,
10335 RECORD_RIID_INVALID,
10336 localSessionId,
10337 adjAttributionSource,
10338 &config,
10339 AUDIO_INPUT_FLAG_MMAP_NOIRQ,
10340 &deviceId,
10341 &portId);
10342 mutex().lock();
10343 // localAttr is const for getInputForAttr.
10344 }
10345 // APM should not choose a different input or output stream for the same set of attributes
10346 // and audio configuration
10347 if (ret != NO_ERROR || io != mId) {
10348 ALOGE("%s: error getting output or input from APM (error %d, io %d expected io %d)",
10349 __FUNCTION__, ret, io, mId);
10350 return BAD_VALUE;
10351 }
10352
10353 if (isOutput()) {
10354 mutex().unlock();
10355 ret = AudioSystem::startOutput(portId);
10356 mutex().lock();
10357 } else {
10358 {
10359 // Add the track record before starting input so that the silent status for the
10360 // client can be cached.
10361 setClientSilencedState_l(portId, false /*silenced*/);
10362 }
10363 mutex().unlock();
10364 ret = AudioSystem::startInput(portId);
10365 mutex().lock();
10366 }
10367
10368 // abort if start is rejected by audio policy manager
10369 if (ret != NO_ERROR) {
10370 ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
10371 if (!mActiveTracks.isEmpty()) {
10372 mutex().unlock();
10373 if (isOutput()) {
10374 AudioSystem::releaseOutput(portId);
10375 } else {
10376 AudioSystem::releaseInput(portId);
10377 }
10378 mutex().lock();
10379 } else {
10380 mHalStream->stop();
10381 }
10382 eraseClientSilencedState_l(portId);
10383 return PERMISSION_DENIED;
10384 }
10385
10386 // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
10387 sp<IAfMmapTrack> track = IAfMmapTrack::create(
10388 this, attr == nullptr ? mAttr : *attr, mSampleRate, mFormat,
10389 mChannelMask, mSessionId, isOutput(),
10390 client.attributionSource,
10391 IPCThreadState::self()->getCallingPid(), portId);
10392 if (!isOutput()) {
10393 track->setSilenced_l(isClientSilenced_l(portId));
10394 }
10395
10396 if (isOutput()) {
10397 // force volume update when a new track is added
10398 mHalVolFloat = -1.0f;
10399 } else if (!track->isSilenced_l()) {
10400 for (const sp<IAfMmapTrack>& t : mActiveTracks) {
10401 if (t->isSilenced_l()
10402 && t->uid() != static_cast<uid_t>(client.attributionSource.uid)) {
10403 t->invalidate();
10404 }
10405 }
10406 }
10407
10408 mActiveTracks.add(track);
10409 sp<IAfEffectChain> chain = getEffectChain_l(mSessionId);
10410 if (chain != 0) {
10411 chain->setStrategy(getStrategyForStream(streamType_l()));
10412 chain->incTrackCnt();
10413 chain->incActiveTrackCnt();
10414 }
10415
10416 track->logBeginInterval(patchSinksToString(&mPatch)); // log to MediaMetrics
10417 *handle = portId;
10418
10419 if (mActiveTracks.size() == 1) {
10420 ret = exitStandby_l();
10421 }
10422
10423 broadcast_l();
10424
10425 ALOGV("%s DONE status %d handle %d stream %p", __FUNCTION__, ret, *handle, mHalStream.get());
10426
10427 return ret;
10428 }
10429
10430 status_t MmapThread::stop(audio_port_handle_t handle)
10431 {
10432 ALOGV("%s handle %d", __FUNCTION__, handle);
10433 audio_utils::lock_guard l(mutex());
10434
10435 if (mHalStream == 0) {
10436 return NO_INIT;
10437 }
10438
10439 if (handle == mPortId) {
10440 releaseWakeLock_l();
10441 return NO_ERROR;
10442 }
10443
10444 sp<IAfMmapTrack> track;
10445 for (const sp<IAfMmapTrack>& t : mActiveTracks) {
10446 if (handle == t->portId()) {
10447 track = t;
10448 break;
10449 }
10450 }
10451 if (track == 0) {
10452 return BAD_VALUE;
10453 }
10454
10455 mActiveTracks.remove(track);
10456 eraseClientSilencedState_l(track->portId());
10457
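    // Release the thread mutex across the AudioSystem stop/release binder calls.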
10458 mutex().unlock();
10459 if (isOutput()) {
10460 AudioSystem::stopOutput(track->portId());
10461 AudioSystem::releaseOutput(track->portId());
10462 } else {
10463 AudioSystem::stopInput(track->portId());
10464 AudioSystem::releaseInput(track->portId());
10465 }
10466 mutex().lock();
10467
10468 sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
10469 if (chain != 0) {
10470 chain->decActiveTrackCnt();
10471 chain->decTrackCnt();
10472 }
10473
10474 if (mActiveTracks.isEmpty()) {
10475 mHalStream->stop();
10476 }
10477
10478 broadcast_l();
10479
10480 return NO_ERROR;
10481 }
10482
10483 status_t MmapThread::standby()
10484 NO_THREAD_SAFETY_ANALYSIS // clang bug
10485 {
10486 ALOGV("%s", __FUNCTION__);
10487 audio_utils::lock_guard l_{mutex()};
10488
10489 if (mHalStream == 0) {
10490 return NO_INIT;
10491 }
10492 if (!mActiveTracks.isEmpty()) {
10493 return INVALID_OPERATION;
10494 }
10495 mHalStream->standby();
10496 if (!mStandby) {
10497 mThreadMetrics.logEndInterval();
10498 mThreadSnapshot.onEnd();
10499 mStandby = true;
10500 }
10501 releaseWakeLock_l();
10502 return NO_ERROR;
10503 }
10504
10505 status_t MmapThread::reportData(const void* /*buffer*/, size_t /*frameCount*/) {
10506 // This is a stub implementation. The MmapPlaybackThread overrides this function.
10507 return INVALID_OPERATION;
10508 }
10509
10510 void MmapThread::readHalParameters_l()
10511 {
10512 status_t result = mHalStream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
10513 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
10514 mFormat = mHALFormat;
10515 LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
10516 result = mHalStream->getFrameSize(&mFrameSize);
10517 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
10518 LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
10519 mFrameSize);
10520 result = mHalStream->getBufferSize(&mBufferSize);
10521 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
10522 mFrameCount = mBufferSize / mFrameSize;
10523
10524 // TODO: make a readHalParameters call?
10525 mediametrics::LogItem item(mThreadMetrics.getMetricsId());
10526 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
10527 .set(AMEDIAMETRICS_PROP_ENCODING, IAfThreadBase::formatToString(mFormat).c_str())
10528 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
10529 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
10530 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
10531 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
10532 /*
10533 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
10534 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELMASK,
10535 (int32_t)mHapticChannelMask)
10536 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELCOUNT,
10537 (int32_t)mHapticChannelCount)
10538 */
10539 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_ENCODING,
10540 IAfThreadBase::formatToString(mHALFormat).c_str())
10541 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_FRAMECOUNT,
10542 (int32_t)mFrameCount) // sic - added HAL
10543 .record();
10544 }
10545
10546 bool MmapThread::threadLoop()
10547 {
10548 {
10549 audio_utils::unique_lock _l(mutex());
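        // Check the silent/mute state once before entering the main loop.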
10550 checkSilentMode_l();
10551 }
10552
10553 const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
10554
10555 while (!exitPending())
10556 {
10557 Vector<sp<IAfEffectChain>> effectChains;
10558
10559 { // under Thread lock
10560 audio_utils::unique_lock _l(mutex());
10561
10562 if (mSignalPending) {
10563 // A signal was raised while we were unlocked
10564 mSignalPending = false;
10565 } else {
10566 if (mConfigEvents.isEmpty()) {
10567 // we're about to wait, flush the binder command buffer
10568 IPCThreadState::self()->flushCommands();
10569
10570 if (exitPending()) {
10571 break;
10572 }
10573
10574 // wait until we have something to do...
10575 ALOGV("%s going to sleep", myName.c_str());
10576 mWaitWorkCV.wait(_l);
10577 ALOGV("%s waking up", myName.c_str());
10578
10579 checkSilentMode_l();
10580
10581 continue;
10582 }
10583 }
10584
10585 processConfigEvents_l();
10586
10587 processVolume_l();
10588
10589 checkInvalidTracks_l();
10590
10591 mActiveTracks.updatePowerState_l(this);
10592
10593 updateMetadata_l();
10594
10595 lockEffectChains_l(effectChains);
10596 } // release Thread lock
10597
10598 for (size_t i = 0; i < effectChains.size(); i ++) {
10599 effectChains[i]->process_l(); // Thread is not locked, but effect chain is locked
10600 }
10601
10602 // enable changes in effect chain, including moving to another thread.
10603 unlockEffectChains(effectChains);
10604 // Effect chains will be actually deleted here if they were removed from
10605 // mEffectChains list during mixing or effects processing
10606 }
10607
10608 threadLoop_exit();
10609
10610 if (!mStandby) {
10611 threadLoop_standby();
10612 mStandby = true;
10613 }
10614
10615 ALOGV("Thread %p type %d exiting", this, mType);
10616 return false;
10617 }
10618
10619 // checkForNewParameter_l() must be called with ThreadBase::mutex() held
10620 bool MmapThread::checkForNewParameter_l(const String8& keyValuePair,
10621 status_t& status)
10622 {
10623 AudioParameter param = AudioParameter(keyValuePair);
10624 int value;
10625 bool sendToHal = true;
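    // MMAP threads are routed through audio patches; a keyRouting parameter is not expected here.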
10626 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
10627 LOG_FATAL("Setting a routing device should not happen on an MmapThread");
10628 }
10629 if (sendToHal) {
10630 status = mHalStream->setParameters(keyValuePair);
10631 } else {
10632 status = NO_ERROR;
10633 }
10634
10635 return false;
10636 }
10637
10638 String8 MmapThread::getParameters(const String8& keys)
10639 {
10640 audio_utils::lock_guard _l(mutex());
10641 String8 out_s8;
10642 if (initCheck() == NO_ERROR && mHalStream->getParameters(keys, &out_s8) == OK) {
10643 return out_s8;
10644 }
10645 return {};
10646 }
10647
10648 void MmapThread::ioConfigChanged_l(audio_io_config_event_t event, pid_t pid,
10649 audio_port_handle_t portId __unused) {
10650 sp<AudioIoDescriptor> desc;
10651 bool isInput = false;
10652 switch (event) {
10653 case AUDIO_INPUT_OPENED:
10654 case AUDIO_INPUT_REGISTERED:
10655 case AUDIO_INPUT_CONFIG_CHANGED:
10656 isInput = true;
10657 FALLTHROUGH_INTENDED;
10658 case AUDIO_OUTPUT_OPENED:
10659 case AUDIO_OUTPUT_REGISTERED:
10660 case AUDIO_OUTPUT_CONFIG_CHANGED:
10661 desc = sp<AudioIoDescriptor>::make(mId, mPatch, isInput,
10662 mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
10663 break;
10664 case AUDIO_INPUT_CLOSED:
10665 case AUDIO_OUTPUT_CLOSED:
10666 default:
10667 desc = sp<AudioIoDescriptor>::make(mId);
10668 break;
10669 }
10670 mAfThreadCallback->ioConfigChanged_l(event, desc, pid);
10671 }
10672
10673 status_t MmapThread::createAudioPatch_l(const struct audio_patch* patch,
10674 audio_patch_handle_t *handle)
10675 NO_THREAD_SAFETY_ANALYSIS // release and re-acquire mutex()
10676 {
10677 status_t status = NO_ERROR;
10678
10679 // store new device and send to effects
10680 audio_devices_t type = AUDIO_DEVICE_NONE;
10681 audio_port_handle_t deviceId;
10682 AudioDeviceTypeAddrVector sinkDeviceTypeAddrs;
10683 AudioDeviceTypeAddr sourceDeviceTypeAddr;
10684 uint32_t numDevices = 0;
10685 if (isOutput()) {
10686 for (unsigned int i = 0; i < patch->num_sinks; i++) {
10687 LOG_ALWAYS_FATAL_IF(popcount(patch->sinks[i].ext.device.type) > 1
10688 && !mAudioHwDev->supportsAudioPatches(),
10689 "Enumerated device type(%#x) must not be used "
10690 "as it does not support audio patches",
10691 patch->sinks[i].ext.device.type);
10692 type = static_cast<audio_devices_t>(type | patch->sinks[i].ext.device.type);
10693 sinkDeviceTypeAddrs.emplace_back(patch->sinks[i].ext.device.type,
10694 patch->sinks[i].ext.device.address);
10695 }
10696 deviceId = patch->sinks[0].id;
10697 numDevices = mPatch.num_sinks;
10698 } else {
10699 type = patch->sources[0].ext.device.type;
10700 deviceId = patch->sources[0].id;
10701 numDevices = mPatch.num_sources;
10702 sourceDeviceTypeAddr.mType = patch->sources[0].ext.device.type;
10703 sourceDeviceTypeAddr.setAddress(patch->sources[0].ext.device.address);
10704 }
10705
10706 for (size_t i = 0; i < mEffectChains.size(); i++) {
10707 if (isOutput()) {
10708 mEffectChains[i]->setDevices_l(sinkDeviceTypeAddrs);
10709 } else {
10710 mEffectChains[i]->setInputDevice_l(sourceDeviceTypeAddr);
10711 }
10712 }
10713
10714 if (!isOutput()) {
10715 // store new source and send to effects
10716 if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
10717 mAudioSource = patch->sinks[0].ext.mix.usecase.source;
10718 for (size_t i = 0; i < mEffectChains.size(); i++) {
10719 mEffectChains[i]->setAudioSource_l(mAudioSource);
10720 }
10721 }
10722 }
10723
10724 // For mmap streams, once the routing has changed, they will be disconnected. It should be
10725 // okay to notify the client earlier before the new patch creation.
10726 if (mDeviceId != deviceId) {
10727 if (const sp<MmapStreamCallback> callback = mCallback.promote()) {
10728 // The aaudio service handles the routing changed event asynchronously. In that case,
10729 // it is safe to hold the lock here.
10730 callback->onRoutingChanged(deviceId);
10731 }
10732 }
10733
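    // Use the HAL patch API when supported; otherwise fall back to the legacy routing call on the stream.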
10734 if (mAudioHwDev->supportsAudioPatches()) {
10735 status = mHalDevice->createAudioPatch(patch->num_sources, patch->sources, patch->num_sinks,
10736 patch->sinks, handle);
10737 } else {
10738 audio_port_config port;
10739 std::optional<audio_source_t> source;
10740 if (isOutput()) {
10741 port = patch->sinks[0];
10742 } else {
10743 port = patch->sources[0];
10744 source = patch->sinks[0].ext.mix.usecase.source;
10745 }
10746 status = mHalStream->legacyCreateAudioPatch(port, source, type);
10747 *handle = AUDIO_PATCH_HANDLE_NONE;
10748 }
10749
10750 if (numDevices == 0 || mDeviceId != deviceId) {
10751 if (isOutput()) {
10752 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
10753 mOutDeviceTypeAddrs = sinkDeviceTypeAddrs;
10754 checkSilentMode_l();
10755 } else {
10756 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
10757 mInDeviceTypeAddr = sourceDeviceTypeAddr;
10758 }
10759 mPatch = *patch;
10760 mDeviceId = deviceId;
10761 }
10762 // Force metadata update after a route change
10763 mActiveTracks.setHasChanged();
10764
10765 return status;
10766 }
10767
10768 status_t MmapThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
10769 {
10770 status_t status = NO_ERROR;
10771
10772 mPatch = audio_patch{};
10773 mOutDeviceTypeAddrs.clear();
10774 mInDeviceTypeAddr.reset();
10775
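    // Query patch support from the HAL device; treat a failed query as not supported.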
10776 bool supportsAudioPatches = mHalDevice->supportsAudioPatches(&supportsAudioPatches) == OK ?
10777 supportsAudioPatches : false;
10778
10779 if (supportsAudioPatches) {
10780 status = mHalDevice->releaseAudioPatch(handle);
10781 } else {
10782 status = mHalStream->legacyReleaseAudioPatch();
10783 }
10784 // Force metadata update after a route change
10785 mActiveTracks.setHasChanged();
10786
10787 return status;
10788 }
10789
10790 void MmapThread::toAudioPortConfig(struct audio_port_config* config)
10791 NO_THREAD_SAFETY_ANALYSIS // mAudioHwDev handle access
10792 {
10793 ThreadBase::toAudioPortConfig(config);
10794 if (isOutput()) {
10795 config->role = AUDIO_PORT_ROLE_SOURCE;
10796 config->ext.mix.hw_module = mAudioHwDev->handle();
10797 config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
10798 } else {
10799 config->role = AUDIO_PORT_ROLE_SINK;
10800 config->ext.mix.hw_module = mAudioHwDev->handle();
10801 config->ext.mix.usecase.source = mAudioSource;
10802 }
10803 }
10804
10805 status_t MmapThread::addEffectChain_l(const sp<IAfEffectChain>& chain)
10806 {
10807 audio_session_t session = chain->sessionId();
10808
10809 ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
10810 // Attach all tracks with same session ID to this chain.
10811 // indicate all active tracks in the chain
10812 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
10813 if (session == track->sessionId()) {
10814 chain->incTrackCnt();
10815 chain->incActiveTrackCnt();
10816 }
10817 }
10818
10819 chain->setThread(this);
10820 chain->setInBuffer(nullptr);
10821 chain->setOutBuffer(nullptr);
10822 chain->syncHalEffectsState_l();
10823
10824 mEffectChains.add(chain);
10825 checkSuspendOnAddEffectChain_l(chain);
10826 return NO_ERROR;
10827 }
10828
10829 size_t MmapThread::removeEffectChain_l(const sp<IAfEffectChain>& chain)
10830 {
10831 audio_session_t session = chain->sessionId();
10832
10833 ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
10834
10835 for (size_t i = 0; i < mEffectChains.size(); i++) {
10836 if (chain == mEffectChains[i]) {
10837 mEffectChains.removeAt(i);
10838 // detach all active tracks from the chain
10839 // detach all tracks with same session ID from this chain
10840 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
10841 if (session == track->sessionId()) {
10842 chain->decActiveTrackCnt();
10843 chain->decTrackCnt();
10844 }
10845 }
10846 break;
10847 }
10848 }
10849 return mEffectChains.size();
10850 }
10851
10852 void MmapThread::threadLoop_standby()
10853 {
10854 mHalStream->standby();
10855 }
10856
10857 void MmapThread::threadLoop_exit()
10858 {
10859 // Do not call callback->onTearDown() because it is redundant for thread exit
10860 // and because it can cause a recursive mutex lock on stop().
10861 }
10862
10863 status_t MmapThread::setSyncEvent(const sp<SyncEvent>& /* event */)
10864 {
10865 return BAD_VALUE;
10866 }
10867
10868 bool MmapThread::isValidSyncEvent(
10869 const sp<SyncEvent>& /* event */) const
10870 {
10871 return false;
10872 }
10873
10874 status_t MmapThread::checkEffectCompatibility_l(
10875 const effect_descriptor_t *desc, audio_session_t sessionId)
10876 {
10877 // No global effect sessions on mmap threads
10878 if (audio_is_global_session(sessionId)) {
10879 ALOGW("checkEffectCompatibility_l(): global effect %s on MMAP thread %s",
10880 desc->name, mThreadName);
10881 return BAD_VALUE;
10882 }
10883
10884 if (!isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC)) {
10885 ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on capture mmap thread",
10886 desc->name);
10887 return BAD_VALUE;
10888 }
10889 if (isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
10890 ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback mmap "
10891 "thread", desc->name);
10892 return BAD_VALUE;
10893 }
10894
10895 // Only allow effects without processing load or latency
10896 if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) != EFFECT_FLAG_NO_PROCESS) {
10897 return BAD_VALUE;
10898 }
10899
10900 if (IAfEffectModule::isHapticGenerator(&desc->type)) {
10901 ALOGE("%s(): HapticGenerator is not supported for MmapThread", __func__);
10902 return BAD_VALUE;
10903 }
10904
10905 return NO_ERROR;
10906 }
10907
10908 void MmapThread::checkInvalidTracks_l()
10909 {
10910 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
10911 if (track->isInvalid()) {
10912 if (const sp<MmapStreamCallback> callback = mCallback.promote()) {
10913 // The aaudio service handles the routing changed event asynchronously. In that case,
10914 // it is safe to hold the lock here.
10915 callback->onRoutingChanged(AUDIO_PORT_HANDLE_NONE);
10916 } else if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
10917 ALOGW("Could not notify MMAP stream tear down: no onRoutingChanged callback!");
10918 mNoCallbackWarningCount++;
10919 }
10920 break;
10921 }
10922 }
10923 }
10924
10925 void MmapThread::dumpInternals_l(int fd, const Vector<String16>& /* args */)
10926 {
10927 dprintf(fd, " Attributes: content type %d usage %d source %d\n",
10928 mAttr.content_type, mAttr.usage, mAttr.source);
10929 dprintf(fd, " Session: %d port Id: %d\n", mSessionId, mPortId);
10930 if (mActiveTracks.isEmpty()) {
10931 dprintf(fd, " No active clients\n");
10932 }
10933 }
10934
10935 void MmapThread::dumpTracks_l(int fd, const Vector<String16>& /* args */)
10936 {
10937 String8 result;
10938 size_t numtracks = mActiveTracks.size();
10939 dprintf(fd, " %zu Tracks\n", numtracks);
10940 const char *prefix = " ";
10941 if (numtracks) {
10942 result.append(prefix);
10943 mActiveTracks[0]->appendDumpHeader(result);
10944 for (size_t i = 0; i < numtracks ; ++i) {
10945 sp<IAfMmapTrack> track = mActiveTracks[i];
10946 result.append(prefix);
10947 track->appendDump(result, true /* active */);
10948 }
10949 } else {
10950 dprintf(fd, "\n");
10951 }
10952 write(fd, result.c_str(), result.size());
10953 }
10954
10955 /* static */
10956 sp<IAfMmapPlaybackThread> IAfMmapPlaybackThread::create(
10957 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
10958 AudioHwDevice* hwDev, AudioStreamOut* output, bool systemReady) {
10959 return sp<MmapPlaybackThread>::make(afThreadCallback, id, hwDev, output, systemReady);
10960 }
10961
10962 MmapPlaybackThread::MmapPlaybackThread(
10963 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
10964 AudioHwDevice *hwDev, AudioStreamOut *output, bool systemReady)
10965 : MmapThread(afThreadCallback, id, hwDev, output->stream, systemReady, true /* isOut */),
10966 mStreamType(AUDIO_STREAM_MUSIC),
10967 mOutput(output)
10968 {
10969 snprintf(mThreadName, kThreadNameLength, "AudioMmapOut_%X", id);
10970 mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
10971 mMasterVolume = afThreadCallback->masterVolume_l();
10972 mMasterMute = afThreadCallback->masterMute_l();
10973
10974 for (int i = AUDIO_STREAM_MIN; i < AUDIO_STREAM_FOR_POLICY_CNT; ++i) {
10975 const audio_stream_type_t stream{static_cast<audio_stream_type_t>(i)};
10976 mStreamTypes[stream].volume = 0.0f;
10977 mStreamTypes[stream].mute = mAfThreadCallback->streamMute_l(stream);
10978 }
10979 // Audio patch and call assistant volume are always max
10980 mStreamTypes[AUDIO_STREAM_PATCH].volume = 1.0f;
10981 mStreamTypes[AUDIO_STREAM_PATCH].mute = false;
10982 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].volume = 1.0f;
10983 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].mute = false;
10984
10985 if (mAudioHwDev) {
10986 if (mAudioHwDev->canSetMasterVolume()) {
10987 mMasterVolume = 1.0;
10988 }
10989
10990 if (mAudioHwDev->canSetMasterMute()) {
10991 mMasterMute = false;
10992 }
10993 }
10994 }
10995
10996 void MmapPlaybackThread::configure(const audio_attributes_t* attr,
10997 audio_stream_type_t streamType,
10998 audio_session_t sessionId,
10999 const sp<MmapStreamCallback>& callback,
11000 audio_port_handle_t deviceId,
11001 audio_port_handle_t portId)
11002 {
11003 audio_utils::lock_guard l(mutex());
11004 MmapThread::configure_l(attr, streamType, sessionId, callback, deviceId, portId);
11005 mStreamType = streamType;
11006 }
11007
11008 AudioStreamOut* MmapPlaybackThread::clearOutput()
11009 {
11010 audio_utils::lock_guard _l(mutex());
11011 AudioStreamOut *output = mOutput;
11012 mOutput = NULL;
11013 return output;
11014 }
11015
11016 void MmapPlaybackThread::setMasterVolume(float value)
11017 {
11018 audio_utils::lock_guard _l(mutex());
11019 // Don't apply master volume in SW if our HAL can do it for us.
11020 if (mAudioHwDev &&
11021 mAudioHwDev->canSetMasterVolume()) {
11022 mMasterVolume = 1.0;
11023 } else {
11024 mMasterVolume = value;
11025 }
11026 }
11027
11028 void MmapPlaybackThread::setMasterMute(bool muted)
11029 {
11030 audio_utils::lock_guard _l(mutex());
11031 // Don't apply master mute in SW if our HAL can do it for us.
11032 if (mAudioHwDev && mAudioHwDev->canSetMasterMute()) {
11033 mMasterMute = false;
11034 } else {
11035 mMasterMute = muted;
11036 }
11037 }
11038
11039 void MmapPlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
11040 {
11041 audio_utils::lock_guard _l(mutex());
11042 mStreamTypes[stream].volume = value;
11043 if (stream == mStreamType) {
11044 broadcast_l();
11045 }
11046 }
11047
11048 float MmapPlaybackThread::streamVolume(audio_stream_type_t stream) const
11049 {
11050 audio_utils::lock_guard _l(mutex());
11051 return mStreamTypes[stream].volume;
11052 }
11053
11054 void MmapPlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
11055 {
11056 audio_utils::lock_guard _l(mutex());
11057 mStreamTypes[stream].mute = muted;
11058 if (stream == mStreamType) {
11059 broadcast_l();
11060 }
11061 }
11062
11063 void MmapPlaybackThread::invalidateTracks(audio_stream_type_t streamType)
11064 {
11065 audio_utils::lock_guard _l(mutex());
11066 if (streamType == mStreamType) {
11067 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11068 track->invalidate();
11069 }
11070 broadcast_l();
11071 }
11072 }
11073
11074 void MmapPlaybackThread::invalidateTracks(std::set<audio_port_handle_t>& portIds)
11075 {
11076 audio_utils::lock_guard _l(mutex());
11077 bool trackMatch = false;
11078 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11079 if (portIds.find(track->portId()) != portIds.end()) {
11080 track->invalidate();
11081 trackMatch = true;
11082 portIds.erase(track->portId());
11083 }
11084 if (portIds.empty()) {
11085 break;
11086 }
11087 }
11088 if (trackMatch) {
11089 broadcast_l();
11090 }
11091 }
11092
11093 void MmapPlaybackThread::processVolume_l()
11094 NO_THREAD_SAFETY_ANALYSIS // access of track->processMuteEvent_l
11095 {
11096 float volume;
11097
11098 if (mMasterMute || streamMuted_l()) {
11099 volume = 0;
11100 } else {
11101 volume = mMasterVolume * streamVolume_l();
11102 }
11103
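    // Only push the volume when it differs from the last value applied to the HAL.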
11104 if (volume != mHalVolFloat) {
11105 // Convert volumes from float to 8.24
11106 uint32_t vol = (uint32_t)(volume * (1 << 24));
11107
11108 // Delegate volume control to effect in track effect chain if needed
11109 // only one effect chain can be present on DirectOutputThread, so if
11110 // there is one, the track is connected to it
11111 if (!mEffectChains.isEmpty()) {
11112 mEffectChains[0]->setVolume(&vol, &vol);
11113 volume = (float)vol / (1 << 24);
11114 }
11115 // Try to use HW volume control and fall back to SW control if not implemented
11116 if (mOutput->stream->setVolume(volume, volume) == NO_ERROR) {
11117 mHalVolFloat = volume; // HW volume control worked, so update value.
11118 mNoCallbackWarningCount = 0;
11119 } else {
11120 sp<MmapStreamCallback> callback = mCallback.promote();
11121 if (callback != 0) {
11122 mHalVolFloat = volume; // SW volume control worked, so update value.
11123 mNoCallbackWarningCount = 0;
11124 mutex().unlock();
11125 callback->onVolumeChanged(volume);
11126 mutex().lock();
11127 } else {
11128 if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
11129 ALOGW("Could not set MMAP stream volume: no volume callback!");
11130 mNoCallbackWarningCount++;
11131 }
11132 }
11133 }
11134 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11135 track->setMetadataHasChanged();
11136 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
11137 /*muteState=*/{mMasterMute,
11138 streamVolume_l() == 0.f,
11139 streamMuted_l(),
11140 // TODO(b/241533526): adjust logic to include mute from AppOps
11141 false /*muteFromPlaybackRestricted*/,
11142 false /*muteFromClientVolume*/,
11143 false /*muteFromVolumeShaper*/});
11144 }
11145 }
11146 }
11147
11148 ThreadBase::MetadataUpdate MmapPlaybackThread::updateMetadata_l()
11149 {
11150 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
11151 return {}; // nothing to do
11152 }
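    // Gather per-track source metadata and push it to the HAL output stream.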
11153 StreamOutHalInterface::SourceMetadata metadata;
11154 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11155 // No track is invalid as this is called after prepareTrack_l in the same critical section
11156 playback_track_metadata_v7_t trackMetadata;
11157 trackMetadata.base = {
11158 .usage = track->attributes().usage,
11159 .content_type = track->attributes().content_type,
11160 .gain = mHalVolFloat, // TODO: propagate from aaudio pre-mix volume
11161 };
11162 trackMetadata.channel_mask = track->channelMask();
11163 strncpy(trackMetadata.tags, track->attributes().tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
11164 metadata.tracks.push_back(trackMetadata);
11165 }
11166 mOutput->stream->updateSourceMetadata(metadata);
11167
11168 MetadataUpdate change;
11169 change.playbackMetadataUpdate = metadata.tracks;
11170 return change;
11171 };
11172
11173 void MmapPlaybackThread::checkSilentMode_l()
11174 {
11175 if (!mMasterMute) {
11176 char value[PROPERTY_VALUE_MAX];
11177 if (property_get("ro.audio.silent", value, "0") > 0) {
11178 char *endptr;
11179 unsigned long ul = strtoul(value, &endptr, 0);
11180 if (*endptr == '\0' && ul != 0) {
11181 ALOGW("%s: mute from ro.audio.silent. Silence is golden", __func__);
11182 // The setprop command will not allow a property to be changed after
11183 // the first time it is set, so we don't have to worry about un-muting.
11184 setMasterMute_l(true);
11185 }
11186 }
11187 }
11188 }
11189
11190 void MmapPlaybackThread::toAudioPortConfig(struct audio_port_config* config)
11191 {
11192 MmapThread::toAudioPortConfig(config);
11193 if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
11194 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
11195 config->flags.output = mOutput->flags;
11196 }
11197 }
11198
11199 status_t MmapPlaybackThread::getExternalPosition(uint64_t* position,
11200 int64_t* timeNanos) const
11201 {
11202 if (mOutput == nullptr) {
11203 return NO_INIT;
11204 }
11205 struct timespec timestamp;
11206 status_t status = mOutput->getPresentationPosition(position, &timestamp);
11207 if (status == NO_ERROR) {
11208 *timeNanos = timestamp.tv_sec * NANOS_PER_SECOND + timestamp.tv_nsec;
11209 }
11210 return status;
11211 }
11212
11213 status_t MmapPlaybackThread::reportData(const void* buffer, size_t frameCount) {
11214 // Send to MelProcessor for sound dose measurement.
11215 auto processor = mMelProcessor.load();
11216 if (processor) {
11217 processor->process(buffer, frameCount * mFrameSize);
11218 }
11219
11220 return NO_ERROR;
11221 }
11222
11223 // startMelComputation_l() must be called with AudioFlinger::mutex() held
11224 void MmapPlaybackThread::startMelComputation_l(
11225 const sp<audio_utils::MelProcessor>& processor)
11226 {
11227 ALOGV("%s: starting mel processor for thread %d", __func__, id());
11228 mMelProcessor.store(processor);
11229 if (processor) {
11230 processor->resume();
11231 }
11232
11233 // no need to update output format for MMapPlaybackThread since it is
11234 // assigned constant for each thread
11235 }
11236
11237 // stopMelComputation_l() must be called with AudioFlinger::mutex() held
11238 void MmapPlaybackThread::stopMelComputation_l()
11239 {
11240 ALOGV("%s: pausing mel processor for thread %d", __func__, id());
11241 auto melProcessor = mMelProcessor.load();
11242 if (melProcessor != nullptr) {
11243 melProcessor->pause();
11244 }
11245 }
11246
11247 void MmapPlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
11248 {
11249 MmapThread::dumpInternals_l(fd, args);
11250
11251 dprintf(fd, " Stream type: %d Stream volume: %f HAL volume: %f Stream mute %d\n",
11252 mStreamType, streamVolume_l(), mHalVolFloat, streamMuted_l());
11253 dprintf(fd, " Master volume: %f Master mute %d\n", mMasterVolume, mMasterMute);
11254 }
11255
11256 /* static */
11257 sp<IAfMmapCaptureThread> IAfMmapCaptureThread::create(
11258 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
11259 AudioHwDevice* hwDev, AudioStreamIn* input, bool systemReady) {
11260 return sp<MmapCaptureThread>::make(afThreadCallback, id, hwDev, input, systemReady);
11261 }
11262
11263 MmapCaptureThread::MmapCaptureThread(
11264 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
11265 AudioHwDevice *hwDev, AudioStreamIn *input, bool systemReady)
11266 : MmapThread(afThreadCallback, id, hwDev, input->stream, systemReady, false /* isOut */),
11267 mInput(input)
11268 {
11269 snprintf(mThreadName, kThreadNameLength, "AudioMmapIn_%X", id);
11270 mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
11271 }
11272
11273 status_t MmapCaptureThread::exitStandby_l()
11274 {
11275 {
11276 // mInput might have been cleared by clearInput()
11277 if (mInput != nullptr && mInput->stream != nullptr) {
11278 mInput->stream->setGain(1.0f);
11279 }
11280 }
11281 return MmapThread::exitStandby_l();
11282 }
11283
11284 AudioStreamIn* MmapCaptureThread::clearInput()
11285 {
11286 audio_utils::lock_guard _l(mutex());
11287 AudioStreamIn *input = mInput;
11288 mInput = NULL;
11289 return input;
11290 }
11291
11292 void MmapCaptureThread::processVolume_l()
11293 {
11294 bool changed = false;
11295 bool silenced = false;
11296
11297 sp<MmapStreamCallback> callback = mCallback.promote();
11298 if (callback == 0) {
11299 if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
11300 ALOGW("Could not set MMAP stream silenced: no onStreamSilenced callback!");
11301 mNoCallbackWarningCount++;
11302 }
11303 }
11304
11305 // After a change occurred in track silenced state, mute capture in audio DSP if at least one
11306 // track is silenced and unmute otherwise
11307 for (size_t i = 0; i < mActiveTracks.size() && !silenced; i++) {
11308 if (!mActiveTracks[i]->getAndSetSilencedNotified_l()) {
11309 changed = true;
11310 silenced = mActiveTracks[i]->isSilenced_l();
11311 }
11312 }
11313
11314 if (changed) {
11315 mInput->stream->setGain(silenced ? 0.0f: 1.0f);
11316 }
11317 }
11318
11319 ThreadBase::MetadataUpdate MmapCaptureThread::updateMetadata_l()
11320 {
11321 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
11322 return {}; // nothing to do
11323 }
11324 StreamInHalInterface::SinkMetadata metadata;
11325 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11326 // No track is invalid as this is called after prepareTrack_l in the same critical section
11327 record_track_metadata_v7_t trackMetadata;
11328 trackMetadata.base = {
11329 .source = track->attributes().source,
11330 .gain = 1, // capture tracks do not have volumes
11331 };
11332 trackMetadata.channel_mask = track->channelMask();
11333 strncpy(trackMetadata.tags, track->attributes().tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
11334 metadata.tracks.push_back(trackMetadata);
11335 }
11336 mInput->stream->updateSinkMetadata(metadata);
11337 MetadataUpdate change;
11338 change.recordMetadataUpdate = metadata.tracks;
11339 return change;
11340 }
11341
11342 void MmapCaptureThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
11343 {
11344 audio_utils::lock_guard _l(mutex());
11345 for (size_t i = 0; i < mActiveTracks.size() ; i++) {
11346 if (mActiveTracks[i]->portId() == portId) {
11347 mActiveTracks[i]->setSilenced_l(silenced);
11348 broadcast_l();
11349 }
11350 }
11351 setClientSilencedIfExists_l(portId, silenced);
11352 }
11353
11354 void MmapCaptureThread::toAudioPortConfig(struct audio_port_config* config)
11355 {
11356 MmapThread::toAudioPortConfig(config);
11357 if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
11358 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
11359 config->flags.input = mInput->flags;
11360 }
11361 }
11362
11363 status_t MmapCaptureThread::getExternalPosition(
11364 uint64_t* position, int64_t* timeNanos) const
11365 {
11366 if (mInput == nullptr) {
11367 return NO_INIT;
11368 }
11369 return mInput->getCapturePosition((int64_t*)position, timeNanos);
11370 }
11371
11372 // ----------------------------------------------------------------------------
11373
11374 /* static */
11375 sp<IAfPlaybackThread> IAfPlaybackThread::createBitPerfectThread(
11376 const sp<IAfThreadCallback>& afThreadCallback,
11377 AudioStreamOut* output, audio_io_handle_t id, bool systemReady) {
11378 return sp<BitPerfectThread>::make(afThreadCallback, output, id, systemReady);
11379 }
11380
11381 BitPerfectThread::BitPerfectThread(const sp<IAfThreadCallback> &afThreadCallback,
11382 AudioStreamOut *output, audio_io_handle_t id, bool systemReady)
11383 : MixerThread(afThreadCallback, output, id, systemReady, BIT_PERFECT) {}
11384
11385 PlaybackThread::mixer_state BitPerfectThread::prepareTracks_l(
11386 Vector<sp<IAfTrack>>* tracksToRemove) {
11387 mixer_state result = MixerThread::prepareTracks_l(tracksToRemove);
11388 // If there is only one active track and it is bit-perfect, enable tee buffer.
11389 float volumeLeft = 1.0f;
11390 float volumeRight = 1.0f;
11391 if (sp<IAfTrack> bitPerfectTrack = getTrackToStreamBitPerfectly_l();
11392 bitPerfectTrack != nullptr) {
11393 const int trackId = bitPerfectTrack->id();
11394 mAudioMixer->setParameter(
11395 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER, (void *)mSinkBuffer);
11396 mAudioMixer->setParameter(
11397 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER_FRAME_COUNT,
11398 (void *)(uintptr_t)mNormalFrameCount);
11399 bitPerfectTrack->getFinalVolume(&volumeLeft, &volumeRight);
11400 mIsBitPerfect = true;
11401 } else {
11402 mIsBitPerfect = false;
11403 // No need to copy bit-perfect data directly to sink buffer given there are multiple tracks
11404 // active.
11405 for (const auto& track : mActiveTracks) {
11406 const int trackId = track->id();
11407 mAudioMixer->setParameter(
11408 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER, nullptr);
11409 }
11410 }
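    // Forward the final volume to the output stage whenever it changes.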
11411 if (mVolumeLeft != volumeLeft || mVolumeRight != volumeRight) {
11412 mVolumeLeft = volumeLeft;
11413 mVolumeRight = volumeRight;
11414 setVolumeForOutput_l(volumeLeft, volumeRight);
11415 }
11416 return result;
11417 }
11418
11419 void BitPerfectThread::threadLoop_mix() {
11420 MixerThread::threadLoop_mix();
11421 mHasDataCopiedToSinkBuffer = mIsBitPerfect;
11422 }
11423
11424 void BitPerfectThread::setTracksInternalMute(
11425 std::map<audio_port_handle_t, bool>* tracksInternalMute) {
11426 for (auto& track : mTracks) {
11427 if (auto it = tracksInternalMute->find(track->portId()); it != tracksInternalMute->end()) {
11428 track->setInternalMute(it->second);
11429 tracksInternalMute->erase(it);
11430 }
11431 }
11432 }
11433
11434 sp<IAfTrack> BitPerfectThread::getTrackToStreamBitPerfectly_l() {
11435 if (com::android::media::audioserver::
11436 fix_concurrent_playback_behavior_with_bit_perfect_client()) {
11437 sp<IAfTrack> bitPerfectTrack = nullptr;
11438 bool allOtherTracksMuted = true;
11439 // Return the bit perfect track if all other tracks are muted
11440 for (const auto& track : mActiveTracks) {
11441 if (track->isBitPerfect()) {
11442 bitPerfectTrack = track;
11443 } else if (track->getFinalVolume() != 0.f) {
11444 allOtherTracksMuted = false;
11445 if (bitPerfectTrack != nullptr) {
11446 break;
11447 }
11448 }
11449 }
11450 return allOtherTracksMuted ? bitPerfectTrack : nullptr;
11451 } else {
11452 if (mActiveTracks.size() == 1 && mActiveTracks[0]->isBitPerfect()) {
11453 return mActiveTracks[0];
11454 }
11455 }
11456 return nullptr;
11457 }
11458
11459 } // namespace android
11460