1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18 #define LOG_TAG "AudioFlinger"
19 //#define LOG_NDEBUG 0
20 #define ATRACE_TAG ATRACE_TAG_AUDIO
21
22 #include "MmapTracks.h"
23 #include "PlaybackTracks.h"
24 #include "RecordTracks.h"
25
26 #include "Client.h"
27 #include "IAfEffect.h"
28 #include "IAfThread.h"
29 #include "ResamplerBufferProvider.h"
30
31 #include <audio_utils/minifloat.h>
32 #include <media/AudioValidator.h>
33 #include <media/RecordBufferConverter.h>
34 #include <media/nbaio/Pipe.h>
35 #include <media/nbaio/PipeReader.h>
36 #include <mediautils/ServiceUtilities.h>
37 #include <mediautils/SharedMemoryAllocator.h>
38 #include <private/media/AudioTrackShared.h>
39 #include <utils/Log.h>
40 #include <utils/Trace.h>
41
42 #include <linux/futex.h>
43 #include <math.h>
44 #include <sys/syscall.h>
45
46 // ----------------------------------------------------------------------------
47
48 // Note: the following macro is used for extremely verbose logging messages. In
49 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
50 // 0; but one side effect of this is to turn on all LOGV's as well. Some messages
51 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
52 // turned on. Do not uncomment the #define below unless you really know what you
53 // are doing and want to see all of the extremely verbose messages.
54 //#define VERY_VERY_VERBOSE_LOGGING
55 #ifdef VERY_VERY_VERBOSE_LOGGING
56 #define ALOGVV ALOGV
57 #else
58 #define ALOGVV(a...) do { } while(0)
59 #endif
60
61 // TODO: Remove when this is put into AidlConversionUtil.h
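// Evaluates the expression; if it holds an error, returns early from the enclosing
// function with a binder Status converted from the status_t, otherwise yields the value.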
62 #define VALUE_OR_RETURN_BINDER_STATUS(x) \
63 ({ \
64 auto _tmp = (x); \
65 if (!_tmp.ok()) return ::android::aidl_utils::binderStatusFromStatusT(_tmp.error()); \
66 std::move(_tmp.value()); \
67 })
68
69 namespace android {
70
71 using ::android::aidl_utils::binderStatusFromStatusT;
72 using binder::Status;
73 using content::AttributionSourceState;
74 using media::VolumeShaper;
75 // ----------------------------------------------------------------------------
76 // TrackBase
77 // ----------------------------------------------------------------------------
78 #undef LOG_TAG
79 #define LOG_TAG "AF::TrackBase"
80
81 static volatile int32_t nextTrackId = 55;
82
83 // TrackBase constructor must be called with AudioFlinger::mLock held
84 TrackBase::TrackBase(
85 IAfThreadBase *thread,
86 const sp<Client>& client,
87 const audio_attributes_t& attr,
88 uint32_t sampleRate,
89 audio_format_t format,
90 audio_channel_mask_t channelMask,
91 size_t frameCount,
92 void *buffer,
93 size_t bufferSize,
94 audio_session_t sessionId,
95 pid_t creatorPid,
96 uid_t clientUid,
97 bool isOut,
98 const alloc_type alloc,
99 track_type type,
100 audio_port_handle_t portId,
101 std::string metricsId)
102 :
103 mThread(thread),
104 mAllocType(alloc),
105 mClient(client),
106 mCblk(NULL),
107 // mBuffer, mBufferSize
108 mState(IDLE),
109 mAttr(attr),
110 mSampleRate(sampleRate),
111 mFormat(format),
112 mChannelMask(channelMask),
113 mChannelCount(isOut ?
114 audio_channel_count_from_out_mask(channelMask) :
115 audio_channel_count_from_in_mask(channelMask)),
116 mFrameSize(audio_bytes_per_frame(mChannelCount, format)),
117 mFrameCount(frameCount),
118 mSessionId(sessionId),
119 mIsOut(isOut),
120 mId(android_atomic_inc(&nextTrackId)),
121 mTerminated(false),
122 mType(type),
123 mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
124 mPortId(portId),
125 mIsInvalid(false),
126 mTrackMetrics(std::move(metricsId), isOut, clientUid),
127 mCreatorPid(creatorPid)
128 {
129 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
130 if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
131 ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
132 "%s(%d): uid %d tried to pass itself off as %d",
133 __func__, mId, callingUid, clientUid);
134 clientUid = callingUid;
135 }
136 // clientUid contains the uid of the app that is responsible for this track, so we can blame
137 // battery usage on it.
138 mUid = clientUid;
139
140 // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
141
142 size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
143 // check overflow when computing bufferSize due to multiplication by mFrameSize.
144 if (minBufferSize < frameCount // roundup rounds down for values above UINT_MAX / 2
145 || mFrameSize == 0 // format needs to be correct
146 || minBufferSize > SIZE_MAX / mFrameSize) {
147 android_errorWriteLog(0x534e4554, "34749571");
148 return;
149 }
150 minBufferSize *= mFrameSize;
151
152 if (buffer == nullptr) {
153 bufferSize = minBufferSize; // allocated here.
154 } else if (minBufferSize > bufferSize) {
155 android_errorWriteLog(0x534e4554, "38340117");
156 return;
157 }
158
159 size_t size = sizeof(audio_track_cblk_t);
160 if (buffer == NULL && alloc == ALLOC_CBLK) {
161 // check overflow when computing allocation size for streaming tracks.
162 if (size > SIZE_MAX - bufferSize) {
163 android_errorWriteLog(0x534e4554, "34749571");
164 return;
165 }
166 size += bufferSize;
167 }
168
169 if (client != 0) {
170 mCblkMemory = client->allocator().allocate(mediautils::NamedAllocRequest{{size},
171 std::string("Track ID: ").append(std::to_string(mId))});
172 if (mCblkMemory == 0 ||
173 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
174 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
175 ALOGE("%s", client->allocator().dump().c_str());
176 mCblkMemory.clear();
177 return;
178 }
179 } else {
180 mCblk = (audio_track_cblk_t *) malloc(size);
181 if (mCblk == NULL) {
182 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
183 return;
184 }
185 }
186
187 // construct the shared structure in-place.
188 if (mCblk != NULL) {
189 new(mCblk) audio_track_cblk_t();
190 switch (alloc) {
191 case ALLOC_READONLY: {
192 const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
193 if (roHeap == 0 ||
194 (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
195 (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
196 ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
197 __func__, mId, bufferSize);
198 if (roHeap != 0) {
199 roHeap->dump("buffer");
200 }
201 mCblkMemory.clear();
202 mBufferMemory.clear();
203 return;
204 }
205 memset(mBuffer, 0, bufferSize);
206 } break;
207 case ALLOC_PIPE:
208 mBufferMemory = thread->pipeMemory();
209 // mBuffer is the virtual address as seen from current process (mediaserver),
210 // and should normally be coming from mBufferMemory->unsecurePointer().
211 // However in this case the TrackBase does not reference the buffer directly.
212 // It should reference the buffer via the pipe.
213 // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
214 mBuffer = NULL;
215 bufferSize = 0;
216 break;
217 case ALLOC_CBLK:
218 // clear all buffers
219 if (buffer == NULL) {
220 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
221 memset(mBuffer, 0, bufferSize);
222 } else {
223 mBuffer = buffer;
224 #if 0
225 mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic
226 #endif
227 }
228 break;
229 case ALLOC_LOCAL:
230 mBuffer = calloc(1, bufferSize);
231 break;
232 case ALLOC_NONE:
233 mBuffer = buffer;
234 break;
235 default:
236 LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
237 }
238 mBufferSize = bufferSize;
239
240 #ifdef TEE_SINK
241 mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
242 #endif
243 // mState is mirrored for the client to read.
244 mState.setMirror(&mCblk->mState);
245 // ensure our state matches up until we consolidate the enumeration.
246 static_assert(CBLK_STATE_IDLE == IDLE);
247 static_assert(CBLK_STATE_PAUSING == PAUSING);
248 }
249 }
250
251 // TODO b/182392769: use attribution source util
252 static AttributionSourceState audioServerAttributionSource(pid_t pid) {
253 AttributionSourceState attributionSource{};
254 attributionSource.uid = AID_AUDIOSERVER;
255 attributionSource.pid = pid;
256 attributionSource.token = sp<BBinder>::make();
257 return attributionSource;
258 }
259
260 status_t TrackBase::initCheck() const
261 {
262 status_t status;
263 if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
264 status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
265 } else {
266 status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
267 }
268 return status;
269 }
270
271 TrackBase::~TrackBase()
272 {
273 // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
274 mServerProxy.clear();
275 releaseCblk();
276 mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
277 if (mClient != 0) {
278 // Client destructor must run with AudioFlinger client mutex locked
279 audio_utils::lock_guard _l(mClient->afClientCallback()->clientMutex());
280 // If the client's reference count drops to zero, the associated destructor
281 // must run with AudioFlinger lock held. Thus the explicit clear() rather than
282 // relying on the automatic clear() at end of scope.
283 mClient.clear();
284 }
285 if (mAllocType == ALLOC_LOCAL) {
286 free(mBuffer);
287 mBuffer = nullptr;
288 }
289 // flush the binder command buffer
290 IPCThreadState::self()->flushCommands();
291 }
292
293 // AudioBufferProvider interface
294 // getNextBuffer() = 0;
295 // This implementation of releaseBuffer() is used by Track and RecordTrack
296 void TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
297 {
298 #ifdef TEE_SINK
299 mTee.write(buffer->raw, buffer->frameCount);
300 #endif
301
302 ServerProxy::Buffer buf;
303 buf.mFrameCount = buffer->frameCount;
304 buf.mRaw = buffer->raw;
305 buffer->frameCount = 0;
306 buffer->raw = NULL;
307 mServerProxy->releaseBuffer(&buf);
308 }
309
310 status_t TrackBase::setSyncEvent(
311 const sp<audioflinger::SyncEvent>& event)
312 {
313 mSyncEvents.emplace_back(event);
314 return NO_ERROR;
315 }
316
317 PatchTrackBase::PatchTrackBase(const sp<ClientProxy>& proxy,
318 IAfThreadBase* thread, const Timeout& timeout)
319 : mProxy(proxy)
320 {
321 if (timeout) {
322 setPeerTimeout(*timeout);
323 } else {
324 // Double buffer mixer
325 uint64_t mixBufferNs = ((uint64_t)2 * thread->frameCount() * 1000000000) /
326 thread->sampleRate();
327 setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
328 }
329 }
330
331 void PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
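// Split the duration into whole seconds and the remaining nanoseconds for the timespec.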
332 mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
333 mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
334 }
335
336
337 // ----------------------------------------------------------------------------
338 // Playback
339 // ----------------------------------------------------------------------------
340 #undef LOG_TAG
341 #define LOG_TAG "AF::TrackHandle"
342
343 class TrackHandle : public android::media::BnAudioTrack {
344 public:
345 explicit TrackHandle(const sp<IAfTrack>& track);
346 ~TrackHandle() override;
347
348 binder::Status getCblk(std::optional<media::SharedFileRegion>* _aidl_return) final;
349 binder::Status start(int32_t* _aidl_return) final;
350 binder::Status stop() final;
351 binder::Status flush() final;
352 binder::Status pause() final;
353 binder::Status attachAuxEffect(int32_t effectId, int32_t* _aidl_return) final;
354 binder::Status setParameters(const std::string& keyValuePairs,
355 int32_t* _aidl_return) final;
356 binder::Status selectPresentation(int32_t presentationId, int32_t programId,
357 int32_t* _aidl_return) final;
358 binder::Status getTimestamp(media::AudioTimestampInternal* timestamp,
359 int32_t* _aidl_return) final;
360 binder::Status signal() final;
361 binder::Status applyVolumeShaper(const media::VolumeShaperConfiguration& configuration,
362 const media::VolumeShaperOperation& operation,
363 int32_t* _aidl_return) final;
364 binder::Status getVolumeShaperState(
365 int32_t id,
366 std::optional<media::VolumeShaperState>* _aidl_return) final;
367 binder::Status getDualMonoMode(
368 media::audio::common::AudioDualMonoMode* _aidl_return) final;
369 binder::Status setDualMonoMode(
370 media::audio::common::AudioDualMonoMode mode) final;
371 binder::Status getAudioDescriptionMixLevel(float* _aidl_return) final;
372 binder::Status setAudioDescriptionMixLevel(float leveldB) final;
373 binder::Status getPlaybackRateParameters(
374 media::audio::common::AudioPlaybackRate* _aidl_return) final;
375 binder::Status setPlaybackRateParameters(
376 const media::audio::common::AudioPlaybackRate& playbackRate) final;
377
378 private:
379 const sp<IAfTrack> mTrack;
380 };
381
382 /* static */
383 sp<media::IAudioTrack> IAfTrack::createIAudioTrackAdapter(const sp<IAfTrack>& track) {
384 return sp<TrackHandle>::make(track);
385 }
386
387 TrackHandle::TrackHandle(const sp<IAfTrack>& track)
388 : BnAudioTrack(),
389 mTrack(track)
390 {
391 setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
392 setInheritRt(true);
393 }
394
395 TrackHandle::~TrackHandle() {
396 // just stop the track on deletion, associated resources
397 // will be freed from the main thread once all pending buffers have
398 // been played. Unless it's not in the active track list, in which
399 // case we free everything now...
400 mTrack->destroy();
401 }
402
403 Status TrackHandle::getCblk(
404 std::optional<media::SharedFileRegion>* _aidl_return) {
405 *_aidl_return = legacy2aidl_NullableIMemory_SharedFileRegion(mTrack->getCblk()).value();
406 return Status::ok();
407 }
408
409 Status TrackHandle::start(int32_t* _aidl_return) {
410 *_aidl_return = mTrack->start();
411 return Status::ok();
412 }
413
414 Status TrackHandle::stop() {
415 mTrack->stop();
416 return Status::ok();
417 }
418
419 Status TrackHandle::flush() {
420 mTrack->flush();
421 return Status::ok();
422 }
423
424 Status TrackHandle::pause() {
425 mTrack->pause();
426 return Status::ok();
427 }
428
429 Status TrackHandle::attachAuxEffect(int32_t effectId,
430 int32_t* _aidl_return) {
431 *_aidl_return = mTrack->attachAuxEffect(effectId);
432 return Status::ok();
433 }
434
435 Status TrackHandle::setParameters(const std::string& keyValuePairs,
436 int32_t* _aidl_return) {
437 *_aidl_return = mTrack->setParameters(String8(keyValuePairs.c_str()));
438 return Status::ok();
439 }
440
441 Status TrackHandle::selectPresentation(int32_t presentationId, int32_t programId,
442 int32_t* _aidl_return) {
443 *_aidl_return = mTrack->selectPresentation(presentationId, programId);
444 return Status::ok();
445 }
446
447 Status TrackHandle::getTimestamp(media::AudioTimestampInternal* timestamp,
448 int32_t* _aidl_return) {
449 AudioTimestamp legacy;
450 *_aidl_return = mTrack->getTimestamp(legacy);
451 if (*_aidl_return != OK) {
452 return Status::ok();
453 }
454
455 // restrict position modulo INT_MAX to avoid integer sanitization abort
456 legacy.mPosition &= INT_MAX;
457
458 *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
459 return Status::ok();
460 }
461
462 Status TrackHandle::signal() {
463 mTrack->signal();
464 return Status::ok();
465 }
466
467 Status TrackHandle::applyVolumeShaper(
468 const media::VolumeShaperConfiguration& configuration,
469 const media::VolumeShaperOperation& operation,
470 int32_t* _aidl_return) {
471 sp<VolumeShaper::Configuration> conf = new VolumeShaper::Configuration();
472 *_aidl_return = conf->readFromParcelable(configuration);
473 if (*_aidl_return != OK) {
474 return Status::ok();
475 }
476
477 sp<VolumeShaper::Operation> op = new VolumeShaper::Operation();
478 *_aidl_return = op->readFromParcelable(operation);
479 if (*_aidl_return != OK) {
480 return Status::ok();
481 }
482
483 *_aidl_return = mTrack->applyVolumeShaper(conf, op);
484 return Status::ok();
485 }
486
487 Status TrackHandle::getVolumeShaperState(
488 int32_t id,
489 std::optional<media::VolumeShaperState>* _aidl_return) {
490 sp<VolumeShaper::State> legacy = mTrack->getVolumeShaperState(id);
491 if (legacy == nullptr) {
492 _aidl_return->reset();
493 return Status::ok();
494 }
495 media::VolumeShaperState aidl;
496 legacy->writeToParcelable(&aidl);
497 *_aidl_return = aidl;
498 return Status::ok();
499 }
500
501 Status TrackHandle::getDualMonoMode(
502 media::audio::common::AudioDualMonoMode* _aidl_return)
503 {
504 audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
505 const status_t status = mTrack->getDualMonoMode(&mode)
506 ?: AudioValidator::validateDualMonoMode(mode);
507 if (status == OK) {
508 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
509 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));
510 }
511 return binderStatusFromStatusT(status);
512 }
513
514 Status TrackHandle::setDualMonoMode(
515 media::audio::common::AudioDualMonoMode mode)
516 {
517 const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
518 aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));
519 return binderStatusFromStatusT(AudioValidator::validateDualMonoMode(localMonoMode)
520 ?: mTrack->setDualMonoMode(localMonoMode));
521 }
522
523 Status TrackHandle::getAudioDescriptionMixLevel(float* _aidl_return)
524 {
525 float leveldB = -std::numeric_limits<float>::infinity();
526 const status_t status = mTrack->getAudioDescriptionMixLevel(&leveldB)
527 ?: AudioValidator::validateAudioDescriptionMixLevel(leveldB);
528 if (status == OK) *_aidl_return = leveldB;
529 return binderStatusFromStatusT(status);
530 }
531
532 Status TrackHandle::setAudioDescriptionMixLevel(float leveldB)
533 {
534 return binderStatusFromStatusT(AudioValidator::validateAudioDescriptionMixLevel(leveldB)
535 ?: mTrack->setAudioDescriptionMixLevel(leveldB));
536 }
537
538 Status TrackHandle::getPlaybackRateParameters(
539 media::audio::common::AudioPlaybackRate* _aidl_return)
540 {
541 audio_playback_rate_t localPlaybackRate{};
542 status_t status = mTrack->getPlaybackRateParameters(&localPlaybackRate)
543 ?: AudioValidator::validatePlaybackRate(localPlaybackRate);
544 if (status == NO_ERROR) {
545 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
546 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(localPlaybackRate));
547 }
548 return binderStatusFromStatusT(status);
549 }
550
551 Status TrackHandle::setPlaybackRateParameters(
552 const media::audio::common::AudioPlaybackRate& playbackRate)
553 {
554 const audio_playback_rate_t localPlaybackRate = VALUE_OR_RETURN_BINDER_STATUS(
555 aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRate));
556 return binderStatusFromStatusT(AudioValidator::validatePlaybackRate(localPlaybackRate)
557 ?: mTrack->setPlaybackRateParameters(localPlaybackRate));
558 }
559
560 // ----------------------------------------------------------------------------
561 // AppOp for audio playback
562 // -------------------------------
563
564 // static
565 sp<OpPlayAudioMonitor> OpPlayAudioMonitor::createIfNeeded(
566 IAfThreadBase* thread,
567 const AttributionSourceState& attributionSource, const audio_attributes_t& attr, int id,
568 audio_stream_type_t streamType)
569 {
570 Vector<String16> packages;
571 const uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
572 getPackagesForUid(uid, packages);
573 if (isServiceUid(uid)) {
574 if (packages.isEmpty()) {
575 ALOGW("OpPlayAudio: not muting track:%d usage:%d for service UID %d", id, attr.usage,
576 uid);
577 return nullptr;
578 }
579 }
580 // stream type has been filtered by audio policy to indicate whether it can be muted
581 if (streamType == AUDIO_STREAM_ENFORCED_AUDIBLE) {
582 ALOGD("OpPlayAudio: not muting track:%d usage:%d ENFORCED_AUDIBLE", id, attr.usage);
583 return nullptr;
584 }
585 if ((attr.flags & AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY)
586 == AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY) {
587 ALOGD("OpPlayAudio: not muting track:%d flags %#x have FLAG_BYPASS_INTERRUPTION_POLICY",
588 id, attr.flags);
589 return nullptr;
590 }
591 return sp<OpPlayAudioMonitor>::make(thread, attributionSource, attr.usage, id, uid);
592 }
593
594 OpPlayAudioMonitor::OpPlayAudioMonitor(IAfThreadBase* thread,
595 const AttributionSourceState& attributionSource,
596 audio_usage_t usage, int id, uid_t uid)
597 : mThread(wp<IAfThreadBase>::fromExisting(thread)),
598 mHasOpPlayAudio(true),
599 mUsage((int32_t)usage),
600 mId(id),
601 mUid(uid),
602 mPackageName(VALUE_OR_FATAL(aidl2legacy_string_view_String16(
603 attributionSource.packageName.value_or("")))) {}
604
605 OpPlayAudioMonitor::~OpPlayAudioMonitor()
606 {
607 if (mOpCallback != 0) {
608 mAppOpsManager.stopWatchingMode(mOpCallback);
609 }
610 mOpCallback.clear();
611 }
612
613 void OpPlayAudioMonitor::onFirstRef()
614 {
615 // make sure not to broadcast the initial state since it is not needed and could
616 // cause a deadlock since this method can be called with the mThread->mLock held
617 checkPlayAudioForUsage(/*doBroadcast=*/false);
618 if (mPackageName.size()) {
619 mOpCallback = new PlayAudioOpCallback(this);
620 mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO, mPackageName, mOpCallback);
621 } else {
622 ALOGW("Skipping OpPlayAudioMonitor due to null package name");
623 }
624 }
625
626 bool OpPlayAudioMonitor::hasOpPlayAudio() const {
627 return mHasOpPlayAudio.load();
628 }
629
630 // Note this method is never called (and never to be) for audio server / patch record track
631 // - not called from constructor due to check on UID,
632 // - not called from PlayAudioOpCallback because the callback is not installed in this case
633 void OpPlayAudioMonitor::checkPlayAudioForUsage(bool doBroadcast) {
634 const bool hasAppOps =
635 mPackageName.size() &&
636 mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO, mUsage, mUid,
637 mPackageName) == AppOpsManager::MODE_ALLOWED;
638
639 bool shouldChange = !hasAppOps; // check if we need to update.
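// The compare-and-exchange below succeeds only when the stored value differs from
// hasAppOps, i.e. when the op state has actually changed.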
640 if (mHasOpPlayAudio.compare_exchange_strong(shouldChange, hasAppOps)) {
641 ALOGI("OpPlayAudio: track:%d package:%s usage:%d %smuted", mId,
642 String8(mPackageName).c_str(), mUsage, hasAppOps ? "not " : "");
643 if (doBroadcast) {
644 auto thread = mThread.promote();
645 if (thread != nullptr && thread->type() == IAfThreadBase::OFFLOAD) {
646 // Wake up Thread if offloaded, otherwise it may be several seconds for update.
647 audio_utils::lock_guard _l(thread->mutex());
648 thread->broadcast_l();
649 }
650 }
651 }
652 }
653
654 OpPlayAudioMonitor::PlayAudioOpCallback::PlayAudioOpCallback(
655 const wp<OpPlayAudioMonitor>& monitor) : mMonitor(monitor)
656 { }
657
658 void OpPlayAudioMonitor::PlayAudioOpCallback::opChanged(int32_t op,
659 const String16& packageName) {
660 if (op != AppOpsManager::OP_PLAY_AUDIO) {
661 return;
662 }
663
664 ALOGI("%s OP_PLAY_AUDIO callback received for %s", __func__, String8(packageName).c_str());
665 sp<OpPlayAudioMonitor> monitor = mMonitor.promote();
666 if (monitor != NULL) {
667 monitor->checkPlayAudioForUsage(/*doBroadcast=*/true);
668 }
669 }
670
671 // static
672 void OpPlayAudioMonitor::getPackagesForUid(
673 uid_t uid, Vector<String16>& packages)
674 {
675 PermissionController permissionController;
676 permissionController.getPackagesForUid(uid, packages);
677 }
678
679 // ----------------------------------------------------------------------------
680 #undef LOG_TAG
681 #define LOG_TAG "AF::Track"
682
683 /* static */
684 sp<IAfTrack> IAfTrack::create(
685 IAfPlaybackThread* thread,
686 const sp<Client>& client,
687 audio_stream_type_t streamType,
688 const audio_attributes_t& attr,
689 uint32_t sampleRate,
690 audio_format_t format,
691 audio_channel_mask_t channelMask,
692 size_t frameCount,
693 void *buffer,
694 size_t bufferSize,
695 const sp<IMemory>& sharedBuffer,
696 audio_session_t sessionId,
697 pid_t creatorPid,
698 const AttributionSourceState& attributionSource,
699 audio_output_flags_t flags,
700 track_type type,
701 audio_port_handle_t portId,
702 /** default behaviour is to start when there are as many frames
703 * ready as possible (aka. Buffer is full). */
704 size_t frameCountToBeReady,
705 float speed,
706 bool isSpatialized,
707 bool isBitPerfect) {
708 return sp<Track>::make(thread,
709 client,
710 streamType,
711 attr,
712 sampleRate,
713 format,
714 channelMask,
715 frameCount,
716 buffer,
717 bufferSize,
718 sharedBuffer,
719 sessionId,
720 creatorPid,
721 attributionSource,
722 flags,
723 type,
724 portId,
725 frameCountToBeReady,
726 speed,
727 isSpatialized,
728 isBitPerfect);
729 }
730
731 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
732 Track::Track(
733 IAfPlaybackThread* thread,
734 const sp<Client>& client,
735 audio_stream_type_t streamType,
736 const audio_attributes_t& attr,
737 uint32_t sampleRate,
738 audio_format_t format,
739 audio_channel_mask_t channelMask,
740 size_t frameCount,
741 void *buffer,
742 size_t bufferSize,
743 const sp<IMemory>& sharedBuffer,
744 audio_session_t sessionId,
745 pid_t creatorPid,
746 const AttributionSourceState& attributionSource,
747 audio_output_flags_t flags,
748 track_type type,
749 audio_port_handle_t portId,
750 size_t frameCountToBeReady,
751 float speed,
752 bool isSpatialized,
753 bool isBitPerfect)
754 : TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
755 // TODO: Using unsecurePointer() has some associated security pitfalls
756 // (see declaration for details).
757 // Either document why it is safe in this case or address the
758 // issue (e.g. by copying).
759 (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
760 (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
761 sessionId, creatorPid,
762 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
763 (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
764 type,
765 portId,
766 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
767 mFillingStatus(FS_INVALID),
768 // mRetryCount initialized later when needed
769 mSharedBuffer(sharedBuffer),
770 mStreamType(streamType),
771 mMainBuffer(thread->sinkBuffer()),
772 mAuxBuffer(NULL),
773 mAuxEffectId(0), mHasVolumeController(false),
774 mFrameMap(16 /* sink-frame-to-track-frame map memory */),
775 mVolumeHandler(new media::VolumeHandler(sampleRate)),
776 mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(thread, attributionSource, attr, id(),
777 streamType)),
778 // mSinkTimestamp
779 mFastIndex(-1),
780 mCachedVolume(1.0),
781 /* The track might not play immediately after becoming active, as if its volume were 0.
782 * When the track starts playing, its volume will be computed. */
783 mFinalVolume(0.f),
784 mResumeToStopping(false),
785 mFlushHwPending(false),
786 mFlags(flags),
787 mSpeed(speed),
788 mIsSpatialized(isSpatialized),
789 mIsBitPerfect(isBitPerfect)
790 {
791 // client == 0 implies sharedBuffer == 0
792 ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
793
794 ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
795 __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
796
797 if (mCblk == NULL) {
798 return;
799 }
800
801 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
802 if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
803 ALOGE("%s(%d): no more tracks available", __func__, mId);
804 releaseCblk(); // this makes the track invalid.
805 return;
806 }
807
808 if (sharedBuffer == 0) {
809 mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
810 mFrameSize, !isExternalTrack(), sampleRate);
811 } else {
812 mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
813 mFrameSize, sampleRate);
814 }
815 mServerProxy = mAudioTrackServerProxy;
816 mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
817
818 // only allocate a fast track index if we were able to allocate a normal track name
819 if (flags & AUDIO_OUTPUT_FLAG_FAST) {
820 // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
821 // race with setSyncEvent(). However, if we call it, we cannot properly start
822 // static fast tracks (SoundPool) immediately after stopping.
823 //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
824 ALOG_ASSERT(thread->fastTrackAvailMask_l() != 0);
825 const int i = __builtin_ctz(thread->fastTrackAvailMask_l());
826 ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
827 // FIXME This is too eager. We allocate a fast track index before the
828 // fast track becomes active. Since fast tracks are a scarce resource,
829 // this means we are potentially denying other more important fast tracks from
830 // being created. It would be better to allocate the index dynamically.
831 mFastIndex = i;
832 thread->fastTrackAvailMask_l() &= ~(1 << i);
833 }
834
835 mServerLatencySupported = checkServerLatencySupported(format, flags);
836 #ifdef TEE_SINK
837 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
838 + "_" + std::to_string(mId) + "_T");
839 #endif
840
841 if (thread->supportsHapticPlayback()) {
842 // If the track is attached to a haptic playback thread, it may have a HapticGenerator
843 // effect on it, which will generate haptic data. In that case, an external vibration
844 // is always created for all tracks attached to the haptic playback thread.
845 mAudioVibrationController = new AudioVibrationController(this);
846 std::string packageName = attributionSource.packageName.has_value() ?
847 attributionSource.packageName.value() : "";
848 mExternalVibration = new os::ExternalVibration(
849 mUid, packageName, mAttr, mAudioVibrationController);
850 }
851
852 // Once this item is logged by the server, the client can add properties.
853 const char * const traits = sharedBuffer == 0 ? "" : "static";
854 mTrackMetrics.logConstructor(creatorPid, uid, id(), traits, streamType);
855 }
856
857 Track::~Track()
858 {
859 ALOGV("%s(%d)", __func__, mId);
860
861 // The destructor would clear mSharedBuffer,
862 // but it will not push the decremented reference count,
863 // leaving the client's IMemory dangling indefinitely.
864 // This prevents that leak.
865 if (mSharedBuffer != 0) {
866 mSharedBuffer.clear();
867 }
868 }
869
870 status_t Track::initCheck() const
871 {
872 status_t status = TrackBase::initCheck();
873 if (status == NO_ERROR && mCblk == nullptr) {
874 status = NO_MEMORY;
875 }
876 return status;
877 }
878
879 void Track::destroy()
880 {
881 // NOTE: destroyTrack_l() can remove a strong reference to this Track
882 // by removing it from mTracks vector, so there is a risk that this Tracks's
883 // destructor is called. As the destructor needs to lock mLock,
884 // we must acquire a strong reference on this Track before locking mLock
885 // here so that the destructor is called only when exiting this function.
886 // On the other hand, as long as Track::destroy() is only called by
887 // TrackHandle destructor, the TrackHandle still holds a strong ref on
888 // this Track with its member mTrack.
889 sp<Track> keep(this);
890 { // scope for mLock
891 bool wasActive = false;
892 const sp<IAfThreadBase> thread = mThread.promote();
893 if (thread != 0) {
894 audio_utils::unique_lock ul(thread->mutex());
895 thread->waitWhileThreadBusy_l(ul);
896
897 auto* const playbackThread = thread->asIAfPlaybackThread().get();
898 wasActive = playbackThread->destroyTrack_l(this);
899 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
900 }
901 if (isExternalTrack() && !wasActive) {
902 // If the track is not active, the TrackHandle is responsible for
903 // releasing the port id, not the ThreadBase::threadLoop().
904 // At this point, there is no concurrency issue as the track is going away.
905 AudioSystem::releaseOutput(mPortId);
906 }
907 }
908 }
909
910 void Track::appendDumpHeader(String8& result) const
911 {
912 result.appendFormat("Type Id Active Client Session Port Id S Flags "
913 " Format Chn mask SRate "
914 "ST Usg CT "
915 " G db L dB R dB VS dB "
916 " Server FrmCnt FrmRdy F Underruns Flushed BitPerfect InternalMute"
917 "%s\n",
918 isServerLatencySupported() ? " Latency" : "");
919 }
920
921 void Track::appendDump(String8& result, bool active) const
922 {
923 char trackType;
924 switch (mType) {
925 case TYPE_DEFAULT:
926 case TYPE_OUTPUT:
927 if (isStatic()) {
928 trackType = 'S'; // static
929 } else {
930 trackType = ' '; // normal
931 }
932 break;
933 case TYPE_PATCH:
934 trackType = 'P';
935 break;
936 default:
937 trackType = '?';
938 }
939
940 if (isFastTrack()) {
941 result.appendFormat("F%d %c %6d", mFastIndex, trackType, mId);
942 } else {
943 result.appendFormat(" %c %6d", trackType, mId);
944 }
945
946 char nowInUnderrun;
947 switch (mObservedUnderruns.mBitFields.mMostRecent) {
948 case UNDERRUN_FULL:
949 nowInUnderrun = ' ';
950 break;
951 case UNDERRUN_PARTIAL:
952 nowInUnderrun = '<';
953 break;
954 case UNDERRUN_EMPTY:
955 nowInUnderrun = '*';
956 break;
957 default:
958 nowInUnderrun = '?';
959 break;
960 }
961
962 char fillingStatus;
963 switch (mFillingStatus) {
964 case FS_INVALID:
965 fillingStatus = 'I';
966 break;
967 case FS_FILLING:
968 fillingStatus = 'f';
969 break;
970 case FS_FILLED:
971 fillingStatus = 'F';
972 break;
973 case FS_ACTIVE:
974 fillingStatus = 'A';
975 break;
976 default:
977 fillingStatus = '?';
978 break;
979 }
980
981 // clip framesReadySafe to max representation in dump
982 const size_t framesReadySafe =
983 std::min(mAudioTrackServerProxy->framesReadySafe(), (size_t)99999999);
984
985 // obtain volumes
986 const gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
987 const std::pair<float /* volume */, bool /* active */> vsVolume =
988 mVolumeHandler->getLastVolume();
989
990 // Our effective frame count is obtained by ServerProxy::getBufferSizeInFrames()
991 // as it may be reduced by the application.
992 const size_t bufferSizeInFrames = (size_t)mAudioTrackServerProxy->getBufferSizeInFrames();
993 // Check whether the buffer size has been modified by the app.
994 const char modifiedBufferChar = bufferSizeInFrames < mFrameCount
995 ? 'r' /* buffer reduced */: bufferSizeInFrames > mFrameCount
996 ? 'e' /* error */ : ' ' /* identical */;
997
998 result.appendFormat("%7s %6u %7u %7u %2s 0x%03X "
999 "%08X %08X %6u "
1000 "%2u %3x %2x "
1001 "%5.2g %5.2g %5.2g %5.2g%c "
1002 "%08X %6zu%c %6zu %c %9u%c %7u %10s %12s",
1003 active ? "yes" : "no",
1004 (mClient == 0) ? getpid() : mClient->pid(),
1005 mSessionId,
1006 mPortId,
1007 getTrackStateAsCodedString(),
1008 mCblk->mFlags,
1009
1010 mFormat,
1011 mChannelMask,
1012 sampleRate(),
1013
1014 mStreamType,
1015 mAttr.usage,
1016 mAttr.content_type,
1017
1018 20.0 * log10(mFinalVolume),
1019 20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
1020 20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
1021 20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
1022 vsVolume.second ? 'A' : ' ', // if any VolumeShapers active
1023
1024 mCblk->mServer,
1025 bufferSizeInFrames,
1026 modifiedBufferChar,
1027 framesReadySafe,
1028 fillingStatus,
1029 mAudioTrackServerProxy->getUnderrunFrames(),
1030 nowInUnderrun,
1031 (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000,
1032 isBitPerfect() ? "true" : "false",
1033 getInternalMute() ? "true" : "false"
1034 );
1035
1036 if (isServerLatencySupported()) {
1037 double latencyMs;
1038 bool fromTrack;
1039 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
1040 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
1041 // or 'k' if estimated from kernel because track frames haven't been presented yet.
1042 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
1043 } else {
1044 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
1045 }
1046 }
1047 result.append("\n");
1048 }
1049
1050 uint32_t Track::sampleRate() const {
1051 return mAudioTrackServerProxy->getSampleRate();
1052 }
1053
1054 // AudioBufferProvider interface
1055 status_t Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
1056 {
1057 ServerProxy::Buffer buf;
1058 size_t desiredFrames = buffer->frameCount;
1059 buf.mFrameCount = desiredFrames;
1060 status_t status = mServerProxy->obtainBuffer(&buf);
1061 buffer->frameCount = buf.mFrameCount;
1062 buffer->raw = buf.mRaw;
1063 if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
1064 ALOGV("%s(%d): underrun, framesReady(%zu) < framesDesired(%zd), state: %d",
1065 __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
1066 mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
1067 } else {
1068 mAudioTrackServerProxy->tallyUnderrunFrames(0);
1069 }
1070 return status;
1071 }
1072
1073 void Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
1074 {
1075 interceptBuffer(*buffer);
1076 TrackBase::releaseBuffer(buffer);
1077 }
1078
1079 // TODO: compensate for time shift between HW modules.
1080 void Track::interceptBuffer(
1081 const AudioBufferProvider::Buffer& sourceBuffer) {
1082 auto start = std::chrono::steady_clock::now();
1083 const size_t frameCount = sourceBuffer.frameCount;
1084 if (frameCount == 0) {
1085 return; // No audio to intercept.
1086 // Additionally PatchProxyBufferProvider::obtainBuffer (called by PatchTrack::getNextBuffer)
1087 // does not allow a 0 frame size request, contrary to getNextBuffer.
1088 }
1089 TeePatches teePatches;
1090 if (mTeePatchesRWLock.tryReadLock() == NO_ERROR) {
1091 // Cache a copy of tee patches in case it is updated while using.
1092 teePatches = mTeePatches;
1093 mTeePatchesRWLock.unlock();
1094 }
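// If the read lock is contended, teePatches stays empty and interception is skipped
// for this buffer rather than blocking the audio path.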
1095 for (auto& teePatch : teePatches) {
1096 IAfPatchRecord* patchRecord = teePatch.patchRecord.get();
1097 const size_t framesWritten = patchRecord->writeFrames(
1098 sourceBuffer.i8, frameCount, mFrameSize);
1099 const size_t framesLeft = frameCount - framesWritten;
1100 ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d can not provide big enough "
1101 "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->id(),
1102 framesWritten, frameCount, framesLeft);
1103 }
1104 auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
1105 using namespace std::chrono_literals;
1106 // Average is ~20us per track, this should virtually never be logged (Logging takes >200us)
1107 ALOGD_IF(spent > 500us, "%s: took %lldus to intercept %zu tracks", __func__,
1108 spent.count(), teePatches.size());
1109 }
1110
1111 // ExtendedAudioBufferProvider interface
1112
1113 // framesReady() may return an approximation of the number of frames if called
1114 // from a different thread than the one calling Proxy->obtainBuffer() and
1115 // Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
1116 // AudioTrackServerProxy so be especially careful calling with FastTracks.
1117 size_t Track::framesReady() const {
1118 if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
1119 // Static tracks return zero frames immediately upon stopping (for FastTracks).
1120 // The remainder of the buffer is not drained.
1121 return 0;
1122 }
1123 return mAudioTrackServerProxy->framesReady();
1124 }
1125
1126 int64_t Track::framesReleased() const
1127 {
1128 return mAudioTrackServerProxy->framesReleased();
1129 }
1130
1131 void Track::onTimestamp(const ExtendedTimestamp& timestamp)
1132 {
1133 // This call comes from a FastTrack and should be kept lockless.
1134 // The server side frames are already translated to client frames.
1135 mAudioTrackServerProxy->setTimestamp(timestamp);
1136
1137 // We do not set drained here, as FastTrack timestamp may not go to very last frame.
1138
1139 // Compute latency.
1140 // TODO: Consider whether the server latency may be passed in by FastMixer
1141 // as a constant for all active FastTracks.
1142 const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
1143 mServerLatencyFromTrack.store(true);
1144 mServerLatencyMs.store(latencyMs);
1145 }
1146
1147 // Don't call for fast tracks; the framesReady() could result in priority inversion
1148 bool Track::isReady() const {
1149 if (mFillingStatus != FS_FILLING || isStopped() || isPausing()) {
1150 return true;
1151 }
1152
1153 if (isStopping()) {
1154 if (framesReady() > 0) {
1155 mFillingStatus = FS_FILLED;
1156 }
1157 return true;
1158 }
1159
1160 size_t bufferSizeInFrames = mServerProxy->getBufferSizeInFrames();
1161 // Note: mServerProxy->getStartThresholdInFrames() is clamped.
1162 const size_t startThresholdInFrames = mServerProxy->getStartThresholdInFrames();
1163 const size_t framesToBeReady = std::clamp( // clamp again to validate client values.
1164 std::min(startThresholdInFrames, bufferSizeInFrames), size_t(1), mFrameCount);
1165
1166 if (framesReady() >= framesToBeReady || (mCblk->mFlags & CBLK_FORCEREADY)) {
1167 ALOGV("%s(%d): consider track ready with %zu/%zu, target was %zu)",
1168 __func__, mId, framesReady(), bufferSizeInFrames, framesToBeReady);
1169 mFillingStatus = FS_FILLED;
1170 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1171 return true;
1172 }
1173 return false;
1174 }
1175
1176 status_t Track::start(AudioSystem::sync_event_t event __unused,
1177 audio_session_t triggerSession __unused)
1178 {
1179 status_t status = NO_ERROR;
1180 ALOGV("%s(%d): calling pid %d session %d",
1181 __func__, mId, IPCThreadState::self()->getCallingPid(), mSessionId);
1182
1183 const sp<IAfThreadBase> thread = mThread.promote();
1184 if (thread != 0) {
1185 if (isOffloaded()) {
1186 audio_utils::lock_guard _laf(thread->afThreadCallback()->mutex());
1187 const bool nonOffloadableGlobalEffectEnabled =
1188 thread->afThreadCallback()->isNonOffloadableGlobalEffectEnabled_l();
1189 audio_utils::lock_guard _lth(thread->mutex());
1190 sp<IAfEffectChain> ec = thread->getEffectChain_l(mSessionId);
1191 if (nonOffloadableGlobalEffectEnabled ||
1192 (ec != 0 && ec->isNonOffloadableEnabled())) {
1193 invalidate();
1194 return PERMISSION_DENIED;
1195 }
1196 }
1197 audio_utils::unique_lock ul(thread->mutex());
1198 thread->waitWhileThreadBusy_l(ul);
1199
1200 track_state state = mState;
1201 // here the track could be either new, or restarted
1202 // in both cases "unstop" the track
1203
1204 // initial state-stopping. next state-pausing.
1205 // What if resume is called ?
1206
1207 if (state == FLUSHED) {
1208 // avoid underrun glitches when starting after flush
1209 reset();
1210 }
1211
1212 // clear mPauseHwPending because of pause (and possibly flush) during underrun.
1213 mPauseHwPending = false;
1214 if (state == PAUSED || state == PAUSING) {
1215 if (mResumeToStopping) {
1216 // a pause interrupted the stop (drain), so we need to resume into STOPPING_1
1217 mState = TrackBase::STOPPING_1;
1218 ALOGV("%s(%d): PAUSED => STOPPING_1 on thread %d",
1219 __func__, mId, (int)mThreadIoHandle);
1220 } else {
1221 mState = TrackBase::RESUMING;
1222 ALOGV("%s(%d): PAUSED => RESUMING on thread %d",
1223 __func__, mId, (int)mThreadIoHandle);
1224 }
1225 } else {
1226 mState = TrackBase::ACTIVE;
1227 ALOGV("%s(%d): ? => ACTIVE on thread %d",
1228 __func__, mId, (int)mThreadIoHandle);
1229 }
1230
1231 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1232
1233 // states to reset position info for pcm tracks
1234 if (audio_is_linear_pcm(mFormat)
1235 && (state == IDLE || state == STOPPED || state == FLUSHED)) {
1236 mFrameMap.reset();
1237
1238 if (!isFastTrack()) {
1239 // Start point of track -> sink frame map. If the HAL returns a
1240 // frame position smaller than the first written frame in
1241 // updateTrackFrameInfo, the timestamp can be interpolated
1242 // instead of using a larger value.
1243 mFrameMap.push(mAudioTrackServerProxy->framesReleased(),
1244 playbackThread->framesWritten());
1245 }
1246 }
1247 if (isFastTrack()) {
1248 // refresh fast track underruns on start because that field is never cleared
1249 // by the fast mixer; furthermore, the same track can be recycled, i.e. start
1250 // after stop.
1251 mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
1252 }
1253 status = playbackThread->addTrack_l(this);
1254 if (status == INVALID_OPERATION || status == PERMISSION_DENIED || status == DEAD_OBJECT) {
1255 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1256 // restore previous state if start was rejected by policy manager
1257 if (status == PERMISSION_DENIED || status == DEAD_OBJECT) {
1258 mState = state;
1259 }
1260 }
1261
1262 // Audio timing metrics are computed a few mix cycles after starting.
1263 {
1264 mLogStartCountdown = LOG_START_COUNTDOWN;
1265 mLogStartTimeNs = systemTime();
1266 mLogStartFrames = mAudioTrackServerProxy->getTimestamp()
1267 .mPosition[ExtendedTimestamp::LOCATION_KERNEL];
1268 mLogLatencyMs = 0.;
1269 }
1270 mLogForceVolumeUpdate = true; // at least one volume logged for metrics when starting.
1271
1272 if (status == NO_ERROR || status == ALREADY_EXISTS) {
1273 // for streaming tracks, remove the buffer read stop limit.
1274 mAudioTrackServerProxy->start();
1275 }
1276
1277 // track was already in the active list, not a problem
1278 if (status == ALREADY_EXISTS) {
1279 status = NO_ERROR;
1280 } else {
1281 // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
1282 // It is usually unsafe to access the server proxy from a binder thread.
1283 // But in this case we know the mixer thread (whether normal mixer or fast mixer)
1284 // isn't looking at this track yet: we still hold the normal mixer thread lock,
1285 // and for fast tracks the track is not yet in the fast mixer thread's active set.
1286 // For static tracks, this is used to acknowledge change in position or loop.
1287 ServerProxy::Buffer buffer;
1288 buffer.mFrameCount = 1;
1289 (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
1290 }
1291 if (status == NO_ERROR) {
1292 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->start(); });
1293 }
1294 } else {
1295 status = BAD_VALUE;
1296 }
1297 if (status == NO_ERROR) {
1298 // send format to AudioManager for playback activity monitoring
1299 const sp<IAudioManager> audioManager =
1300 thread->afThreadCallback()->getOrCreateAudioManager();
1301 if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
1302 std::unique_ptr<os::PersistableBundle> bundle =
1303 std::make_unique<os::PersistableBundle>();
1304 bundle->putBoolean(String16(kExtraPlayerEventSpatializedKey),
1305 isSpatialized());
1306 bundle->putInt(String16(kExtraPlayerEventSampleRateKey), mSampleRate);
1307 bundle->putInt(String16(kExtraPlayerEventChannelMaskKey), mChannelMask);
1308 status_t result = audioManager->portEvent(mPortId,
1309 PLAYER_UPDATE_FORMAT, bundle);
1310 if (result != OK) {
1311 ALOGE("%s: unable to send playback format for port ID %d, status error %d",
1312 __func__, mPortId, result);
1313 }
1314 }
1315 }
1316 return status;
1317 }
1318
1319 void Track::stop()
1320 {
1321 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1322 const sp<IAfThreadBase> thread = mThread.promote();
1323 if (thread != 0) {
1324 audio_utils::unique_lock ul(thread->mutex());
1325 thread->waitWhileThreadBusy_l(ul);
1326
1327 track_state state = mState;
1328 if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
1329 // If the track is not active (PAUSED and buffers full), flush buffers
1330 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1331 if (!playbackThread->isTrackActive(this)) {
1332 reset();
1333 mState = STOPPED;
1334 } else if (isPatchTrack() || (!isFastTrack() && !isOffloaded() && !isDirect())) {
1335 // for a PatchTrack (whether fast or not), do not drain but move directly
1336 // to STOPPED to avoid closing while active.
1337 mState = STOPPED;
1338 } else {
1339 // For fast tracks prepareTracks_l() will set state to STOPPING_2
1340 // presentation is complete
1341 // For an offloaded track this starts a drain and state will
1342 // move to STOPPING_2 when drain completes and then STOPPED
1343 mState = STOPPING_1;
1344 if (isOffloaded()) {
1345 mRetryCount = IAfPlaybackThread::kMaxTrackStopRetriesOffload;
1346 }
1347 }
1348 playbackThread->broadcast_l();
1349 ALOGV("%s(%d): not stopping/stopped => stopping/stopped on thread %d",
1350 __func__, mId, (int)mThreadIoHandle);
1351 }
1352 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->stop(); });
1353 }
1354 }
1355
1356 void Track::pause()
1357 {
1358 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1359 const sp<IAfThreadBase> thread = mThread.promote();
1360 if (thread != 0) {
1361 audio_utils::unique_lock ul(thread->mutex());
1362 thread->waitWhileThreadBusy_l(ul);
1363
1364 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1365 switch (mState) {
1366 case STOPPING_1:
1367 case STOPPING_2:
1368 if (!isOffloaded()) {
1369 /* nothing to do if track is not offloaded */
1370 break;
1371 }
1372
1373 // Offloaded track was draining, we need to carry on draining when resumed
1374 mResumeToStopping = true;
1375 FALLTHROUGH_INTENDED;
1376 case ACTIVE:
1377 case RESUMING:
1378 mState = PAUSING;
1379 ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
1380 __func__, mId, (int)mThreadIoHandle);
1381 if (isOffloadedOrDirect()) {
1382 mPauseHwPending = true;
1383 }
1384 playbackThread->broadcast_l();
1385 break;
1386
1387 default:
1388 break;
1389 }
1390 // Pausing the TeePatch to avoid a glitch on underrun, at the cost of buffered audio loss.
1391 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->pause(); });
1392 }
1393 }
1394
1395 void Track::flush()
1396 {
1397 ALOGV("%s(%d)", __func__, mId);
1398 const sp<IAfThreadBase> thread = mThread.promote();
1399 if (thread != 0) {
1400 audio_utils::unique_lock ul(thread->mutex());
1401 thread->waitWhileThreadBusy_l(ul);
1402
1403 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1404
1405 // Flush the ring buffer now if the track is not active in the PlaybackThread.
1406 // Otherwise the flush would not be done until the track is resumed.
1407 // Requires FastTrack removal be BLOCK_UNTIL_ACKED
1408 if (!playbackThread->isTrackActive(this)) {
1409 (void)mServerProxy->flushBufferIfNeeded();
1410 }
1411
1412 if (isOffloaded()) {
1413 // If offloaded we allow flush during any state except terminated
1414 // and keep the track active to avoid problems if user is seeking
1415 // rapidly and underlying hardware has a significant delay handling
1416 // a pause
1417 if (isTerminated()) {
1418 return;
1419 }
1420
1421 ALOGV("%s(%d): offload flush", __func__, mId);
1422 reset();
1423
1424 if (mState == STOPPING_1 || mState == STOPPING_2) {
1425 ALOGV("%s(%d): flushed in STOPPING_1 or 2 state, change state to ACTIVE",
1426 __func__, mId);
1427 mState = ACTIVE;
1428 }
1429
1430 mFlushHwPending = true;
1431 mResumeToStopping = false;
1432 } else {
1433 if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
1434 mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
1435 return;
1436 }
1437 // No point remaining in PAUSED state after a flush => go to
1438 // FLUSHED state
1439 mState = FLUSHED;
1440 // do not reset the track if it is still in the process of being stopped or paused.
1441 // this will be done by prepareTracks_l() when the track is stopped.
1442 // prepareTracks_l() will see mState == FLUSHED, then
1443 // remove from active track list, reset(), and trigger presentation complete
1444 if (isDirect()) {
1445 mFlushHwPending = true;
1446 }
1447 if (!playbackThread->isTrackActive(this)) {
1448 reset();
1449 }
1450 }
1451 // Prevent flush being lost if the track is flushed and then resumed
1452 // before mixer thread can run. This is important when offloading
1453 // because the hardware buffer could hold a large amount of audio
1454 playbackThread->broadcast_l();
1455 // Flush the Tee to avoid on resume playing old data and glitching on the transition to
1456 // new data
1457 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->flush(); });
1458 }
1459 }
1460
1461 // must be called with thread lock held
1462 void Track::flushAck()
1463 {
1464 if (!isOffloaded() && !isDirect()) {
1465 return;
1466 }
1467
1468 // Clear the client ring buffer so that the app can prime the buffer while paused.
1469 // Otherwise it might not get cleared until playback is resumed and obtainBuffer() is called.
1470 mServerProxy->flushBufferIfNeeded();
1471
1472 mFlushHwPending = false;
1473 }
1474
1475 void Track::pauseAck()
1476 {
1477 mPauseHwPending = false;
1478 }
1479
1480 void Track::reset()
1481 {
1482 // Do not reset twice to avoid discarding data written just after a flush and before
1483 // the audioflinger thread detects the track is stopped.
1484 if (!mResetDone) {
1485 // Force underrun condition to avoid false underrun callback until first data is
1486 // written to buffer
1487 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1488 mFillingStatus = FS_FILLING;
1489 mResetDone = true;
1490 if (mState == FLUSHED) {
1491 mState = IDLE;
1492 }
1493 }
1494 }
1495
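// Note on setParameters(): key/value pairs are only forwarded to the HAL output stream
// for DIRECT and OFFLOAD threads; mixed (mixer/fast) outputs reject the call with
// PERMISSION_DENIED, and a dead thread yields FAILED_TRANSACTION.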
1496 status_t Track::setParameters(const String8& keyValuePairs)
1497 {
1498 const sp<IAfThreadBase> thread = mThread.promote();
1499 if (thread == 0) {
1500 ALOGE("%s(%d): thread is dead", __func__, mId);
1501 return FAILED_TRANSACTION;
1502 } else if (thread->type() == IAfThreadBase::DIRECT
1503 || thread->type() == IAfThreadBase::OFFLOAD) {
1504 return thread->setParameters(keyValuePairs);
1505 } else {
1506 return PERMISSION_DENIED;
1507 }
1508 }
1509
1510 status_t Track::selectPresentation(int presentationId,
1511 int programId) {
1512 const sp<IAfThreadBase> thread = mThread.promote();
1513 if (thread == 0) {
1514 ALOGE("thread is dead");
1515 return FAILED_TRANSACTION;
1516 } else if (thread->type() == IAfThreadBase::DIRECT
1517 || thread->type() == IAfThreadBase::OFFLOAD) {
1518 auto directOutputThread = thread->asIAfDirectOutputThread().get();
1519 return directOutputThread->selectPresentation(presentationId, programId);
1520 }
1521 return INVALID_OPERATION;
1522 }
1523
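// Note on applyVolumeShaper(): the configuration and operation are only recorded in
// mVolumeHandler here; the resulting gain curve is sampled later by the playback
// thread. For offloaded/direct outputs the thread is woken immediately so the new
// volume takes effect without waiting for the next mix cycle.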
1524 VolumeShaper::Status Track::applyVolumeShaper(
1525 const sp<VolumeShaper::Configuration>& configuration,
1526 const sp<VolumeShaper::Operation>& operation)
1527 {
1528 VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(configuration, operation);
1529
1530 if (isOffloadedOrDirect()) {
1531 // Signal thread to fetch new volume.
1532 const sp<IAfThreadBase> thread = mThread.promote();
1533 if (thread != 0) {
1534 audio_utils::lock_guard _l(thread->mutex());
1535 thread->broadcast_l();
1536 }
1537 }
1538 return status;
1539 }
1540
1541 sp<VolumeShaper::State> Track::getVolumeShaperState(int id) const
1542 {
1543 // Note: We don't check if Thread exists.
1544
1545 // mVolumeHandler is thread safe.
1546 return mVolumeHandler->getVolumeShaperState(id);
1547 }
1548
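// Note on setFinalVolume(): the per-channel gains are averaged into a single value for
// metadata and metrics, e.g. setFinalVolume(0.5f, 1.0f) reports a final volume of
// 0.75f (made-up numbers, shown only to illustrate the (L + R) * 0.5 computation).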
1549 void Track::setFinalVolume(float volumeLeft, float volumeRight)
1550 {
1551 mFinalVolumeLeft = volumeLeft;
1552 mFinalVolumeRight = volumeRight;
1553 const float volume = (volumeLeft + volumeRight) * 0.5f;
1554 if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
1555 mFinalVolume = volume;
1556 setMetadataHasChanged();
1557 mLogForceVolumeUpdate = true;
1558 }
1559 if (mLogForceVolumeUpdate) {
1560 mLogForceVolumeUpdate = false;
1561 mTrackMetrics.logVolume(mFinalVolume);
1562 }
1563 }
1564
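// Note on copyMetadataTo(): when the client supplied no audio attributes
// (usage == AUDIO_USAGE_UNKNOWN), the switch below derives usage and content type from
// the legacy stream type, mirroring AudioAttributes.java. For example, an
// AUDIO_STREAM_ALARM track is reported as AUDIO_USAGE_ALARM with
// AUDIO_CONTENT_TYPE_SONIFICATION.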
1565 void Track::copyMetadataTo(MetadataInserter& backInserter) const
1566 {
1567 // Do not forward metadata for PatchTrack with unspecified stream type
1568 if (mStreamType == AUDIO_STREAM_PATCH) {
1569 return;
1570 }
1571
1572 playback_track_metadata_v7_t metadata;
1573 metadata.base = {
1574 .usage = mAttr.usage,
1575 .content_type = mAttr.content_type,
1576 .gain = mFinalVolume,
1577 };
1578
1579 // When attributes are undefined, derive default values from stream type.
1580 // See AudioAttributes.java, usageForStreamType() and Builder.setInternalLegacyStreamType()
1581 if (mAttr.usage == AUDIO_USAGE_UNKNOWN) {
1582 switch (mStreamType) {
1583 case AUDIO_STREAM_VOICE_CALL:
1584 metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1585 metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1586 break;
1587 case AUDIO_STREAM_SYSTEM:
1588 metadata.base.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1589 metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1590 break;
1591 case AUDIO_STREAM_RING:
1592 metadata.base.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1593 metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1594 break;
1595 case AUDIO_STREAM_MUSIC:
1596 metadata.base.usage = AUDIO_USAGE_MEDIA;
1597 metadata.base.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1598 break;
1599 case AUDIO_STREAM_ALARM:
1600 metadata.base.usage = AUDIO_USAGE_ALARM;
1601 metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1602 break;
1603 case AUDIO_STREAM_NOTIFICATION:
1604 metadata.base.usage = AUDIO_USAGE_NOTIFICATION;
1605 metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1606 break;
1607 case AUDIO_STREAM_DTMF:
1608 metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
1609 metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1610 break;
1611 case AUDIO_STREAM_ACCESSIBILITY:
1612 metadata.base.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
1613 metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1614 break;
1615 case AUDIO_STREAM_ASSISTANT:
1616 metadata.base.usage = AUDIO_USAGE_ASSISTANT;
1617 metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1618 break;
1619 case AUDIO_STREAM_REROUTING:
1620 metadata.base.usage = AUDIO_USAGE_VIRTUAL_SOURCE;
1621 // unknown content type
1622 break;
1623 case AUDIO_STREAM_CALL_ASSISTANT:
1624 metadata.base.usage = AUDIO_USAGE_CALL_ASSISTANT;
1625 metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1626 break;
1627 default:
1628 break;
1629 }
1630 }
1631
1632 metadata.channel_mask = mChannelMask;
1633 strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
1634 *backInserter++ = metadata;
1635 }
1636
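// Note on tee patches: they duplicate this track's audio into capture paths such as
// AudioPlaybackCapture. The pending set staged by setTeePatchesToUpdate_l() is swapped
// in under the RW lock, and the new patch tracks are started only if this track is
// currently ACTIVE, RESUMING or STOPPING_1.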
1637 void Track::updateTeePatches_l() {
1638 if (mTeePatchesToUpdate.has_value()) {
1639 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
1640 {
1641 RWLock::AutoWLock writeLock(mTeePatchesRWLock);
1642 mTeePatches = std::move(mTeePatchesToUpdate.value());
1643 }
1644 if (mState == TrackBase::ACTIVE || mState == TrackBase::RESUMING ||
1645 mState == TrackBase::STOPPING_1) {
1646 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->start(); });
1647 }
1648 mTeePatchesToUpdate.reset();
1649 }
1650 }
1651
1652 void Track::setTeePatchesToUpdate_l(TeePatches teePatchesToUpdate) {
1653 ALOGW_IF(mTeePatchesToUpdate.has_value(),
1654 "%s, existing tee patches to update will be ignored", __func__);
1655 mTeePatchesToUpdate = std::move(teePatchesToUpdate);
1656 }
1657
1658 // must be called with player thread lock held
1659 void Track::processMuteEvent_l(const sp<
1660 IAudioManager>& audioManager, mute_state_t muteState)
1661 {
1662 if (mMuteState == muteState) {
1663 // mute state did not change, do nothing
1664 return;
1665 }
1666
1667 status_t result = UNKNOWN_ERROR;
1668 if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
1669 if (mMuteEventExtras == nullptr) {
1670 mMuteEventExtras = std::make_unique<os::PersistableBundle>();
1671 }
1672 mMuteEventExtras->putInt(String16(kExtraPlayerEventMuteKey), static_cast<int>(muteState));
1673
1674 result = audioManager->portEvent(mPortId, PLAYER_UPDATE_MUTED, mMuteEventExtras);
1675 }
1676
1677 if (result == OK) {
1678 ALOGI("%s(%d): processed mute state for port ID %d from %d to %d", __func__, id(), mPortId,
1679 static_cast<int>(mMuteState), static_cast<int>(muteState));
1680 mMuteState = muteState;
1681 } else {
1682 ALOGW("%s(%d): cannot process mute state for port ID %d, status error %d", __func__, id(),
1683 mPortId, result);
1684 }
1685 }
1686
1687 status_t Track::getTimestamp(AudioTimestamp& timestamp)
1688 {
1689 if (!isOffloaded() && !isDirect()) {
1690 return INVALID_OPERATION; // normal tracks handled through SSQ
1691 }
1692 const sp<IAfThreadBase> thread = mThread.promote();
1693 if (thread == 0) {
1694 return INVALID_OPERATION;
1695 }
1696
1697 audio_utils::lock_guard _l(thread->mutex());
1698 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1699 return playbackThread->getTimestamp_l(timestamp);
1700 }
1701
1702 status_t Track::attachAuxEffect(int EffectId)
1703 {
1704 const sp<IAfThreadBase> thread = mThread.promote();
1705 if (thread == nullptr) {
1706 return DEAD_OBJECT;
1707 }
1708
1709 auto dstThread = thread->asIAfPlaybackThread();
1710 // srcThread is initialized by call to moveAuxEffectToIo()
1711 sp<IAfPlaybackThread> srcThread;
1712 const auto& af = mClient->afClientCallback();
1713 status_t status = af->moveAuxEffectToIo(EffectId, dstThread, &srcThread);
1714
1715 if (EffectId != 0 && status == NO_ERROR) {
1716 status = dstThread->attachAuxEffect(this, EffectId);
1717 if (status == NO_ERROR) {
1718 AudioSystem::moveEffectsToIo(std::vector<int>{EffectId}, dstThread->id()); // one-element vector holding the effect id
1719 }
1720 }
1721
1722 if (status != NO_ERROR && srcThread != nullptr) {
1723 af->moveAuxEffectToIo(EffectId, srcThread, &dstThread);
1724 }
1725 return status;
1726 }
1727
1728 void Track::setAuxBuffer(int EffectId, int32_t *buffer)
1729 {
1730 mAuxEffectId = EffectId;
1731 mAuxBuffer = buffer;
1732 }
1733
1734 // presentationComplete verified by frames, used by Mixed tracks.
1735 bool Track::presentationComplete(
1736 int64_t framesWritten, size_t audioHalFrames)
1737 {
1738 // TODO: improve this based on FrameMap if it exists, to ensure full drain.
1739 // This assists in proper timestamp computation as well as wakelock management.
1740
1741 // a track is considered presented when the total number of frames written to audio HAL
1742 // corresponds to the number of frames written when presentationComplete() is called for the
1743 // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
1744 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1745 // to detect when all frames have been played. In this case framesWritten isn't
1746 // useful because it doesn't always reflect whether there is data in the h/w
1747 // buffers, particularly if a track has been paused and resumed during draining
1748 ALOGV("%s(%d): presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
1749 __func__, mId,
1750 (long long)mPresentationCompleteFrames, (long long)framesWritten);
1751 if (mPresentationCompleteFrames == 0) {
1752 mPresentationCompleteFrames = framesWritten + audioHalFrames;
1753 ALOGV("%s(%d): set:"
1754 " mPresentationCompleteFrames %lld audioHalFrames %zu",
1755 __func__, mId,
1756 (long long)mPresentationCompleteFrames, audioHalFrames);
1757 }
1758
1759 bool complete;
1760 if (isFastTrack()) { // does not go through linear map
1761 complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
1762 ALOGV("%s(%d): %s framesWritten:%lld mPresentationCompleteFrames:%lld",
1763 __func__, mId, (complete ? "complete" : "waiting"),
1764 (long long) framesWritten, (long long) mPresentationCompleteFrames);
1765 } else { // Normal tracks, OutputTracks, and PatchTracks
1766 complete = framesWritten >= (int64_t) mPresentationCompleteFrames
1767 && mAudioTrackServerProxy->isDrained();
1768 }
1769
1770 if (complete) {
1771 notifyPresentationComplete();
1772 return true;
1773 }
1774 return false;
1775 }
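// Worked example for the frame-based check above (made-up numbers): if the first call
// happens with framesWritten == 48000 and audioHalFrames == 960, then
// mPresentationCompleteFrames becomes 48960; the track is considered presented once
// framesWritten reaches 48960 and, for non-fast tracks, the server proxy also reports
// the client buffer drained.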
1776
1777 // presentationComplete checked by time, used by DirectTracks.
1778 bool Track::presentationComplete(uint32_t latencyMs)
1779 {
1780 // For Offloaded or Direct tracks.
1781
1782 // For a direct (non-offloaded) track, presentation completion is determined by elapsed time.
1783
1784 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1785 // to detect when all frames have been played. In this case latencyMs isn't
1786 // useful because it doesn't always reflect whether there is data in the h/w
1787 // buffers, particularly if a track has been paused and resumed during draining
1788
1789 constexpr float MIN_SPEED = 0.125f; // min speed scaling allowed for timely response.
1790 if (mPresentationCompleteTimeNs == 0) {
1791 mPresentationCompleteTimeNs = systemTime() + latencyMs * 1e6 / fmax(mSpeed, MIN_SPEED);
1792 ALOGV("%s(%d): set: latencyMs %u mPresentationCompleteTimeNs:%lld",
1793 __func__, mId, latencyMs, (long long) mPresentationCompleteTimeNs);
1794 }
1795
1796 bool complete;
1797 if (isOffloaded()) {
1798 complete = true;
1799 } else { // Direct
1800 complete = systemTime() >= mPresentationCompleteTimeNs;
1801 ALOGV("%s(%d): %s", __func__, mId, (complete ? "complete" : "waiting"));
1802 }
1803 if (complete) {
1804 notifyPresentationComplete();
1805 return true;
1806 }
1807 return false;
1808 }
1809
1810 void Track::notifyPresentationComplete()
1811 {
1812 // This only triggers once. TODO: should we enforce this?
1813 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1814 mAudioTrackServerProxy->setStreamEndDone();
1815 }
1816
1817 void Track::triggerEvents(AudioSystem::sync_event_t type)
1818 {
1819 for (auto it = mSyncEvents.begin(); it != mSyncEvents.end();) {
1820 if ((*it)->type() == type) {
1821 ALOGV("%s: triggering SyncEvent type %d", __func__, type);
1822 (*it)->trigger();
1823 it = mSyncEvents.erase(it);
1824 } else {
1825 ++it;
1826 }
1827 }
1828 }
1829
1830 // implement VolumeBufferProvider interface
1831
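// Note on getVolumeLR(): client volumes arrive as packed minifloats in shared memory,
// so they cannot be trusted and are clamped to unity before the cached master/stream
// volume is applied; e.g. a client-requested gain of 2.0 is reduced to 1.0
// (GAIN_FLOAT_UNITY) rather than honored.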
1832 gain_minifloat_packed_t Track::getVolumeLR() const
1833 {
1834 // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
1835 ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
1836 gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
1837 float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
1838 float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
1839 // track volumes come from shared memory, so can't be trusted and must be clamped
1840 if (vl > GAIN_FLOAT_UNITY) {
1841 vl = GAIN_FLOAT_UNITY;
1842 }
1843 if (vr > GAIN_FLOAT_UNITY) {
1844 vr = GAIN_FLOAT_UNITY;
1845 }
1846 // now apply the cached master volume and stream type volume;
1847 // this is trusted but lacks any synchronization or barrier so may be stale
1848 float v = mCachedVolume;
1849 vl *= v;
1850 vr *= v;
1851 // re-combine into packed minifloat
1852 vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
1853 // FIXME look at mute, pause, and stop flags
1854 return vlr;
1855 }
1856
1857 status_t Track::setSyncEvent(
1858 const sp<audioflinger::SyncEvent>& event)
1859 {
1860 if (isTerminated() || mState == PAUSED ||
1861 ((framesReady() == 0) && ((mSharedBuffer != 0) ||
1862 (mState == STOPPED)))) {
1863 ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
1864 __func__, mId,
1865 (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
1866 event->cancel();
1867 return INVALID_OPERATION;
1868 }
1869 (void) TrackBase::setSyncEvent(event);
1870 return NO_ERROR;
1871 }
1872
1873 void Track::invalidate()
1874 {
1875 TrackBase::invalidate();
1876 signalClientFlag(CBLK_INVALID);
1877 }
1878
1879 void Track::disable()
1880 {
1881 // TODO(b/142394888): the filling status should also be reset to filling
1882 signalClientFlag(CBLK_DISABLED);
1883 }
1884
1885 bool Track::isDisabled() const {
1886 audio_track_cblk_t* cblk = mCblk;
1887 return (cblk != nullptr)
1888 && ((android_atomic_release_load(&cblk->mFlags) & CBLK_DISABLED) != 0);
1889 }
1890
1891 void Track::signalClientFlag(int32_t flag)
1892 {
1893 // FIXME should use proxy, and needs work
1894 audio_track_cblk_t* cblk = mCblk;
1895 android_atomic_or(flag, &cblk->mFlags);
1896 android_atomic_release_store(0x40000000, &cblk->mFutex);
1897 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
1898 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
1899 }
1900
1901 void Track::signal()
1902 {
1903 const sp<IAfThreadBase> thread = mThread.promote();
1904 if (thread != 0) {
1905 auto* const t = thread->asIAfPlaybackThread().get();
1906 audio_utils::lock_guard _l(t->mutex());
1907 t->broadcast_l();
1908 }
1909 }
1910
1911 status_t Track::getDualMonoMode(audio_dual_mono_mode_t* mode) const
1912 {
1913 status_t status = INVALID_OPERATION;
1914 if (isOffloadedOrDirect()) {
1915 const sp<IAfThreadBase> thread = mThread.promote();
1916 if (thread != nullptr) {
1917 auto* const t = thread->asIAfPlaybackThread().get();
1918 audio_utils::lock_guard _l(t->mutex());
1919 status = t->getOutput_l()->stream->getDualMonoMode(mode);
1920 ALOGD_IF((status == NO_ERROR) && (mDualMonoMode != *mode),
1921 "%s: mode %d inconsistent", __func__, mDualMonoMode);
1922 }
1923 }
1924 return status;
1925 }
1926
1927 status_t Track::setDualMonoMode(audio_dual_mono_mode_t mode)
1928 {
1929 status_t status = INVALID_OPERATION;
1930 if (isOffloadedOrDirect()) {
1931 const sp<IAfThreadBase> thread = mThread.promote();
1932 if (thread != nullptr) {
1933 auto* const t = thread->asIAfPlaybackThread().get();
1934 audio_utils::lock_guard lock(t->mutex());
1935 status = t->getOutput_l()->stream->setDualMonoMode(mode);
1936 if (status == NO_ERROR) {
1937 mDualMonoMode = mode;
1938 }
1939 }
1940 }
1941 return status;
1942 }
1943
1944 status_t Track::getAudioDescriptionMixLevel(float* leveldB) const
1945 {
1946 status_t status = INVALID_OPERATION;
1947 if (isOffloadedOrDirect()) {
1948 sp<IAfThreadBase> thread = mThread.promote();
1949 if (thread != nullptr) {
1950 auto* const t = thread->asIAfPlaybackThread().get();
1951 audio_utils::lock_guard lock(t->mutex());
1952 status = t->getOutput_l()->stream->getAudioDescriptionMixLevel(leveldB);
1953 ALOGD_IF((status == NO_ERROR) && (mAudioDescriptionMixLevel != *leveldB),
1954 "%s: level %.3f inconsistent", __func__, mAudioDescriptionMixLevel);
1955 }
1956 }
1957 return status;
1958 }
1959
1960 status_t Track::setAudioDescriptionMixLevel(float leveldB)
1961 {
1962 status_t status = INVALID_OPERATION;
1963 if (isOffloadedOrDirect()) {
1964 const sp<IAfThreadBase> thread = mThread.promote();
1965 if (thread != nullptr) {
1966 auto* const t = thread->asIAfPlaybackThread().get();
1967 audio_utils::lock_guard lock(t->mutex());
1968 status = t->getOutput_l()->stream->setAudioDescriptionMixLevel(leveldB);
1969 if (status == NO_ERROR) {
1970 mAudioDescriptionMixLevel = leveldB;
1971 }
1972 }
1973 }
1974 return status;
1975 }
1976
1977 status_t Track::getPlaybackRateParameters(
1978 audio_playback_rate_t* playbackRate) const
1979 {
1980 status_t status = INVALID_OPERATION;
1981 if (isOffloadedOrDirect()) {
1982 const sp<IAfThreadBase> thread = mThread.promote();
1983 if (thread != nullptr) {
1984 auto* const t = thread->asIAfPlaybackThread().get();
1985 audio_utils::lock_guard lock(t->mutex());
1986 status = t->getOutput_l()->stream->getPlaybackRateParameters(playbackRate);
1987 ALOGD_IF((status == NO_ERROR) &&
1988 !isAudioPlaybackRateEqual(mPlaybackRateParameters, *playbackRate),
1989 "%s: playbackRate inconsistent", __func__);
1990 }
1991 }
1992 return status;
1993 }
1994
1995 status_t Track::setPlaybackRateParameters(
1996 const audio_playback_rate_t& playbackRate)
1997 {
1998 status_t status = INVALID_OPERATION;
1999 if (isOffloadedOrDirect()) {
2000 const sp<IAfThreadBase> thread = mThread.promote();
2001 if (thread != nullptr) {
2002 auto* const t = thread->asIAfPlaybackThread().get();
2003 audio_utils::lock_guard lock(t->mutex());
2004 status = t->getOutput_l()->stream->setPlaybackRateParameters(playbackRate);
2005 if (status == NO_ERROR) {
2006 mPlaybackRateParameters = playbackRate;
2007 }
2008 }
2009 }
2010 return status;
2011 }
2012
2013 //To be called with thread lock held
2014 bool Track::isResumePending() const {
2015 if (mState == RESUMING) {
2016 return true;
2017 }
2018 /* Resume is pending if track was stopping before pause was called */
2019 if (mState == STOPPING_1 &&
2020 mResumeToStopping) {
2021 return true;
2022 }
2023
2024 return false;
2025 }
2026
2027 //To be called with thread lock held
2028 void Track::resumeAck() {
2029 if (mState == RESUMING) {
2030 mState = ACTIVE;
2031 }
2032
2033 // Other possibility of pending resume is stopping_1 state
2034 // Do not update the state from stopping, as doing so would prevent
2035 // drain from being called.
2036 if (mState == STOPPING_1) {
2037 mResumeToStopping = false;
2038 }
2039 }
2040
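// Note on updateTrackFrameInfo(): sink (output) frame positions are translated back to
// track frame positions through the linear mFrameMap anchored at
// (trackFramesReleased, sinkFramesWritten); from that the drained state and the
// server-side latency reported to the client via getTimestamp() are derived.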
2041 //To be called with thread lock held
2042 void Track::updateTrackFrameInfo(
2043 int64_t trackFramesReleased, int64_t sinkFramesWritten,
2044 uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
2045 // Make the kernel frametime available.
2046 const FrameTime ft{
2047 timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2048 timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
2049 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
2050 mKernelFrameTime.store(ft);
2051 if (!audio_is_linear_pcm(mFormat)) {
2052 return;
2053 }
2054
2055 //update frame map
2056 mFrameMap.push(trackFramesReleased, sinkFramesWritten);
2057
2058 // adjust server times and set drained state.
2059 //
2060 // Our timestamps are only updated when the track is on the Thread active list.
2061 // We need to ensure that tracks are not removed before full drain.
2062 ExtendedTimestamp local = timeStamp;
2063 bool drained = true; // default assume drained, if no server info found
2064 bool checked = false;
2065 for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
2066 i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
2067 // Lookup the track frame corresponding to the sink frame position.
2068 if (local.mTimeNs[i] > 0) {
2069 local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
2070 // check drain state from the latest stage in the pipeline.
2071 if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
2072 drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
2073 checked = true;
2074 }
2075 }
2076 }
2077
2078 ALOGV("%s: trackFramesReleased:%lld sinkFramesWritten:%lld setDrained: %d",
2079 __func__, (long long)trackFramesReleased, (long long)sinkFramesWritten, drained);
2080 mAudioTrackServerProxy->setDrained(drained);
2081 // Set correction for flushed frames that are not accounted for in released.
2082 local.mFlushed = mAudioTrackServerProxy->framesFlushed();
2083 mServerProxy->setTimestamp(local);
2084
2085 // Compute latency info.
2086 const bool useTrackTimestamp = !drained;
2087 const double latencyMs = useTrackTimestamp
2088 ? local.getOutputServerLatencyMs(sampleRate())
2089 : timeStamp.getOutputServerLatencyMs(halSampleRate);
2090
2091 mServerLatencyFromTrack.store(useTrackTimestamp);
2092 mServerLatencyMs.store(latencyMs);
2093
2094 if (mLogStartCountdown > 0
2095 && local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0
2096 && local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] > 0)
2097 {
2098 if (mLogStartCountdown > 1) {
2099 --mLogStartCountdown;
2100 } else if (latencyMs < mLogLatencyMs) { // wait for latency to stabilize (dip)
2101 mLogStartCountdown = 0;
2102 // startup is the difference in times for the current timestamp and our start
2103 double startUpMs =
2104 (local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartTimeNs) * 1e-6;
2105 // adjust for frames played.
2106 startUpMs -= (local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartFrames)
2107 * 1e3 / mSampleRate;
2108 ALOGV("%s: latencyMs:%lf startUpMs:%lf"
2109 " localTime:%lld startTime:%lld"
2110 " localPosition:%lld startPosition:%lld",
2111 __func__, latencyMs, startUpMs,
2112 (long long)local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
2113 (long long)mLogStartTimeNs,
2114 (long long)local.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2115 (long long)mLogStartFrames);
2116 mTrackMetrics.logLatencyAndStartup(latencyMs, startUpMs);
2117 }
2118 mLogLatencyMs = latencyMs;
2119 }
2120 }
2121
2122 bool Track::AudioVibrationController::setMute(bool muted) {
2123 const sp<IAfThreadBase> thread = mTrack->mThread.promote();
2124 if (thread != 0) {
2125 // Lock for updating mHapticPlaybackEnabled.
2126 audio_utils::lock_guard _l(thread->mutex());
2127 auto* const playbackThread = thread->asIAfPlaybackThread().get();
2128 if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
2129 && playbackThread->hapticChannelCount() > 0) {
2130 ALOGD("%s, haptic playback was %s for track %d",
2131 __func__, muted ? "muted" : "unmuted", mTrack->id());
2132 mTrack->setHapticPlaybackEnabled(!muted);
2133 return true;
2134 }
2135 }
2136 return false;
2137 }
2138
2139 binder::Status Track::AudioVibrationController::mute(
2140 /*out*/ bool *ret) {
2141 *ret = setMute(true);
2142 return binder::Status::ok();
2143 }
2144
2145 binder::Status Track::AudioVibrationController::unmute(
2146 /*out*/ bool *ret) {
2147 *ret = setMute(false);
2148 return binder::Status::ok();
2149 }
2150
2151 // ----------------------------------------------------------------------------
2152 #undef LOG_TAG
2153 #define LOG_TAG "AF::OutputTrack"
2154
2155 /* static */
2156 sp<IAfOutputTrack> IAfOutputTrack::create(
2157 IAfPlaybackThread* playbackThread,
2158 IAfDuplicatingThread* sourceThread,
2159 uint32_t sampleRate,
2160 audio_format_t format,
2161 audio_channel_mask_t channelMask,
2162 size_t frameCount,
2163 const AttributionSourceState& attributionSource) {
2164 return sp<OutputTrack>::make(
2165 playbackThread,
2166 sourceThread,
2167 sampleRate,
2168 format,
2169 channelMask,
2170 frameCount,
2171 attributionSource);
2172 }
2173
2174 OutputTrack::OutputTrack(
2175 IAfPlaybackThread* playbackThread,
2176 IAfDuplicatingThread* sourceThread,
2177 uint32_t sampleRate,
2178 audio_format_t format,
2179 audio_channel_mask_t channelMask,
2180 size_t frameCount,
2181 const AttributionSourceState& attributionSource)
2182 : Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
2183 audio_attributes_t{} /* currently unused for output track */,
2184 sampleRate, format, channelMask, frameCount,
2185 nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
2186 AUDIO_SESSION_NONE, getpid(), attributionSource, AUDIO_OUTPUT_FLAG_NONE,
2187 TYPE_OUTPUT),
2188 mActive(false), mSourceThread(sourceThread)
2189 {
2190
2191 if (mCblk != NULL) {
2192 mOutBuffer.frameCount = 0;
2193 playbackThread->addOutputTrack_l(this);
2194 ALOGV("%s(): mCblk %p, mBuffer %p, "
2195 "frameCount %zu, mChannelMask 0x%08x",
2196 __func__, mCblk, mBuffer,
2197 frameCount, mChannelMask);
2198 // since client and server are in the same process,
2199 // the buffer has the same virtual address on both sides
2200 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
2201 true /*clientInServer*/);
2202 mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
2203 mClientProxy->setSendLevel(0.0);
2204 mClientProxy->setSampleRate(sampleRate);
2205 } else {
2206 ALOGW("%s(%d): Error creating output track on thread %d",
2207 __func__, mId, (int)mThreadIoHandle);
2208 }
2209 }
2210
2211 OutputTrack::~OutputTrack()
2212 {
2213 clearBufferQueue();
2214 // superclass destructor will now delete the server proxy and shared memory both refer to
2215 }
2216
2217 status_t OutputTrack::start(AudioSystem::sync_event_t event,
2218 audio_session_t triggerSession)
2219 {
2220 status_t status = Track::start(event, triggerSession);
2221 if (status != NO_ERROR) {
2222 return status;
2223 }
2224
2225 mActive = true;
2226 mRetryCount = 127;
2227 return status;
2228 }
2229
2230 void OutputTrack::stop()
2231 {
2232 Track::stop();
2233 clearBufferQueue();
2234 mOutBuffer.frameCount = 0;
2235 mActive = false;
2236 }
2237
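// Note on write(): it is called from the duplicating thread. On the first write while
// the destination thread is in standby it primes one silent buffer and starts the
// track so that all duplicated outputs leave standby together; frames that cannot be
// written within the source thread's wait time are parked in mBufferQueue and retried
// on the next call.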
2238 ssize_t OutputTrack::write(void* data, uint32_t frames)
2239 {
2240 if (!mActive && frames != 0) {
2241 const sp<IAfThreadBase> thread = mThread.promote();
2242 if (thread != nullptr && thread->inStandby()) {
2243 // preload one silent buffer to trigger mixer on start()
2244 ClientProxy::Buffer buf { .mFrameCount = mClientProxy->getStartThresholdInFrames() };
2245 status_t status = mClientProxy->obtainBuffer(&buf);
2246 if (status != NO_ERROR && status != NOT_ENOUGH_DATA && status != WOULD_BLOCK) {
2247 ALOGE("%s(%d): could not obtain buffer on start", __func__, mId);
2248 return 0;
2249 }
2250 memset(buf.mRaw, 0, buf.mFrameCount * mFrameSize);
2251 mClientProxy->releaseBuffer(&buf);
2252
2253 (void) start();
2254
2255 // wait for HAL stream to start before sending actual audio. Doing this on each
2256 // OutputTrack ensures that playback start is synchronized across all output streams.
2257 // If another OutputTrack has already started it can underrun but this is OK
2258 // as only silence has been played so far and the retry count is very high on
2259 // OutputTrack.
2260 auto* const pt = thread->asIAfPlaybackThread().get();
2261 if (!pt->waitForHalStart()) {
2262 ALOGW("%s(%d): timeout waiting for thread to exit standby", __func__, mId);
2263 stop();
2264 return 0;
2265 }
2266
2267 // enqueue the first buffer and exit so that other OutputTracks will also start before
2268 // write() is called again and this buffer is actually consumed.
2269 Buffer firstBuffer;
2270 firstBuffer.frameCount = frames;
2271 firstBuffer.raw = data;
2272 queueBuffer(firstBuffer);
2273 return frames;
2274 } else {
2275 (void) start();
2276 }
2277 }
2278
2279 Buffer *pInBuffer;
2280 Buffer inBuffer;
2281 inBuffer.frameCount = frames;
2282 inBuffer.raw = data;
2283 uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
2284 while (waitTimeLeftMs) {
2285 // First write pending buffers, then new data
2286 if (mBufferQueue.size()) {
2287 pInBuffer = mBufferQueue.itemAt(0);
2288 } else {
2289 pInBuffer = &inBuffer;
2290 }
2291
2292 if (pInBuffer->frameCount == 0) {
2293 break;
2294 }
2295
2296 if (mOutBuffer.frameCount == 0) {
2297 mOutBuffer.frameCount = pInBuffer->frameCount;
2298 nsecs_t startTime = systemTime();
2299 status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
2300 if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
2301 ALOGV("%s(%d): thread %d no more output buffers; status %d",
2302 __func__, mId,
2303 (int)mThreadIoHandle, status);
2304 break;
2305 }
2306 uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
2307 if (waitTimeLeftMs >= waitTimeMs) {
2308 waitTimeLeftMs -= waitTimeMs;
2309 } else {
2310 waitTimeLeftMs = 0;
2311 }
2312 if (status == NOT_ENOUGH_DATA) {
2313 restartIfDisabled();
2314 continue;
2315 }
2316 }
2317
2318 uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
2319 pInBuffer->frameCount;
2320 memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
2321 Proxy::Buffer buf;
2322 buf.mFrameCount = outFrames;
2323 buf.mRaw = NULL;
2324 mClientProxy->releaseBuffer(&buf);
2325 restartIfDisabled();
2326 pInBuffer->frameCount -= outFrames;
2327 pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
2328 mOutBuffer.frameCount -= outFrames;
2329 mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
2330
2331 if (pInBuffer->frameCount == 0) {
2332 if (mBufferQueue.size()) {
2333 mBufferQueue.removeAt(0);
2334 free(pInBuffer->mBuffer);
2335 if (pInBuffer != &inBuffer) {
2336 delete pInBuffer;
2337 }
2338 ALOGV("%s(%d): thread %d released overflow buffer %zu",
2339 __func__, mId,
2340 (int)mThreadIoHandle, mBufferQueue.size());
2341 } else {
2342 break;
2343 }
2344 }
2345 }
2346
2347 // If we could not write all frames, allocate a buffer and queue it for next time.
2348 if (inBuffer.frameCount) {
2349 const sp<IAfThreadBase> thread = mThread.promote();
2350 if (thread != nullptr && !thread->inStandby()) {
2351 queueBuffer(inBuffer);
2352 }
2353 }
2354
2355 // Calling write() with a 0 length buffer means that no more data will be written:
2356 // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
2357 if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
2358 stop();
2359 }
2360
2361 return frames - inBuffer.frameCount; // number of frames consumed.
2362 }
2363
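// Note on queueBuffer(): overflow storage is bounded by kMaxOverFlowBuffers; once the
// queue is full the remaining input frames are not consumed and write() returns a
// short count instead of blocking the duplicating thread.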
2364 void OutputTrack::queueBuffer(Buffer& inBuffer) {
2365
2366 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
2367 Buffer *pInBuffer = new Buffer;
2368 const size_t bufferSize = inBuffer.frameCount * mFrameSize;
2369 pInBuffer->mBuffer = malloc(bufferSize);
2370 LOG_ALWAYS_FATAL_IF(pInBuffer->mBuffer == nullptr,
2371 "%s: Unable to malloc size %zu", __func__, bufferSize);
2372 pInBuffer->frameCount = inBuffer.frameCount;
2373 pInBuffer->raw = pInBuffer->mBuffer;
2374 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
2375 mBufferQueue.add(pInBuffer);
2376 ALOGV("%s(%d): thread %d adding overflow buffer %zu", __func__, mId,
2377 (int)mThreadIoHandle, mBufferQueue.size());
2378 // audio data is consumed (stored locally); set frameCount to 0.
2379 inBuffer.frameCount = 0;
2380 } else {
2381 ALOGW("%s(%d): thread %d no more overflow buffers",
2382 __func__, mId, (int)mThreadIoHandle);
2383 // TODO: return error for this.
2384 }
2385 }
2386
2387 void OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
2388 {
2389 audio_utils::lock_guard lock(trackMetadataMutex());
2390 backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
2391 }
2392
2393 void OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
2394 {
2395 audio_utils::lock_guard lock(trackMetadataMutex());
2396 mTrackMetadatas = metadatas;
2397 }
2398 // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
2399 setMetadataHasChanged();
2400 }
2401
2402 status_t OutputTrack::obtainBuffer(
2403 AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
2404 {
2405 ClientProxy::Buffer buf;
2406 buf.mFrameCount = buffer->frameCount;
2407 struct timespec timeout;
2408 timeout.tv_sec = waitTimeMs / 1000;
2409 timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
2410 status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
2411 buffer->frameCount = buf.mFrameCount;
2412 buffer->raw = buf.mRaw;
2413 return status;
2414 }
2415
2416 void OutputTrack::clearBufferQueue()
2417 {
2418 size_t size = mBufferQueue.size();
2419
2420 for (size_t i = 0; i < size; i++) {
2421 Buffer *pBuffer = mBufferQueue.itemAt(i);
2422 free(pBuffer->mBuffer);
2423 delete pBuffer;
2424 }
2425 mBufferQueue.clear();
2426 }
2427
2428 void OutputTrack::restartIfDisabled()
2429 {
2430 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2431 if (mActive && (flags & CBLK_DISABLED)) {
2432 start();
2433 }
2434 }
2435
2436 // ----------------------------------------------------------------------------
2437 #undef LOG_TAG
2438 #define LOG_TAG "AF::PatchTrack"
2439
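// A PatchTrack is the playback endpoint of a software audio patch created by
// AudioFlinger itself (for example when routing between devices); its buffers are
// exchanged with a peer record-side patch object through mPeerProxy rather than with
// an application client.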
2440 /* static */
2441 sp<IAfPatchTrack> IAfPatchTrack::create(
2442 IAfPlaybackThread* playbackThread,
2443 audio_stream_type_t streamType,
2444 uint32_t sampleRate,
2445 audio_channel_mask_t channelMask,
2446 audio_format_t format,
2447 size_t frameCount,
2448 void* buffer,
2449 size_t bufferSize,
2450 audio_output_flags_t flags,
2451 const Timeout& timeout,
2452 size_t frameCountToBeReady, /** Default behaviour is to start
2453 * as soon as possible to have
2454 * the lowest possible latency
2455 * even if it might glitch. */
2456 float speed)
2457 {
2458 return sp<PatchTrack>::make(
2459 playbackThread,
2460 streamType,
2461 sampleRate,
2462 channelMask,
2463 format,
2464 frameCount,
2465 buffer,
2466 bufferSize,
2467 flags,
2468 timeout,
2469 frameCountToBeReady,
2470 speed);
2471 }
2472
2473 PatchTrack::PatchTrack(IAfPlaybackThread* playbackThread,
2474 audio_stream_type_t streamType,
2475 uint32_t sampleRate,
2476 audio_channel_mask_t channelMask,
2477 audio_format_t format,
2478 size_t frameCount,
2479 void *buffer,
2480 size_t bufferSize,
2481 audio_output_flags_t flags,
2482 const Timeout& timeout,
2483 size_t frameCountToBeReady,
2484 float speed)
2485 : Track(playbackThread, NULL, streamType,
2486 audio_attributes_t{} /* currently unused for patch track */,
2487 sampleRate, format, channelMask, frameCount,
2488 buffer, bufferSize, nullptr /* sharedBuffer */,
2489 AUDIO_SESSION_NONE, getpid(), audioServerAttributionSource(getpid()), flags,
2490 TYPE_PATCH, AUDIO_PORT_HANDLE_NONE, frameCountToBeReady, speed),
2491 PatchTrackBase(mCblk ? new AudioTrackClientProxy(mCblk, mBuffer, frameCount, mFrameSize,
2492 true /*clientInServer*/) : nullptr,
2493 playbackThread, timeout)
2494 {
2495 if (mProxy != nullptr) {
2496 sp<AudioTrackClientProxy>::cast(mProxy)->setPlaybackRate({
2497 /* .mSpeed = */ speed,
2498 /* .mPitch = */ AUDIO_TIMESTRETCH_PITCH_NORMAL,
2499 /* .mStretchMode = */ AUDIO_TIMESTRETCH_STRETCH_DEFAULT,
2500 /* .mFallbackMode = */ AUDIO_TIMESTRETCH_FALLBACK_FAIL
2501 });
2502 }
2503 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2504 __func__, mId, sampleRate,
2505 (int)mPeerTimeout.tv_sec,
2506 (int)(mPeerTimeout.tv_nsec / 1000000));
2507 }
2508
2509 PatchTrack::~PatchTrack()
2510 {
2511 ALOGV("%s(%d)", __func__, mId);
2512 }
2513
2514 size_t PatchTrack::framesReady() const
2515 {
2516 if (mPeerProxy && mPeerProxy->producesBufferOnDemand()) {
2517 return std::numeric_limits<size_t>::max();
2518 } else {
2519 return Track::framesReady();
2520 }
2521 }
2522
2523 status_t PatchTrack::start(AudioSystem::sync_event_t event,
2524 audio_session_t triggerSession)
2525 {
2526 status_t status = Track::start(event, triggerSession);
2527 if (status != NO_ERROR) {
2528 return status;
2529 }
2530 android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2531 return status;
2532 }
2533
2534 // AudioBufferProvider interface
2535 status_t PatchTrack::getNextBuffer(
2536 AudioBufferProvider::Buffer* buffer)
2537 {
2538 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2539 Proxy::Buffer buf;
2540 buf.mFrameCount = buffer->frameCount;
2541 if (ATRACE_ENABLED()) {
2542 std::string traceName("PTnReq");
2543 traceName += std::to_string(id());
2544 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2545 }
2546 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2547 ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
2548 buffer->frameCount = buf.mFrameCount;
2549 if (ATRACE_ENABLED()) {
2550 std::string traceName("PTnObt");
2551 traceName += std::to_string(id());
2552 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2553 }
2554 if (buf.mFrameCount == 0) {
2555 return WOULD_BLOCK;
2556 }
2557 status = Track::getNextBuffer(buffer);
2558 return status;
2559 }
2560
2561 void PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2562 {
2563 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2564 Proxy::Buffer buf;
2565 buf.mFrameCount = buffer->frameCount;
2566 buf.mRaw = buffer->raw;
2567 mPeerProxy->releaseBuffer(&buf);
2568 TrackBase::releaseBuffer(buffer); // Note: this is the base class.
2569 }
2570
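// Note on obtainBuffer(): the proxy clears the requested frame count on error, so on
// NOT_ENOUGH_DATA the track is restarted if it was disabled by an underrun, the frame
// count is restored, and the request is retried up to kMaxTries times.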
2571 status_t PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
2572 const struct timespec *timeOut)
2573 {
2574 status_t status = NO_ERROR;
2575 static const int32_t kMaxTries = 5;
2576 int32_t tryCounter = kMaxTries;
2577 const size_t originalFrameCount = buffer->mFrameCount;
2578 do {
2579 if (status == NOT_ENOUGH_DATA) {
2580 restartIfDisabled();
2581 buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
2582 }
2583 status = mProxy->obtainBuffer(buffer, timeOut);
2584 } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
2585 return status;
2586 }
2587
2588 void PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
2589 {
2590 mProxy->releaseBuffer(buffer);
2591 restartIfDisabled();
2592
2593 // Check if the PatchTrack has enough data to write once in releaseBuffer().
2594 // If not, prevent an underrun from occurring by moving the track into FS_FILLING;
2595 // this logic avoids glitches when suspending A2DP with AudioPlaybackCapture.
2596 // TODO: perhaps underrun avoidance could be a track property checked in isReady() instead.
2597 if (mFillingStatus == FS_ACTIVE
2598 && audio_is_linear_pcm(mFormat)
2599 && !isOffloadedOrDirect()) {
2600 if (const sp<IAfThreadBase> thread = mThread.promote();
2601 thread != 0) {
2602 auto* const playbackThread = thread->asIAfPlaybackThread().get();
2603 const size_t frameCount = playbackThread->frameCount() * sampleRate()
2604 / playbackThread->sampleRate();
2605 if (framesReady() < frameCount) {
2606 ALOGD("%s(%d) Not enough data, wait for buffer to fill", __func__, mId);
2607 mFillingStatus = FS_FILLING;
2608 }
2609 }
2610 }
2611 }
2612
2613 void PatchTrack::restartIfDisabled()
2614 {
2615 if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
2616 ALOGW("%s(%d): disabled due to previous underrun, restarting", __func__, mId);
2617 start();
2618 }
2619 }
2620
2621 // ----------------------------------------------------------------------------
2622 // Record
2623 // ----------------------------------------------------------------------------
2624
2625
2626 #undef LOG_TAG
2627 #define LOG_TAG "AF::RecordHandle"
2628
2629 class RecordHandle : public android::media::BnAudioRecord {
2630 public:
2631 explicit RecordHandle(const sp<IAfRecordTrack>& recordTrack);
2632 ~RecordHandle() override;
2633 binder::Status start(int /*AudioSystem::sync_event_t*/ event,
2634 int /*audio_session_t*/ triggerSession) final;
2635 binder::Status stop() final;
2636 binder::Status getActiveMicrophones(
2637 std::vector<media::MicrophoneInfoFw>* activeMicrophones) final;
2638 binder::Status setPreferredMicrophoneDirection(
2639 int /*audio_microphone_direction_t*/ direction) final;
2640 binder::Status setPreferredMicrophoneFieldDimension(float zoom) final;
2641 binder::Status shareAudioHistory(
2642 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) final;
2643
2644 private:
2645 const sp<IAfRecordTrack> mRecordTrack;
2646
2647 // for use from destructor
2648 void stop_nonvirtual();
2649 };
2650
2651 /* static */
2652 sp<media::IAudioRecord> IAfRecordTrack::createIAudioRecordAdapter(
2653 const sp<IAfRecordTrack>& recordTrack) {
2654 return sp<RecordHandle>::make(recordTrack);
2655 }
2656
2657 RecordHandle::RecordHandle(
2658 const sp<IAfRecordTrack>& recordTrack)
2659 : BnAudioRecord(),
2660 mRecordTrack(recordTrack)
2661 {
2662 setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
2663 setInheritRt(true);
2664 }
2665
2666 RecordHandle::~RecordHandle() {
2667 stop_nonvirtual();
2668 mRecordTrack->destroy();
2669 }
2670
2671 binder::Status RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
2672 int /*audio_session_t*/ triggerSession) {
2673 ALOGV("%s()", __func__);
2674 return binderStatusFromStatusT(
2675 mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
2676 }
2677
2678 binder::Status RecordHandle::stop() {
2679 stop_nonvirtual();
2680 return binder::Status::ok();
2681 }
2682
2683 void RecordHandle::stop_nonvirtual() {
2684 ALOGV("%s()", __func__);
2685 mRecordTrack->stop();
2686 }
2687
2688 binder::Status RecordHandle::getActiveMicrophones(
2689 std::vector<media::MicrophoneInfoFw>* activeMicrophones) {
2690 ALOGV("%s()", __func__);
2691 return binderStatusFromStatusT(mRecordTrack->getActiveMicrophones(activeMicrophones));
2692 }
2693
2694 binder::Status RecordHandle::setPreferredMicrophoneDirection(
2695 int /*audio_microphone_direction_t*/ direction) {
2696 ALOGV("%s()", __func__);
2697 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
2698 static_cast<audio_microphone_direction_t>(direction)));
2699 }
2700
2701 binder::Status RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
2702 ALOGV("%s()", __func__);
2703 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
2704 }
2705
2706 binder::Status RecordHandle::shareAudioHistory(
2707 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2708 return binderStatusFromStatusT(
2709 mRecordTrack->shareAudioHistory(sharedAudioPackageName, sharedAudioStartMs));
2710 }
2711
2712 // ----------------------------------------------------------------------------
2713 #undef LOG_TAG
2714 #define LOG_TAG "AF::RecordTrack"
2715
2716
2717 /* static */
2718 sp<IAfRecordTrack> IAfRecordTrack::create(IAfRecordThread* thread,
2719 const sp<Client>& client,
2720 const audio_attributes_t& attr,
2721 uint32_t sampleRate,
2722 audio_format_t format,
2723 audio_channel_mask_t channelMask,
2724 size_t frameCount,
2725 void* buffer,
2726 size_t bufferSize,
2727 audio_session_t sessionId,
2728 pid_t creatorPid,
2729 const AttributionSourceState& attributionSource,
2730 audio_input_flags_t flags,
2731 track_type type,
2732 audio_port_handle_t portId,
2733 int32_t startFrames)
2734 {
2735 return sp<RecordTrack>::make(
2736 thread,
2737 client,
2738 attr,
2739 sampleRate,
2740 format,
2741 channelMask,
2742 frameCount,
2743 buffer,
2744 bufferSize,
2745 sessionId,
2746 creatorPid,
2747 attributionSource,
2748 flags,
2749 type,
2750 portId,
2751 startFrames);
2752 }
2753
2754 // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
2755 RecordTrack::RecordTrack(
2756 IAfRecordThread* thread,
2757 const sp<Client>& client,
2758 const audio_attributes_t& attr,
2759 uint32_t sampleRate,
2760 audio_format_t format,
2761 audio_channel_mask_t channelMask,
2762 size_t frameCount,
2763 void *buffer,
2764 size_t bufferSize,
2765 audio_session_t sessionId,
2766 pid_t creatorPid,
2767 const AttributionSourceState& attributionSource,
2768 audio_input_flags_t flags,
2769 track_type type,
2770 audio_port_handle_t portId,
2771 int32_t startFrames)
2772 : TrackBase(thread, client, attr, sampleRate, format,
2773 channelMask, frameCount, buffer, bufferSize, sessionId,
2774 creatorPid,
2775 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
2776 false /*isOut*/,
2777 (type == TYPE_DEFAULT) ?
2778 ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
2779 ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
2780 type, portId,
2781 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(portId)),
2782 mOverflow(false),
2783 mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
2784 mRecordBufferConverter(NULL),
2785 mFlags(flags),
2786 mSilenced(false),
2787 mStartFrames(startFrames)
2788 {
2789 if (mCblk == NULL) {
2790 return;
2791 }
2792
2793 if (!isDirect()) {
2794 mRecordBufferConverter = new RecordBufferConverter(
2795 thread->channelMask(), thread->format(), thread->sampleRate(),
2796 channelMask, format, sampleRate);
2797 // Check if the RecordBufferConverter construction was successful.
2798 // If not, don't continue with construction.
2799 //
2800 // NOTE: It would be extremely rare that the record track cannot be created
2801 // for the current device, but a pending or future device change would make
2802 // the record track configuration valid.
2803 if (mRecordBufferConverter->initCheck() != NO_ERROR) {
2804 ALOGE("%s(%d): RecordTrack unable to create record buffer converter", __func__, mId);
2805 return;
2806 }
2807 }
2808
2809 mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
2810 mFrameSize, !isExternalTrack());
2811
2812 mResamplerBufferProvider = new ResamplerBufferProvider(this);
2813
2814 if (flags & AUDIO_INPUT_FLAG_FAST) {
2815 ALOG_ASSERT(thread->fastTrackAvailable());
2816 thread->setFastTrackAvailable(false);
2817 } else {
2818 // TODO: only Normal Record has timestamps (Fast Record does not).
2819 mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
2820 }
2821 #ifdef TEE_SINK
2822 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
2823 + "_" + std::to_string(mId)
2824 + "_R");
2825 #endif
2826
2827 // Once this item is logged by the server, the client can add properties.
2828 mTrackMetrics.logConstructor(creatorPid, uid(), id());
2829 }
2830
2831 RecordTrack::~RecordTrack()
2832 {
2833 ALOGV("%s()", __func__);
2834 delete mRecordBufferConverter;
2835 delete mResamplerBufferProvider;
2836 }
2837
2838 status_t RecordTrack::initCheck() const
2839 {
2840 status_t status = TrackBase::initCheck();
2841 if (status == NO_ERROR && mServerProxy == 0) {
2842 status = BAD_VALUE;
2843 }
2844 return status;
2845 }
2846
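// Note on getNextBuffer() for capture: obtaining zero frames means the client buffer is
// full because the app is not reading fast enough, so CBLK_OVERRUN is raised in the
// control block for the client to observe.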
2847 // AudioBufferProvider interface
2848 status_t RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
2849 {
2850 ServerProxy::Buffer buf;
2851 buf.mFrameCount = buffer->frameCount;
2852 status_t status = mServerProxy->obtainBuffer(&buf);
2853 buffer->frameCount = buf.mFrameCount;
2854 buffer->raw = buf.mRaw;
2855 if (buf.mFrameCount == 0) {
2856 // FIXME also wake futex so that overrun is noticed more quickly
2857 (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
2858 }
2859 return status;
2860 }
2861
2862 status_t RecordTrack::start(AudioSystem::sync_event_t event,
2863 audio_session_t triggerSession)
2864 {
2865 const sp<IAfThreadBase> thread = mThread.promote();
2866 if (thread != 0) {
2867 auto* const recordThread = thread->asIAfRecordThread().get();
2868 return recordThread->start(this, event, triggerSession);
2869 } else {
2870 ALOGW("%s track %d: thread was destroyed", __func__, portId());
2871 return DEAD_OBJECT;
2872 }
2873 }
2874
2875 void RecordTrack::stop()
2876 {
2877 const sp<IAfThreadBase> thread = mThread.promote();
2878 if (thread != 0) {
2879 auto* const recordThread = thread->asIAfRecordThread().get();
2880 if (recordThread->stop(this) && isExternalTrack()) {
2881 AudioSystem::stopInput(mPortId);
2882 }
2883 }
2884 }
2885
2886 void RecordTrack::destroy()
2887 {
2888 // see comments at Track::destroy()
2889 sp<RecordTrack> keep(this);
2890 {
2891 track_state priorState = mState;
2892 const sp<IAfThreadBase> thread = mThread.promote();
2893 if (thread != 0) {
2894 audio_utils::lock_guard _l(thread->mutex());
2895 auto* const recordThread = thread->asIAfRecordThread().get();
2896 priorState = mState;
2897 if (!mSharedAudioPackageName.empty()) {
2898 recordThread->resetAudioHistory_l();
2899 }
2900 recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
2901 }
2902 // APM portid/client management done outside of lock.
2903 // NOTE: if thread doesn't exist, the input descriptor probably doesn't either.
2904 if (isExternalTrack()) {
2905 switch (priorState) {
2906 case ACTIVE: // invalidated while still active
2907 case STARTING_2: // invalidated/start-aborted after startInput successfully called
2908 case PAUSING: // invalidated while in the middle of stop() pausing (still active)
2909 AudioSystem::stopInput(mPortId);
2910 break;
2911
2912 case STARTING_1: // invalidated/start-aborted and startInput not successful
2913 case PAUSED: // OK, not active
2914 case IDLE: // OK, not active
2915 break;
2916
2917 case STOPPED: // unexpected (destroyed)
2918 default:
2919 LOG_ALWAYS_FATAL("%s(%d): invalid prior state: %d", __func__, mId, priorState);
2920 }
2921 AudioSystem::releaseInput(mPortId);
2922 }
2923 }
2924 }
2925
2926 void RecordTrack::invalidate()
2927 {
2928 TrackBase::invalidate();
2929 // FIXME should use proxy, and needs work
2930 audio_track_cblk_t* cblk = mCblk;
2931 android_atomic_or(CBLK_INVALID, &cblk->mFlags);
2932 android_atomic_release_store(0x40000000, &cblk->mFutex);
2933 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
2934 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
2935 }
2936
2937
2938 void RecordTrack::appendDumpHeader(String8& result) const
2939 {
2940 result.appendFormat("Active Id Client Session Port Id S Flags "
2941 " Format Chn mask SRate Source "
2942 " Server FrmCnt FrmRdy Sil%s\n",
2943 isServerLatencySupported() ? " Latency" : "");
2944 }
2945
2946 void RecordTrack::appendDump(String8& result, bool active) const
2947 {
2948 result.appendFormat("%c%5s %6d %6u %7u %7u %2s 0x%03X "
2949 "%08X %08X %6u %6X "
2950 "%08X %6zu %6zu %3c",
2951 isFastTrack() ? 'F' : ' ',
2952 active ? "yes" : "no",
2953 mId,
2954 (mClient == 0) ? getpid() : mClient->pid(),
2955 mSessionId,
2956 mPortId,
2957 getTrackStateAsCodedString(),
2958 mCblk->mFlags,
2959
2960 mFormat,
2961 mChannelMask,
2962 mSampleRate,
2963 mAttr.source,
2964
2965 mCblk->mServer,
2966 mFrameCount,
2967 mServerProxy->framesReadySafe(),
2968 isSilenced() ? 's' : 'n'
2969 );
2970 if (isServerLatencySupported()) {
2971 double latencyMs;
2972 bool fromTrack;
2973 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
2974 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
2975 // or 'k' if estimated from kernel (usually for debugging).
2976 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
2977 } else {
2978 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
2979 }
2980 }
2981 result.append("\n");
2982 }
2983
2984 // This is invoked by SyncEvent callback.
2985 void RecordTrack::handleSyncStartEvent(
2986 const sp<audioflinger::SyncEvent>& event)
2987 {
2988 size_t framesToDrop = 0;
2989 const sp<IAfThreadBase> threadBase = mThread.promote();
2990 if (threadBase != 0) {
2991 // TODO: use actual buffer filling status instead of 2 buffers when info is available
2992 // from audio HAL
2993 framesToDrop = threadBase->frameCount() * 2;
2994 }
2995
2996 mSynchronizedRecordState.onPlaybackFinished(event, framesToDrop);
2997 }
2998
2999 void RecordTrack::clearSyncStartEvent()
3000 {
3001 mSynchronizedRecordState.clear();
3002 }
3003
3004 void RecordTrack::updateTrackFrameInfo(
3005 int64_t trackFramesReleased, int64_t sourceFramesRead,
3006 uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
3007 {
3008 // Make the kernel frametime available.
3009 const FrameTime ft{
3010 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
3011 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
3012 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
3013 mKernelFrameTime.store(ft);
3014 if (!audio_is_linear_pcm(mFormat)) {
3015 // Stream is direct, return provided timestamp with no conversion
3016 mServerProxy->setTimestamp(timestamp);
3017 return;
3018 }
3019
3020 ExtendedTimestamp local = timestamp;
3021
3022 // Convert HAL frames to server-side track frames at track sample rate.
3023 // We use trackFramesReleased and sourceFramesRead as an anchor point.
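    // For each location with a valid timestamp:
    //   trackPosition = (halPosition - sourceFramesRead) * mSampleRate / halSampleRate
    //                   + trackFramesReleased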
3024 for (int i = ExtendedTimestamp::LOCATION_SERVER; i < ExtendedTimestamp::LOCATION_MAX; ++i) {
3025 if (local.mTimeNs[i] != 0) {
3026 const int64_t relativeServerFrames = local.mPosition[i] - sourceFramesRead;
3027 const int64_t relativeTrackFrames = relativeServerFrames
3028 * mSampleRate / halSampleRate; // TODO: potential computation overflow
3029 local.mPosition[i] = relativeTrackFrames + trackFramesReleased;
3030 }
3031 }
3032 mServerProxy->setTimestamp(local);
3033
3034 // Compute latency info.
3035 const bool useTrackTimestamp = true; // use track unless debugging.
3036 const double latencyMs = - (useTrackTimestamp
3037 ? local.getOutputServerLatencyMs(sampleRate())
3038 : timestamp.getOutputServerLatencyMs(halSampleRate));
3039
3040 mServerLatencyFromTrack.store(useTrackTimestamp);
3041 mServerLatencyMs.store(latencyMs);
3042 }
3043
3044 status_t RecordTrack::getActiveMicrophones(
3045 std::vector<media::MicrophoneInfoFw>* activeMicrophones) const
3046 {
3047 const sp<IAfThreadBase> thread = mThread.promote();
3048 if (thread != 0) {
3049 auto* const recordThread = thread->asIAfRecordThread().get();
3050 return recordThread->getActiveMicrophones(activeMicrophones);
3051 } else {
3052 return BAD_VALUE;
3053 }
3054 }
3055
3056 status_t RecordTrack::setPreferredMicrophoneDirection(
3057 audio_microphone_direction_t direction) {
3058 const sp<IAfThreadBase> thread = mThread.promote();
3059 if (thread != 0) {
3060 auto* const recordThread = thread->asIAfRecordThread().get();
3061 return recordThread->setPreferredMicrophoneDirection(direction);
3062 } else {
3063 return BAD_VALUE;
3064 }
3065 }
3066
3067 status_t RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
3068 const sp<IAfThreadBase> thread = mThread.promote();
3069 if (thread != 0) {
3070 auto* const recordThread = thread->asIAfRecordThread().get();
3071 return recordThread->setPreferredMicrophoneFieldDimension(zoom);
3072 } else {
3073 return BAD_VALUE;
3074 }
3075 }
3076
3077 status_t RecordTrack::shareAudioHistory(
3078 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
3079
3080 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
3081 const pid_t callingPid = IPCThreadState::self()->getCallingPid();
3082 if (callingUid != mUid || callingPid != mCreatorPid) {
3083 return PERMISSION_DENIED;
3084 }
3085
3086 AttributionSourceState attributionSource{};
3087 attributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
3088 attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
3089 attributionSource.token = sp<BBinder>::make();
3090 if (!captureHotwordAllowed(attributionSource)) {
3091 return PERMISSION_DENIED;
3092 }
3093
3094 const sp<IAfThreadBase> thread = mThread.promote();
3095 if (thread != 0) {
3096 auto* const recordThread = thread->asIAfRecordThread().get();
3097 status_t status = recordThread->shareAudioHistory(
3098 sharedAudioPackageName, mSessionId, sharedAudioStartMs);
3099 if (status == NO_ERROR) {
3100 mSharedAudioPackageName = sharedAudioPackageName;
3101 }
3102 return status;
3103 } else {
3104 return BAD_VALUE;
3105 }
3106 }
3107
3108 void RecordTrack::copyMetadataTo(MetadataInserter& backInserter) const
3109 {
3110
3111 // Do not forward PatchRecord metadata with unspecified audio source
3112 if (mAttr.source == AUDIO_SOURCE_DEFAULT) {
3113 return;
3114 }
3115
3116 // The track cannot be invalid here, as this is called after prepareTrack_l in the same critical section
3117 record_track_metadata_v7_t metadata;
3118 metadata.base = {
3119 .source = mAttr.source,
3120 .gain = 1, // capture tracks do not have volumes
3121 };
3122 metadata.channel_mask = mChannelMask;
3123 strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
3124
3125 *backInserter++ = metadata;
3126 }
3127
3128 // ----------------------------------------------------------------------------
3129 #undef LOG_TAG
3130 #define LOG_TAG "AF::PatchRecord"
3131
3132 /* static */
3133 sp<IAfPatchRecord> IAfPatchRecord::create(
3134 IAfRecordThread* recordThread,
3135 uint32_t sampleRate,
3136 audio_channel_mask_t channelMask,
3137 audio_format_t format,
3138 size_t frameCount,
3139 void *buffer,
3140 size_t bufferSize,
3141 audio_input_flags_t flags,
3142 const Timeout& timeout,
3143 audio_source_t source)
3144 {
3145 return sp<PatchRecord>::make(
3146 recordThread,
3147 sampleRate,
3148 channelMask,
3149 format,
3150 frameCount,
3151 buffer,
3152 bufferSize,
3153 flags,
3154 timeout,
3155 source);
3156 }
3157
3158 PatchRecord::PatchRecord(IAfRecordThread* recordThread,
3159 uint32_t sampleRate,
3160 audio_channel_mask_t channelMask,
3161 audio_format_t format,
3162 size_t frameCount,
3163 void *buffer,
3164 size_t bufferSize,
3165 audio_input_flags_t flags,
3166 const Timeout& timeout,
3167 audio_source_t source)
3168 : RecordTrack(recordThread, NULL,
3169 audio_attributes_t{ .source = source } ,
3170 sampleRate, format, channelMask, frameCount,
3171 buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
3172 audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
3173 PatchTrackBase(mCblk ? new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true)
3174 : nullptr,
3175 recordThread, timeout)
3176 {
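    // The shared cblk is driven through a ClientProxy (used by obtainBuffer/releaseBuffer below);
    // if the cblk could not be allocated, the proxy is left null.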
3177 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
3178 __func__, mId, sampleRate,
3179 (int)mPeerTimeout.tv_sec,
3180 (int)(mPeerTimeout.tv_nsec / 1000000));
3181 }
3182
3183 PatchRecord::~PatchRecord()
3184 {
3185 ALOGV("%s(%d)", __func__, mId);
3186 }
3187
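// Obtains one contiguous buffer from the destination provider and copies as many of the
// source frames into it as were granted; returns the number of frames actually written.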
3188 static size_t writeFramesHelper(
3189 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
3190 {
3191 AudioBufferProvider::Buffer patchBuffer;
3192 patchBuffer.frameCount = frameCount;
3193 auto status = dest->getNextBuffer(&patchBuffer);
3194 if (status != NO_ERROR) {
3195 ALOGW("%s PatchRecord getNextBuffer failed with error %d: %s",
3196 __func__, status, strerror(-status));
3197 return 0;
3198 }
3199 ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
3200 memcpy(patchBuffer.raw, src, patchBuffer.frameCount * frameSize);
3201 size_t framesWritten = patchBuffer.frameCount;
3202 dest->releaseBuffer(&patchBuffer);
3203 return framesWritten;
3204 }
3205
3206 // static
3207 size_t PatchRecord::writeFrames(
3208 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
3209 {
3210 size_t framesWritten = writeFramesHelper(dest, src, frameCount, frameSize);
3211 // On buffer wrap, the granted frame count will be less than requested;
3212 // when this happens, a second buffer is needed to write the leftover audio.
3213 const size_t framesLeft = frameCount - framesWritten;
3214 if (framesWritten != 0 && framesLeft != 0) {
3215 framesWritten += writeFramesHelper(dest, (const char*)src + framesWritten * frameSize,
3216 framesLeft, frameSize);
3217 }
3218 return framesWritten;
3219 }
3220
3221 // AudioBufferProvider interface
3222 status_t PatchRecord::getNextBuffer(
3223 AudioBufferProvider::Buffer* buffer)
3224 {
3225 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
3226 Proxy::Buffer buf;
3227 buf.mFrameCount = buffer->frameCount;
3228 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
3229 ALOGV_IF(status != NO_ERROR,
3230 "%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
3231 buffer->frameCount = buf.mFrameCount;
3232 if (ATRACE_ENABLED()) {
3233 std::string traceName("PRnObt");
3234 traceName += std::to_string(id());
3235 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
3236 }
3237 if (buf.mFrameCount == 0) {
3238 return WOULD_BLOCK;
3239 }
3240 status = RecordTrack::getNextBuffer(buffer);
3241 return status;
3242 }
3243
3244 void PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
3245 {
3246 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
3247 Proxy::Buffer buf;
3248 buf.mFrameCount = buffer->frameCount;
3249 buf.mRaw = buffer->raw;
3250 mPeerProxy->releaseBuffer(&buf);
3251 TrackBase::releaseBuffer(buffer);
3252 }
3253
3254 status_t PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
3255 const struct timespec *timeOut)
3256 {
3257 return mProxy->obtainBuffer(buffer, timeOut);
3258 }
3259
3260 void PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
3261 {
3262 mProxy->releaseBuffer(buffer);
3263 }
3264
3265 #undef LOG_TAG
3266 #define LOG_TAG "AF::PthrPatchRecord"
3267
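// Returns an allocation of 'size' bytes aligned to 'alignment', wrapped in a unique_ptr
// that releases it with free(). On allocation failure the pointer is null.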
3268 static std::unique_ptr<void, decltype(free)*> allocAligned(size_t alignment, size_t size)
3269 {
3270 void *ptr = nullptr;
3271 (void)posix_memalign(&ptr, alignment, size);
3272 return {ptr, free};
3273 }
3274
3275 /* static */
3276 sp<IAfPatchRecord> IAfPatchRecord::createPassThru(
3277 IAfRecordThread* recordThread,
3278 uint32_t sampleRate,
3279 audio_channel_mask_t channelMask,
3280 audio_format_t format,
3281 size_t frameCount,
3282 audio_input_flags_t flags,
3283 audio_source_t source)
3284 {
3285 return sp<PassthruPatchRecord>::make(
3286 recordThread,
3287 sampleRate,
3288 channelMask,
3289 format,
3290 frameCount,
3291 flags,
3292 source);
3293 }
3294
3295 PassthruPatchRecord::PassthruPatchRecord(
3296 IAfRecordThread* recordThread,
3297 uint32_t sampleRate,
3298 audio_channel_mask_t channelMask,
3299 audio_format_t format,
3300 size_t frameCount,
3301 audio_input_flags_t flags,
3302 audio_source_t source)
3303 : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
3304 nullptr /*buffer*/, 0 /*bufferSize*/, flags, {} /* timeout */, source),
3305 mPatchRecordAudioBufferProvider(*this),
3306 mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
3307 mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
3308 {
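    // The stub buffer is what getNextBuffer() hands out on the RecordThread side; it never
    // carries real audio, so zero it once here.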
3309 memset(mStubBuffer.get(), 0, mFrameCount * mFrameSize);
3310 }
3311
3312 sp<StreamInHalInterface> PassthruPatchRecord::obtainStream(
3313 sp<IAfThreadBase>* thread)
3314 {
3315 *thread = mThread.promote();
3316 if (!*thread) return nullptr;
3317 auto* const recordThread = (*thread)->asIAfRecordThread().get();
3318 audio_utils::lock_guard _l(recordThread->mutex());
3319 return recordThread->getInput() ? recordThread->getInput()->stream : nullptr;
3320 }
3321
3322 // PatchProxyBufferProvider methods are called on DirectOutputThread
3323 status_t PassthruPatchRecord::obtainBuffer(
3324 Proxy::Buffer* buffer, const struct timespec* timeOut)
3325 {
3326 if (mUnconsumedFrames) {
3327 buffer->mFrameCount = std::min(buffer->mFrameCount, mUnconsumedFrames);
3328 // mUnconsumedFrames is decreased in releaseBuffer to use actual frame consumption figure.
3329 return PatchRecord::obtainBuffer(buffer, timeOut);
3330 }
3331
3332 // Otherwise, execute a read from HAL and write into the buffer.
3333 nsecs_t startTimeNs = 0;
3334 if (timeOut && (timeOut->tv_sec != 0 || timeOut->tv_nsec != 0) && timeOut->tv_sec != INT_MAX) {
3335 // Will need to correct timeOut by elapsed time.
3336 startTimeNs = systemTime();
3337 }
3338 const size_t framesToRead = std::min(buffer->mFrameCount, mFrameCount);
3339 buffer->mFrameCount = 0;
3340 buffer->mRaw = nullptr;
3341 sp<IAfThreadBase> thread;
3342 sp<StreamInHalInterface> stream = obtainStream(&thread);
3343 if (!stream) return NO_INIT; // If there is no stream, RecordThread is not reading.
3344
3345 status_t result = NO_ERROR;
3346 size_t bytesRead = 0;
3347 {
3348 ATRACE_NAME("read");
3349 result = stream->read(mSinkBuffer.get(), framesToRead * mFrameSize, &bytesRead);
3350 if (result != NO_ERROR) goto stream_error;
3351 if (bytesRead == 0) return NO_ERROR;
3352 }
3353
3354 {
3355 audio_utils::lock_guard lock(readMutex());
3356 mReadBytes += bytesRead;
3357 mReadError = NO_ERROR;
3358 }
3359 mReadCV.notify_one();
3360 // writeFrames handles wraparound and should write all the provided frames.
3361 // If it couldn't, there is something wrong with the client/server buffer of the software patch.
3362 buffer->mFrameCount = writeFrames(
3363 &mPatchRecordAudioBufferProvider,
3364 mSinkBuffer.get(), bytesRead / mFrameSize, mFrameSize);
3365 ALOGW_IF(buffer->mFrameCount < bytesRead / mFrameSize,
3366 "Lost %zu frames obtained from HAL", bytesRead / mFrameSize - buffer->mFrameCount);
3367 mUnconsumedFrames = buffer->mFrameCount;
3368 struct timespec newTimeOut;
3369 if (startTimeNs) {
3370 // Correct the timeout by elapsed time.
3371 nsecs_t newTimeOutNs = audio_utils_ns_from_timespec(timeOut) - (systemTime() - startTimeNs);
3372 if (newTimeOutNs < 0) newTimeOutNs = 0;
3373 newTimeOut.tv_sec = newTimeOutNs / NANOS_PER_SECOND;
3374 newTimeOut.tv_nsec = newTimeOutNs - newTimeOut.tv_sec * NANOS_PER_SECOND;
3375 timeOut = &newTimeOut;
3376 }
3377 return PatchRecord::obtainBuffer(buffer, timeOut);
3378
3379 stream_error:
3380 stream->standby();
3381 {
3382 audio_utils::lock_guard lock(readMutex());
3383 mReadError = result;
3384 }
3385 mReadCV.notify_one();
3386 return result;
3387 }
3388
3389 void PassthruPatchRecord::releaseBuffer(Proxy::Buffer* buffer)
3390 {
3391 if (buffer->mFrameCount <= mUnconsumedFrames) {
3392 mUnconsumedFrames -= buffer->mFrameCount;
3393 } else {
3394 ALOGW("Write side has consumed more frames than we had: %zu > %zu",
3395 buffer->mFrameCount, mUnconsumedFrames);
3396 mUnconsumedFrames = 0;
3397 }
3398 PatchRecord::releaseBuffer(buffer);
3399 }
3400
3401 // AudioBufferProvider and Source methods are called on RecordThread
3402 // 'read' emulates actual audio data with 0's. This is OK as 'getNextBuffer'
3403 // and 'releaseBuffer' are stubbed out and ignore their input.
3404 // It's not possible to retrieve actual data here w/o blocking 'obtainBuffer'
3405 // until we copy it.
3406 status_t PassthruPatchRecord::read(
3407 void* buffer, size_t bytes, size_t* read)
3408 {
3409 bytes = std::min(bytes, mFrameCount * mFrameSize);
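    // Cap the request to one sink buffer's worth, the most that obtainBuffer() publishes per HAL read.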
3410 {
3411 audio_utils::unique_lock lock(readMutex());
3412 mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });
3413 if (mReadError != NO_ERROR) {
3414 mLastReadFrames = 0;
3415 return mReadError;
3416 }
3417 *read = std::min(bytes, mReadBytes);
3418 mReadBytes -= *read;
3419 }
3420 mLastReadFrames = *read / mFrameSize;
3421 memset(buffer, 0, *read);
3422 return 0;
3423 }
3424
3425 status_t PassthruPatchRecord::getCapturePosition(
3426 int64_t* frames, int64_t* time)
3427 {
3428 sp<IAfThreadBase> thread;
3429 sp<StreamInHalInterface> stream = obtainStream(&thread);
3430 return stream ? stream->getCapturePosition(frames, time) : NO_INIT;
3431 }
3432
3433 status_t PassthruPatchRecord::standby()
3434 {
3435 // RecordThread issues 'standby' command in two major cases:
3436 // 1. Error on read--this case is handled in 'obtainBuffer'.
3437 // 2. Track is stopping--as PassthruPatchRecord assumes continuous
3438 // output, this can only happen when the software patch
3439 // is being torn down. In this case, the RecordThread
3440 // will terminate and close the HAL stream.
3441 return 0;
3442 }
3443
3444 // As the buffer gets filled in obtainBuffer, here we only simulate data consumption.
3445 status_t PassthruPatchRecord::getNextBuffer(
3446 AudioBufferProvider::Buffer* buffer)
3447 {
3448 buffer->frameCount = mLastReadFrames;
3449 buffer->raw = buffer->frameCount != 0 ? mStubBuffer.get() : nullptr;
3450 return NO_ERROR;
3451 }
3452
3453 void PassthruPatchRecord::releaseBuffer(
3454 AudioBufferProvider::Buffer* buffer)
3455 {
3456 buffer->frameCount = 0;
3457 buffer->raw = nullptr;
3458 }
3459
3460 // ----------------------------------------------------------------------------
3461 #undef LOG_TAG
3462 #define LOG_TAG "AF::MmapTrack"
3463
3464 /* static */
3465 sp<IAfMmapTrack> IAfMmapTrack::create(IAfThreadBase* thread,
3466 const audio_attributes_t& attr,
3467 uint32_t sampleRate,
3468 audio_format_t format,
3469 audio_channel_mask_t channelMask,
3470 audio_session_t sessionId,
3471 bool isOut,
3472 const android::content::AttributionSourceState& attributionSource,
3473 pid_t creatorPid,
3474 audio_port_handle_t portId)
3475 {
3476 return sp<MmapTrack>::make(
3477 thread,
3478 attr,
3479 sampleRate,
3480 format,
3481 channelMask,
3482 sessionId,
3483 isOut,
3484 attributionSource,
3485 creatorPid,
3486 portId);
3487 }
3488
3489 MmapTrack::MmapTrack(IAfThreadBase* thread,
3490 const audio_attributes_t& attr,
3491 uint32_t sampleRate,
3492 audio_format_t format,
3493 audio_channel_mask_t channelMask,
3494 audio_session_t sessionId,
3495 bool isOut,
3496 const AttributionSourceState& attributionSource,
3497 pid_t creatorPid,
3498 audio_port_handle_t portId)
3499 : TrackBase(thread, NULL, attr, sampleRate, format,
3500 channelMask, (size_t)0 /* frameCount */,
3501 nullptr /* buffer */, (size_t)0 /* bufferSize */,
3502 sessionId, creatorPid,
3503 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
3504 isOut,
3505 ALLOC_NONE,
3506 TYPE_DEFAULT, portId,
3507 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_MMAP) + std::to_string(portId)),
3508 mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.pid))),
3509 mSilenced(false), mSilencedNotified(false)
3510 {
3511 // Once this item is logged by the server, the client can add properties.
3512 mTrackMetrics.logConstructor(creatorPid, uid(), id());
3513 }
3514
3515 MmapTrack::~MmapTrack()
3516 {
3517 }
3518
3519 status_t MmapTrack::initCheck() const
3520 {
3521 return NO_ERROR;
3522 }
3523
3524 status_t MmapTrack::start(AudioSystem::sync_event_t event __unused,
3525 audio_session_t triggerSession __unused)
3526 {
3527 return NO_ERROR;
3528 }
3529
3530 void MmapTrack::stop()
3531 {
3532 }
3533
3534 // AudioBufferProvider interface
3535 status_t MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
3536 {
3537 buffer->frameCount = 0;
3538 buffer->raw = nullptr;
3539 return INVALID_OPERATION;
3540 }
3541
3542 // ExtendedAudioBufferProvider interface
3543 size_t MmapTrack::framesReady() const {
3544 return 0;
3545 }
3546
3547 int64_t MmapTrack::framesReleased() const
3548 {
3549 return 0;
3550 }
3551
3552 void MmapTrack::onTimestamp(const ExtendedTimestamp& timestamp __unused)
3553 {
3554 }
3555
3556 void MmapTrack::processMuteEvent_l(const sp<IAudioManager>& audioManager, mute_state_t muteState)
3557 {
3558 if (mMuteState == muteState) {
3559 // mute state did not change, do nothing
3560 return;
3561 }
3562
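    // Report the new state to the audio service as a PLAYER_UPDATE_MUTED port event and
    // commit it locally only if the notification succeeded.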
3563 status_t result = UNKNOWN_ERROR;
3564 if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
3565 if (mMuteEventExtras == nullptr) {
3566 mMuteEventExtras = std::make_unique<os::PersistableBundle>();
3567 }
3568 mMuteEventExtras->putInt(String16(kExtraPlayerEventMuteKey),
3569 static_cast<int>(muteState));
3570
3571 result = audioManager->portEvent(mPortId,
3572 PLAYER_UPDATE_MUTED,
3573 mMuteEventExtras);
3574 }
3575
3576 if (result == OK) {
3577 ALOGI("%s(%d): processed mute state for port ID %d from %d to %d", __func__, id(), mPortId,
3578 static_cast<int>(mMuteState), static_cast<int>(muteState));
3579 mMuteState = muteState;
3580 } else {
3581 ALOGW("%s(%d): cannot process mute state for port ID %d, status error %d",
3582 __func__,
3583 id(),
3584 mPortId,
3585 result);
3586 }
3587 }
3588
3589 void MmapTrack::appendDumpHeader(String8& result) const
3590 {
3591 result.appendFormat("Client Session Port Id Format Chn mask SRate Flags %s\n",
3592 isOut() ? "Usg CT": "Source");
3593 }
3594
3595 void MmapTrack::appendDump(String8& result, bool active __unused) const
3596 {
3597 result.appendFormat("%6u %7u %7u %08X %08X %6u 0x%03X ",
3598 mPid,
3599 mSessionId,
3600 mPortId,
3601 mFormat,
3602 mChannelMask,
3603 mSampleRate,
3604 mAttr.flags);
3605 if (isOut()) {
3606 result.appendFormat("%3x %2x", mAttr.usage, mAttr.content_type);
3607 } else {
3608 result.appendFormat("%6x", mAttr.source);
3609 }
3610 result.append("\n");
3611 }
3612
3613 } // namespace android
3614