1  /*
2  **
3  ** Copyright 2012, The Android Open Source Project
4  **
5  ** Licensed under the Apache License, Version 2.0 (the "License");
6  ** you may not use this file except in compliance with the License.
7  ** You may obtain a copy of the License at
8  **
9  **     http://www.apache.org/licenses/LICENSE-2.0
10  **
11  ** Unless required by applicable law or agreed to in writing, software
12  ** distributed under the License is distributed on an "AS IS" BASIS,
13  ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  ** See the License for the specific language governing permissions and
15  ** limitations under the License.
16  */
17  
18  
19  #define LOG_TAG "AudioFlinger"
20  //#define LOG_NDEBUG 0
21  
22  #include "Configuration.h"
23  #include <math.h>
24  #include <sys/syscall.h>
25  #include <utils/Log.h>
26  
27  #include <private/media/AudioTrackShared.h>
28  
29  #include <common_time/cc_helper.h>
30  #include <common_time/local_clock.h>
31  
32  #include "AudioMixer.h"
33  #include "AudioFlinger.h"
34  #include "ServiceUtilities.h"
35  
36  #include <media/nbaio/Pipe.h>
37  #include <media/nbaio/PipeReader.h>
38  #include <audio_utils/minifloat.h>
39  
40  // ----------------------------------------------------------------------------
41  
42  // Note: the following macro is used for extremely verbose logging messages.  In
43  // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
44  // 0; but one side effect of this is to turn on all LOGVs as well.  Some messages
45  // are so verbose that we want to suppress them even when we have ALOG_ASSERT
46  // turned on.  Do not uncomment the #define below unless you really know what you
47  // are doing and want to see all of the extremely verbose messages.
48  //#define VERY_VERY_VERBOSE_LOGGING
49  #ifdef VERY_VERY_VERBOSE_LOGGING
50  #define ALOGVV ALOGV
51  #else
52  #define ALOGVV(a...) do { } while(0)
53  #endif
54  
55  namespace android {
56  
57  // ----------------------------------------------------------------------------
58  //      TrackBase
59  // ----------------------------------------------------------------------------
60  
61  static volatile int32_t nextTrackId = 55;
62  
63  // TrackBase constructor must be called with AudioFlinger::mLock held
64  AudioFlinger::ThreadBase::TrackBase::TrackBase(
65              ThreadBase *thread,
66              const sp<Client>& client,
67              uint32_t sampleRate,
68              audio_format_t format,
69              audio_channel_mask_t channelMask,
70              size_t frameCount,
71              void *buffer,
72              int sessionId,
73              int clientUid,
74              IAudioFlinger::track_flags_t flags,
75              bool isOut,
76              alloc_type alloc,
77              track_type type)
78      :   RefBase(),
79          mThread(thread),
80          mClient(client),
81          mCblk(NULL),
82          // mBuffer
83          mState(IDLE),
84          mSampleRate(sampleRate),
85          mFormat(format),
86          mChannelMask(channelMask),
87          mChannelCount(isOut ?
88                  audio_channel_count_from_out_mask(channelMask) :
89                  audio_channel_count_from_in_mask(channelMask)),
90          mFrameSize(audio_is_linear_pcm(format) ?
91                  mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
92          mFrameCount(frameCount),
93          mSessionId(sessionId),
94          mFlags(flags),
95          mIsOut(isOut),
96          mServerProxy(NULL),
97          mId(android_atomic_inc(&nextTrackId)),
98          mTerminated(false),
99          mType(type),
100          mThreadIoHandle(thread->id())
101  {
102      // if the caller is us (mediaserver), trust the specified uid; otherwise use the calling uid
103      if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
104          int newclientUid = IPCThreadState::self()->getCallingUid();
105          if (clientUid != -1 && clientUid != newclientUid) {
106              ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid);
107          }
108          clientUid = newclientUid;
109      }
110      // clientUid contains the uid of the app that is responsible for this track, so we can blame
111      // battery usage on it.
112      mUid = clientUid;
113  
114      // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
115      size_t size = sizeof(audio_track_cblk_t);
116      size_t bufferSize = (buffer == NULL ? roundup(frameCount) : frameCount) * mFrameSize;
117      if (buffer == NULL && alloc == ALLOC_CBLK) {
118          size += bufferSize;
119      }
120  
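    // Layout note: for ALLOC_CBLK with no caller-supplied buffer, the control block and the
    // audio data share one allocation, with the data starting right after the
    // audio_track_cblk_t header (see the ALLOC_CBLK case below); roundup() rounds the frame
    // count up to a power of two when AudioFlinger owns the buffer.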
121      if (client != 0) {
122          mCblkMemory = client->heap()->allocate(size);
123          if (mCblkMemory == 0 ||
124                  (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
125          ALOGE("not enough memory for AudioTrack size=%zu", size);
126              client->heap()->dump("AudioTrack");
127              mCblkMemory.clear();
128              return;
129          }
130      } else {
131          // this syntax avoids calling the audio_track_cblk_t constructor twice
132          mCblk = (audio_track_cblk_t *) new uint8_t[size];
133          // assume mCblk != NULL
134      }
135  
136      // construct the shared structure in-place.
137      if (mCblk != NULL) {
138          new(mCblk) audio_track_cblk_t();
139          switch (alloc) {
140          case ALLOC_READONLY: {
141              const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
142              if (roHeap == 0 ||
143                      (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
144                      (mBuffer = mBufferMemory->pointer()) == NULL) {
145                  ALOGE("not enough memory for read-only buffer size=%zu", bufferSize);
146                  if (roHeap != 0) {
147                      roHeap->dump("buffer");
148                  }
149                  mCblkMemory.clear();
150                  mBufferMemory.clear();
151                  return;
152              }
153              memset(mBuffer, 0, bufferSize);
154              } break;
155          case ALLOC_PIPE:
156              mBufferMemory = thread->pipeMemory();
157              // mBuffer is the virtual address as seen from current process (mediaserver),
158              // and should normally be coming from mBufferMemory->pointer().
159              // However in this case the TrackBase does not reference the buffer directly.
160              // It should reference the buffer via the pipe.
161              // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
162              mBuffer = NULL;
163              break;
164          case ALLOC_CBLK:
165              // clear all buffers
166              if (buffer == NULL) {
167                  mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
168                  memset(mBuffer, 0, bufferSize);
169              } else {
170                  mBuffer = buffer;
171  #if 0
172                  mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
173  #endif
174              }
175              break;
176          case ALLOC_LOCAL:
177              mBuffer = calloc(1, bufferSize);
178              break;
179          case ALLOC_NONE:
180              mBuffer = buffer;
181              break;
182          }
183  
184  #ifdef TEE_SINK
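        // Debug-only tee: when TEE_SINK is compiled in and enabled, a Pipe/PipeReader pair
        // mirrors this track's data so it can later be dumped via dumpTee() in the
        // TrackBase destructor.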
185          if (mTeeSinkTrackEnabled) {
186              NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat);
187              if (Format_isValid(pipeFormat)) {
188                  Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
189                  size_t numCounterOffers = 0;
190                  const NBAIO_Format offers[1] = {pipeFormat};
191                  ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
192                  ALOG_ASSERT(index == 0);
193                  PipeReader *pipeReader = new PipeReader(*pipe);
194                  numCounterOffers = 0;
195                  index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
196                  ALOG_ASSERT(index == 0);
197                  mTeeSink = pipe;
198                  mTeeSource = pipeReader;
199              }
200          }
201  #endif
202  
203      }
204  }
205  
206  status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
207  {
208      status_t status;
209      if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
210          status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
211      } else {
212          status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
213      }
214      return status;
215  }
216  
217  AudioFlinger::ThreadBase::TrackBase::~TrackBase()
218  {
219  #ifdef TEE_SINK
220      dumpTee(-1, mTeeSource, mId);
221  #endif
222      // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
223      delete mServerProxy;
224      if (mCblk != NULL) {
225          if (mClient == 0) {
226              delete mCblk;
227          } else {
228              mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
229          }
230      }
231      mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
232      if (mClient != 0) {
233          // Client destructor must run with AudioFlinger client mutex locked
234          Mutex::Autolock _l(mClient->audioFlinger()->mClientLock);
235          // If the client's reference count drops to zero, the associated destructor
236          // must run with AudioFlinger lock held. Thus the explicit clear() rather than
237          // relying on the automatic clear() at end of scope.
238          mClient.clear();
239      }
240      // flush the binder command buffer
241      IPCThreadState::self()->flushCommands();
242  }
243  
244  // AudioBufferProvider interface
245  // getNextBuffer() = 0;
246  // This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
247  void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
248  {
249  #ifdef TEE_SINK
250      if (mTeeSink != 0) {
251          (void) mTeeSink->write(buffer->raw, buffer->frameCount);
252      }
253  #endif
254  
255      ServerProxy::Buffer buf;
256      buf.mFrameCount = buffer->frameCount;
257      buf.mRaw = buffer->raw;
258      buffer->frameCount = 0;
259      buffer->raw = NULL;
260      mServerProxy->releaseBuffer(&buf);
261  }
262  
263  status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
264  {
265      mSyncEvents.add(event);
266      return NO_ERROR;
267  }
268  
269  // ----------------------------------------------------------------------------
270  //      Playback
271  // ----------------------------------------------------------------------------
272  
273  AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
274      : BnAudioTrack(),
275        mTrack(track)
276  {
277  }
278  
279  AudioFlinger::TrackHandle::~TrackHandle() {
280      // just stop the track on deletion, associated resources
281      // will be freed from the main thread once all pending buffers have
282      // been played. Unless it's not in the active track list, in which
283      // case we free everything now...
284      mTrack->destroy();
285  }
286  
287  sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
288      return mTrack->getCblk();
289  }
290  
291  status_t AudioFlinger::TrackHandle::start() {
292      return mTrack->start();
293  }
294  
295  void AudioFlinger::TrackHandle::stop() {
296      mTrack->stop();
297  }
298  
299  void AudioFlinger::TrackHandle::flush() {
300      mTrack->flush();
301  }
302  
303  void AudioFlinger::TrackHandle::pause() {
304      mTrack->pause();
305  }
306  
307  status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
308  {
309      return mTrack->attachAuxEffect(EffectId);
310  }
311  
312  status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
313                                                           sp<IMemory>* buffer) {
314      if (!mTrack->isTimedTrack())
315          return INVALID_OPERATION;
316  
317      PlaybackThread::TimedTrack* tt =
318              reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
319      return tt->allocateTimedBuffer(size, buffer);
320  }
321  
322  status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
323                                                       int64_t pts) {
324      if (!mTrack->isTimedTrack())
325          return INVALID_OPERATION;
326  
327      if (buffer == 0 || buffer->pointer() == NULL) {
328          ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()");
329          return BAD_VALUE;
330      }
331  
332      PlaybackThread::TimedTrack* tt =
333              reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
334      return tt->queueTimedBuffer(buffer, pts);
335  }
336  
337  status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
338      const LinearTransform& xform, int target) {
339  
340      if (!mTrack->isTimedTrack())
341          return INVALID_OPERATION;
342  
343      PlaybackThread::TimedTrack* tt =
344              reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
345      return tt->setMediaTimeTransform(
346          xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
347  }
348  
349  status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
350      return mTrack->setParameters(keyValuePairs);
351  }
352  
353  status_t AudioFlinger::TrackHandle::getTimestamp(AudioTimestamp& timestamp)
354  {
355      return mTrack->getTimestamp(timestamp);
356  }
357  
358  
359  void AudioFlinger::TrackHandle::signal()
360  {
361      return mTrack->signal();
362  }
363  
364  status_t AudioFlinger::TrackHandle::onTransact(
365      uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
366  {
367      return BnAudioTrack::onTransact(code, data, reply, flags);
368  }
369  
370  // ----------------------------------------------------------------------------
371  
372  // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
373  AudioFlinger::PlaybackThread::Track::Track(
374              PlaybackThread *thread,
375              const sp<Client>& client,
376              audio_stream_type_t streamType,
377              uint32_t sampleRate,
378              audio_format_t format,
379              audio_channel_mask_t channelMask,
380              size_t frameCount,
381              void *buffer,
382              const sp<IMemory>& sharedBuffer,
383              int sessionId,
384              int uid,
385              IAudioFlinger::track_flags_t flags,
386              track_type type)
387      :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
388                    (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
389                    sessionId, uid, flags, true /*isOut*/,
390                    (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
391                    type),
392      mFillingUpStatus(FS_INVALID),
393      // mRetryCount initialized later when needed
394      mSharedBuffer(sharedBuffer),
395      mStreamType(streamType),
396      mName(-1),  // see note below
397      mMainBuffer(thread->mixBuffer()),
398      mAuxBuffer(NULL),
399      mAuxEffectId(0), mHasVolumeController(false),
400      mPresentationCompleteFrames(0),
401      mFastIndex(-1),
402      mCachedVolume(1.0),
403      mIsInvalid(false),
404      mAudioTrackServerProxy(NULL),
405      mResumeToStopping(false),
406      mFlushHwPending(false),
407      mPreviousValid(false),
408      mPreviousFramesWritten(0)
409      // mPreviousTimestamp
410  {
411      // client == 0 implies sharedBuffer == 0
412      ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
413  
414      ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
415              sharedBuffer->size());
416  
417      if (mCblk == NULL) {
418          return;
419      }
420  
421      if (sharedBuffer == 0) {
422          mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
423                  mFrameSize, !isExternalTrack(), sampleRate);
424      } else {
425          mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
426                  mFrameSize);
427      }
428      mServerProxy = mAudioTrackServerProxy;
429  
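    // For mixer threads, the track "name" is the AudioMixer slot index assigned by
    // getTrackName_l(); a negative value means no name was available.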
430      mName = thread->getTrackName_l(channelMask, format, sessionId);
431      if (mName < 0) {
432          ALOGE("no more track names available");
433          return;
434      }
435      // only allocate a fast track index if we were able to allocate a normal track name
436      if (flags & IAudioFlinger::TRACK_FAST) {
437          mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
438          ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
439          int i = __builtin_ctz(thread->mFastTrackAvailMask);
440          ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
441          // FIXME This is too eager.  We allocate a fast track index before the
442          //       fast track becomes active.  Since fast tracks are a scarce resource,
443          //       this means we are potentially denying other more important fast tracks from
444          //       being created.  It would be better to allocate the index dynamically.
445          mFastIndex = i;
446          // Read the initial underruns because this field is never cleared by the fast mixer
447          mObservedUnderruns = thread->getFastTrackUnderruns(i);
448          thread->mFastTrackAvailMask &= ~(1 << i);
449      }
450  }
451  
452  AudioFlinger::PlaybackThread::Track::~Track()
453  {
454      ALOGV("PlaybackThread::Track destructor");
455  
456      // The destructor would clear mSharedBuffer,
457      // but it will not push the decremented reference count,
458      // leaving the client's IMemory dangling indefinitely.
459      // This prevents that leak.
460      if (mSharedBuffer != 0) {
461          mSharedBuffer.clear();
462      }
463  }
464  
465  status_t AudioFlinger::PlaybackThread::Track::initCheck() const
466  {
467      status_t status = TrackBase::initCheck();
468      if (status == NO_ERROR && mName < 0) {
469          status = NO_MEMORY;
470      }
471      return status;
472  }
473  
474  void AudioFlinger::PlaybackThread::Track::destroy()
475  {
476      // NOTE: destroyTrack_l() can remove a strong reference to this Track
477      // by removing it from mTracks vector, so there is a risk that this Tracks's
478      // destructor is called. As the destructor needs to lock mLock,
479      // we must acquire a strong reference on this Track before locking mLock
480      // here so that the destructor is called only when exiting this function.
481      // On the other hand, as long as Track::destroy() is only called by
482      // TrackHandle destructor, the TrackHandle still holds a strong ref on
483      // this Track with its member mTrack.
484      sp<Track> keep(this);
485      { // scope for mLock
486          bool wasActive = false;
487          sp<ThreadBase> thread = mThread.promote();
488          if (thread != 0) {
489              Mutex::Autolock _l(thread->mLock);
490              PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
491              wasActive = playbackThread->destroyTrack_l(this);
492          }
493          if (isExternalTrack() && !wasActive) {
494              AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, (audio_session_t)mSessionId);
495          }
496      }
497  }
498  
499  /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
500  {
501      result.append("    Name Active Client Type      Fmt Chn mask Session fCount S F SRate  "
502                    "L dB  R dB    Server Main buf  Aux Buf Flags UndFrmCnt\n");
503  }
504  
505  void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size, bool active)
506  {
507      gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
508      if (isFastTrack()) {
509          sprintf(buffer, "    F %2d", mFastIndex);
510      } else if (mName >= AudioMixer::TRACK0) {
511          sprintf(buffer, "    %4d", mName - AudioMixer::TRACK0);
512      } else {
513          sprintf(buffer, "    none");
514      }
515      track_state state = mState;
516      char stateChar;
517      if (isTerminated()) {
518          stateChar = 'T';
519      } else {
520          switch (state) {
521          case IDLE:
522              stateChar = 'I';
523              break;
524          case STOPPING_1:
525              stateChar = 's';
526              break;
527          case STOPPING_2:
528              stateChar = '5';
529              break;
530          case STOPPED:
531              stateChar = 'S';
532              break;
533          case RESUMING:
534              stateChar = 'R';
535              break;
536          case ACTIVE:
537              stateChar = 'A';
538              break;
539          case PAUSING:
540              stateChar = 'p';
541              break;
542          case PAUSED:
543              stateChar = 'P';
544              break;
545          case FLUSHED:
546              stateChar = 'F';
547              break;
548          default:
549              stateChar = '?';
550              break;
551          }
552      }
553      char nowInUnderrun;
554      switch (mObservedUnderruns.mBitFields.mMostRecent) {
555      case UNDERRUN_FULL:
556          nowInUnderrun = ' ';
557          break;
558      case UNDERRUN_PARTIAL:
559          nowInUnderrun = '<';
560          break;
561      case UNDERRUN_EMPTY:
562          nowInUnderrun = '*';
563          break;
564      default:
565          nowInUnderrun = '?';
566          break;
567      }
568      snprintf(&buffer[8], size-8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g  "
569                                   "%08X %p %p 0x%03X %9u%c\n",
570              active ? "yes" : "no",
571              (mClient == 0) ? getpid_cached : mClient->pid(),
572              mStreamType,
573              mFormat,
574              mChannelMask,
575              mSessionId,
576              mFrameCount,
577              stateChar,
578              mFillingUpStatus,
579              mAudioTrackServerProxy->getSampleRate(),
580              20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
581              20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
582              mCblk->mServer,
583              mMainBuffer,
584              mAuxBuffer,
585              mCblk->mFlags,
586              mAudioTrackServerProxy->getUnderrunFrames(),
587              nowInUnderrun);
588  }
589  
590  uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
591      return mAudioTrackServerProxy->getSampleRate();
592  }
593  
594  // AudioBufferProvider interface
595  status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
596          AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
597  {
598      ServerProxy::Buffer buf;
599      size_t desiredFrames = buffer->frameCount;
600      buf.mFrameCount = desiredFrames;
601      status_t status = mServerProxy->obtainBuffer(&buf);
602      buffer->frameCount = buf.mFrameCount;
603      buffer->raw = buf.mRaw;
604      if (buf.mFrameCount == 0) {
605          mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
606      }
607      return status;
608  }
609  
610  // releaseBuffer() is not overridden
611  
612  // ExtendedAudioBufferProvider interface
613  
614  // framesReady() may return an approximation of the number of frames if called
615  // from a different thread than the one calling Proxy->obtainBuffer() and
616  // Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
617  // AudioTrackServerProxy so be especially careful calling with FastTracks.
618  size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
619      if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
620          // Static tracks return zero frames immediately upon stopping (for FastTracks).
621          // The remainder of the buffer is not drained.
622          return 0;
623      }
624      return mAudioTrackServerProxy->framesReady();
625  }
626  
627  size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
628  {
629      return mAudioTrackServerProxy->framesReleased();
630  }
631  
632  // Don't call for fast tracks; the framesReady() could result in priority inversion
633  bool AudioFlinger::PlaybackThread::Track::isReady() const {
634      if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
635          return true;
636      }
637  
638      if (isStopping()) {
639          if (framesReady() > 0) {
640              mFillingUpStatus = FS_FILLED;
641          }
642          return true;
643      }
644  
645      if (framesReady() >= mFrameCount ||
646              (mCblk->mFlags & CBLK_FORCEREADY)) {
647          mFillingUpStatus = FS_FILLED;
648          android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
649          return true;
650      }
651      return false;
652  }
653  
654  status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
655                                                      int triggerSession __unused)
656  {
657      status_t status = NO_ERROR;
658      ALOGV("start(%d), calling pid %d session %d",
659              mName, IPCThreadState::self()->getCallingPid(), mSessionId);
660  
661      sp<ThreadBase> thread = mThread.promote();
662      if (thread != 0) {
663          if (isOffloaded()) {
664              Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
665              Mutex::Autolock _lth(thread->mLock);
666              sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
667              if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
668                      (ec != 0 && ec->isNonOffloadableEnabled())) {
669                  invalidate();
670                  return PERMISSION_DENIED;
671              }
672          }
673          Mutex::Autolock _lth(thread->mLock);
674          track_state state = mState;
675          // here the track could be either new, or restarted
676          // in both cases "unstop" the track
677  
678          // initial state: stopping; next state: pausing.
679          // What if resume is called?
680  
681          if (state == PAUSED || state == PAUSING) {
682              if (mResumeToStopping) {
683                  // the track was stopping when it was paused; resume the stop by returning to STOPPING_1
684                  mState = TrackBase::STOPPING_1;
685                  ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
686              } else {
687                  mState = TrackBase::RESUMING;
688                  ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
689              }
690          } else {
691              mState = TrackBase::ACTIVE;
692              ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
693          }
694  
695          PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
696          status = playbackThread->addTrack_l(this);
697          if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
698              triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
699              //  restore previous state if start was rejected by policy manager
700              if (status == PERMISSION_DENIED) {
701                  mState = state;
702              }
703          }
704          // track was already in the active list, not a problem
705          if (status == ALREADY_EXISTS) {
706              status = NO_ERROR;
707          } else {
708              // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
709              // It is usually unsafe to access the server proxy from a binder thread.
710              // But in this case we know the mixer thread (whether normal mixer or fast mixer)
711              // isn't looking at this track yet:  we still hold the normal mixer thread lock,
712              // and for fast tracks the track is not yet in the fast mixer thread's active set.
713              ServerProxy::Buffer buffer;
714              buffer.mFrameCount = 1;
715              (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
716          }
717      } else {
718          status = BAD_VALUE;
719      }
720      return status;
721  }
722  
723  void AudioFlinger::PlaybackThread::Track::stop()
724  {
725      ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
726      sp<ThreadBase> thread = mThread.promote();
727      if (thread != 0) {
728          Mutex::Autolock _l(thread->mLock);
729          track_state state = mState;
730          if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
731              // If the track is not active (PAUSED and buffers full), flush buffers
732              PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
733              if (playbackThread->mActiveTracks.indexOf(this) < 0) {
734                  reset();
735                  mState = STOPPED;
736              } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
737                  mState = STOPPED;
738              } else {
739                  // For fast tracks prepareTracks_l() will set state to STOPPING_2
740                  // when presentation is complete
741                  // For an offloaded track this starts a drain and state will
742                  // move to STOPPING_2 when drain completes and then STOPPED
743                  mState = STOPPING_1;
744              }
745              ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
746                      playbackThread);
747          }
748      }
749  }
750  
751  void AudioFlinger::PlaybackThread::Track::pause()
752  {
753      ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
754      sp<ThreadBase> thread = mThread.promote();
755      if (thread != 0) {
756          Mutex::Autolock _l(thread->mLock);
757          PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
758          switch (mState) {
759          case STOPPING_1:
760          case STOPPING_2:
761              if (!isOffloaded()) {
762                  /* nothing to do if track is not offloaded */
763                  break;
764              }
765  
766              // Offloaded track was draining, we need to carry on draining when resumed
767              mResumeToStopping = true;
768              // fall through...
769          case ACTIVE:
770          case RESUMING:
771              mState = PAUSING;
772              ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
773              playbackThread->broadcast_l();
774              break;
775  
776          default:
777              break;
778          }
779      }
780  }
781  
782  void AudioFlinger::PlaybackThread::Track::flush()
783  {
784      ALOGV("flush(%d)", mName);
785      sp<ThreadBase> thread = mThread.promote();
786      if (thread != 0) {
787          Mutex::Autolock _l(thread->mLock);
788          PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
789  
790          if (isOffloaded()) {
791              // If offloaded we allow flush during any state except terminated
792              // and keep the track active to avoid problems if user is seeking
793              // rapidly and underlying hardware has a significant delay handling
794              // a pause
795              if (isTerminated()) {
796                  return;
797              }
798  
799              ALOGV("flush: offload flush");
800              reset();
801  
802              if (mState == STOPPING_1 || mState == STOPPING_2) {
803                  ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
804                  mState = ACTIVE;
805              }
806  
807              if (mState == ACTIVE) {
808                  ALOGV("flush called in active state, resetting buffer time out retry count");
809                  mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
810              }
811  
812              mFlushHwPending = true;
813              mResumeToStopping = false;
814          } else {
815              if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
816                      mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
817                  return;
818              }
819              // No point remaining in PAUSED state after a flush => go to
820              // FLUSHED state
821              mState = FLUSHED;
822              // do not reset the track if it is still in the process of being stopped or paused.
823              // this will be done by prepareTracks_l() when the track is stopped.
824              // prepareTracks_l() will see mState == FLUSHED, then
825              // remove from active track list, reset(), and trigger presentation complete
826              if (isDirect()) {
827                  mFlushHwPending = true;
828              }
829              if (playbackThread->mActiveTracks.indexOf(this) < 0) {
830                  reset();
831              }
832          }
833          // Prevent flush being lost if the track is flushed and then resumed
834          // before mixer thread can run. This is important when offloading
835          // because the hardware buffer could hold a large amount of audio
836          playbackThread->broadcast_l();
837      }
838  }
839  
840  // must be called with thread lock held
841  void AudioFlinger::PlaybackThread::Track::flushAck()
842  {
843      if (!isOffloaded() && !isDirect())
844          return;
845  
846      mFlushHwPending = false;
847  }
848  
849  void AudioFlinger::PlaybackThread::Track::reset()
850  {
851      // Do not reset twice to avoid discarding data written just after a flush and before
852      // the audioflinger thread detects the track is stopped.
853      if (!mResetDone) {
854          // Force underrun condition to avoid false underrun callback until first data is
855          // written to buffer
856          android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
857          mFillingUpStatus = FS_FILLING;
858          mResetDone = true;
859          if (mState == FLUSHED) {
860              mState = IDLE;
861          }
862      }
863  }
864  
865  status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
866  {
867      sp<ThreadBase> thread = mThread.promote();
868      if (thread == 0) {
869          ALOGE("thread is dead");
870          return FAILED_TRANSACTION;
871      } else if ((thread->type() == ThreadBase::DIRECT) ||
872                      (thread->type() == ThreadBase::OFFLOAD)) {
873          return thread->setParameters(keyValuePairs);
874      } else {
875          return PERMISSION_DENIED;
876      }
877  }
878  
879  status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
880  {
881      // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
882      if (isFastTrack()) {
883          // FIXME no lock held to set mPreviousValid = false
884          return INVALID_OPERATION;
885      }
886      sp<ThreadBase> thread = mThread.promote();
887      if (thread == 0) {
888          // FIXME no lock held to set mPreviousValid = false
889          return INVALID_OPERATION;
890      }
891      Mutex::Autolock _l(thread->mLock);
892      PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
893      if (!isOffloaded() && !isDirect()) {
894          if (!playbackThread->mLatchQValid) {
895              mPreviousValid = false;
896              return INVALID_OPERATION;
897          }
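        // mUnpresentedFrames is counted at the playback thread's sample rate; rescale it to
        // this track's sample rate before comparing it with frames written by the track.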
898          uint32_t unpresentedFrames =
899                  ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
900                  playbackThread->mSampleRate;
901          // FIXME Since we're using a raw pointer as the key, it is theoretically possible
902          //       for a brand new track to share the same address as a recently destroyed
903          //       track, and thus for us to get the frames released of the wrong track.
904          //       It is unlikely that we would be able to call getTimestamp() so quickly
905          //       right after creating a new track.  Nevertheless, the index here should
906          //       be changed to something that is unique.  Or use a completely different strategy.
907          ssize_t i = playbackThread->mLatchQ.mFramesReleased.indexOfKey(this);
908          uint32_t framesWritten = i >= 0 ?
909                  playbackThread->mLatchQ.mFramesReleased[i] :
910                  mAudioTrackServerProxy->framesReleased();
911          bool checkPreviousTimestamp = mPreviousValid && framesWritten >= mPreviousFramesWritten;
912          if (framesWritten < unpresentedFrames) {
913              mPreviousValid = false;
914              return INVALID_OPERATION;
915          }
916          mPreviousFramesWritten = framesWritten;
917          uint32_t position = framesWritten - unpresentedFrames;
918          struct timespec time = playbackThread->mLatchQ.mTimestamp.mTime;
919          if (checkPreviousTimestamp) {
920              if (time.tv_sec < mPreviousTimestamp.mTime.tv_sec ||
921                      (time.tv_sec == mPreviousTimestamp.mTime.tv_sec &&
922                      time.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) {
923                  ALOGW("Time is going backwards");
924              }
925              // position can bobble slightly as an artifact; this hides the bobble
926              static const uint32_t MINIMUM_POSITION_DELTA = 8u;
927              if ((position <= mPreviousTimestamp.mPosition) ||
928                      (position - mPreviousTimestamp.mPosition) < MINIMUM_POSITION_DELTA) {
929                  position = mPreviousTimestamp.mPosition;
930                  time = mPreviousTimestamp.mTime;
931              }
932          }
933          timestamp.mPosition = position;
934          timestamp.mTime = time;
935          mPreviousTimestamp = timestamp;
936          mPreviousValid = true;
937          return NO_ERROR;
938      }
939  
940      return playbackThread->getTimestamp_l(timestamp);
941  }
942  
943  status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
944  {
945      status_t status = DEAD_OBJECT;
946      sp<ThreadBase> thread = mThread.promote();
947      if (thread != 0) {
948          PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
949          sp<AudioFlinger> af = mClient->audioFlinger();
950  
951          Mutex::Autolock _l(af->mLock);
952  
953          sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
954  
955          if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
956              Mutex::Autolock _dl(playbackThread->mLock);
957              Mutex::Autolock _sl(srcThread->mLock);
958              sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
959              if (chain == 0) {
960                  return INVALID_OPERATION;
961              }
962  
963              sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
964              if (effect == 0) {
965                  return INVALID_OPERATION;
966              }
967              srcThread->removeEffect_l(effect);
968              status = playbackThread->addEffect_l(effect);
969              if (status != NO_ERROR) {
970                  srcThread->addEffect_l(effect);
971                  return INVALID_OPERATION;
972              }
973              // removeEffect_l() has stopped the effect if it was active so it must be restarted
974              if (effect->state() == EffectModule::ACTIVE ||
975                      effect->state() == EffectModule::STOPPING) {
976                  effect->start();
977              }
978  
979              sp<EffectChain> dstChain = effect->chain().promote();
980              if (dstChain == 0) {
981                  srcThread->addEffect_l(effect);
982                  return INVALID_OPERATION;
983              }
984              AudioSystem::unregisterEffect(effect->id());
985              AudioSystem::registerEffect(&effect->desc(),
986                                          srcThread->id(),
987                                          dstChain->strategy(),
988                                          AUDIO_SESSION_OUTPUT_MIX,
989                                          effect->id());
990              AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
991          }
992          status = playbackThread->attachAuxEffect(this, EffectId);
993      }
994      return status;
995  }
996  
997  void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
998  {
999      mAuxEffectId = EffectId;
1000      mAuxBuffer = buffer;
1001  }
1002  
1003  bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
1004                                                           size_t audioHalFrames)
1005  {
1006      // a track is considered presented when the total number of frames written to audio HAL
1007      // corresponds to the number of frames written when presentationComplete() is called for the
1008      // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
1009      // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1010      // to detect when all frames have been played. In this case framesWritten isn't
1011      // useful because it doesn't always reflect whether there is data in the h/w
1012      // buffers, particularly if a track has been paused and resumed during draining
1013      ALOGV("presentationComplete() mPresentationCompleteFrames %zu framesWritten %zu",
1014                        mPresentationCompleteFrames, framesWritten);
1015      if (mPresentationCompleteFrames == 0) {
1016          mPresentationCompleteFrames = framesWritten + audioHalFrames;
1017          ALOGV("presentationComplete() reset: mPresentationCompleteFrames %zu audioHalFrames %zu",
1018                    mPresentationCompleteFrames, audioHalFrames);
1019      }
1020  
1021      if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
1022          triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1023          mAudioTrackServerProxy->setStreamEndDone();
1024          return true;
1025      }
1026      return false;
1027  }
1028  
1029  void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
1030  {
1031      for (size_t i = 0; i < mSyncEvents.size(); i++) {
1032          if (mSyncEvents[i]->type() == type) {
1033              mSyncEvents[i]->trigger();
1034              mSyncEvents.removeAt(i);
1035              i--;
1036          }
1037      }
1038  }
1039  
1040  // implement VolumeBufferProvider interface
1041  
1042  gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
1043  {
1044      // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
1045      ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
1046      gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
1047      float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
1048      float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
1049      // track volumes come from shared memory, so can't be trusted and must be clamped
1050      if (vl > GAIN_FLOAT_UNITY) {
1051          vl = GAIN_FLOAT_UNITY;
1052      }
1053      if (vr > GAIN_FLOAT_UNITY) {
1054          vr = GAIN_FLOAT_UNITY;
1055      }
1056      // now apply the cached master volume and stream type volume;
1057      // this is trusted but lacks any synchronization or barrier so may be stale
1058      float v = mCachedVolume;
1059      vl *= v;
1060      vr *= v;
1061      // re-combine into packed minifloat
1062      vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
1063      // FIXME look at mute, pause, and stop flags
1064      return vlr;
1065  }
1066  
1067  status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
1068  {
1069      if (isTerminated() || mState == PAUSED ||
1070              ((framesReady() == 0) && ((mSharedBuffer != 0) ||
1071                                        (mState == STOPPED)))) {
1072          ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
1073                mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
1074          event->cancel();
1075          return INVALID_OPERATION;
1076      }
1077      (void) TrackBase::setSyncEvent(event);
1078      return NO_ERROR;
1079  }
1080  
1081  void AudioFlinger::PlaybackThread::Track::invalidate()
1082  {
1083      // FIXME should use proxy, and needs work
1084      audio_track_cblk_t* cblk = mCblk;
1085      android_atomic_or(CBLK_INVALID, &cblk->mFlags);
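    // The store changes the futex word so a client blocked in obtainBuffer() wakes,
    // re-reads the flags, and observes CBLK_INVALID (apparent purpose of the 0x40000000
    // magic value).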
1086      android_atomic_release_store(0x40000000, &cblk->mFutex);
1087      // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
1088      (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
1089      mIsInvalid = true;
1090  }
1091  
1092  void AudioFlinger::PlaybackThread::Track::signal()
1093  {
1094      sp<ThreadBase> thread = mThread.promote();
1095      if (thread != 0) {
1096          PlaybackThread *t = (PlaybackThread *)thread.get();
1097          Mutex::Autolock _l(t->mLock);
1098          t->broadcast_l();
1099      }
1100  }
1101  
1102  //To be called with thread lock held
1103  bool AudioFlinger::PlaybackThread::Track::isResumePending() {
1104  
1105      if (mState == RESUMING)
1106          return true;
1107      /* Resume is pending if track was stopping before pause was called */
1108      if (mState == STOPPING_1 &&
1109          mResumeToStopping)
1110          return true;
1111  
1112      return false;
1113  }
1114  
1115  //To be called with thread lock held
1116  void AudioFlinger::PlaybackThread::Track::resumeAck() {
1117  
1118  
1119      if (mState == RESUMING)
1120          mState = ACTIVE;
1121  
1122      // The other possibility of a pending resume is the STOPPING_1 state.
1123      // Do not update the state from stopping as this prevents
1124      // drain being called.
1125      if (mState == STOPPING_1) {
1126          mResumeToStopping = false;
1127      }
1128  }
1129  // ----------------------------------------------------------------------------
1130  
1131  sp<AudioFlinger::PlaybackThread::TimedTrack>
1132  AudioFlinger::PlaybackThread::TimedTrack::create(
1133              PlaybackThread *thread,
1134              const sp<Client>& client,
1135              audio_stream_type_t streamType,
1136              uint32_t sampleRate,
1137              audio_format_t format,
1138              audio_channel_mask_t channelMask,
1139              size_t frameCount,
1140              const sp<IMemory>& sharedBuffer,
1141              int sessionId,
1142              int uid)
1143  {
1144      if (!client->reserveTimedTrack())
1145          return 0;
1146  
1147      return new TimedTrack(
1148          thread, client, streamType, sampleRate, format, channelMask, frameCount,
1149          sharedBuffer, sessionId, uid);
1150  }
1151  
1152  AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
1153              PlaybackThread *thread,
1154              const sp<Client>& client,
1155              audio_stream_type_t streamType,
1156              uint32_t sampleRate,
1157              audio_format_t format,
1158              audio_channel_mask_t channelMask,
1159              size_t frameCount,
1160              const sp<IMemory>& sharedBuffer,
1161              int sessionId,
1162              int uid)
1163      : Track(thread, client, streamType, sampleRate, format, channelMask,
1164              frameCount, (sharedBuffer != 0) ? sharedBuffer->pointer() : NULL, sharedBuffer,
1165                      sessionId, uid, IAudioFlinger::TRACK_TIMED, TYPE_TIMED),
1166        mQueueHeadInFlight(false),
1167        mTrimQueueHeadOnRelease(false),
1168        mFramesPendingInQueue(0),
1169        mTimedSilenceBuffer(NULL),
1170        mTimedSilenceBufferSize(0),
1171        mTimedAudioOutputOnTime(false),
1172        mMediaTimeTransformValid(false)
1173  {
1174      LocalClock lc;
1175      mLocalTimeFreq = lc.getLocalFreq();
1176  
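    // Precompute two rational transforms (reduced to lowest terms): local-clock ticks to
    // sample frames (sampleRate / mLocalTimeFreq) and media-time microseconds to sample
    // frames (sampleRate / 1000000).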
1177      mLocalTimeToSampleTransform.a_zero = 0;
1178      mLocalTimeToSampleTransform.b_zero = 0;
1179      mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
1180      mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
1181      LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
1182                              &mLocalTimeToSampleTransform.a_to_b_denom);
1183  
1184      mMediaTimeToSampleTransform.a_zero = 0;
1185      mMediaTimeToSampleTransform.b_zero = 0;
1186      mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
1187      mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
1188      LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
1189                              &mMediaTimeToSampleTransform.a_to_b_denom);
1190  }
1191  
1192  AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
1193      mClient->releaseTimedTrack();
1194      delete [] mTimedSilenceBuffer;
1195  }
1196  
1197  status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
1198      size_t size, sp<IMemory>* buffer) {
1199  
1200      Mutex::Autolock _l(mTimedBufferQueueLock);
1201  
1202      trimTimedBufferQueue_l();
1203  
1204      // lazily initialize the shared memory heap for timed buffers
1205      if (mTimedMemoryDealer == NULL) {
1206          const int kTimedBufferHeapSize = 512 << 10;
1207  
1208          mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
1209                                                "AudioFlingerTimed");
1210          if (mTimedMemoryDealer == NULL) {
1211              return NO_MEMORY;
1212          }
1213      }
1214  
1215      sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
1216      if (newBuffer == 0 || newBuffer->pointer() == NULL) {
1217          return NO_MEMORY;
1218      }
1219  
1220      *buffer = newBuffer;
1221      return NO_ERROR;
1222  }
1223  
1224  // caller must hold mTimedBufferQueueLock
1225  void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
1226      int64_t mediaTimeNow;
1227      {
1228          Mutex::Autolock mttLock(mMediaTimeTransformLock);
1229          if (!mMediaTimeTransformValid)
1230              return;
1231  
1232          int64_t targetTimeNow;
1233          status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
1234              ? mCCHelper.getCommonTime(&targetTimeNow)
1235              : mCCHelper.getLocalTime(&targetTimeNow);
1236  
1237          if (OK != res)
1238              return;
1239  
1240          if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
1241                                                      &mediaTimeNow)) {
1242              return;
1243          }
1244      }
1245  
1246      size_t trimEnd;
1247      for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
1248          int64_t bufEnd;
1249  
1250          if ((trimEnd + 1) < mTimedBufferQueue.size()) {
1251              // We have a next buffer.  Just use its PTS as the PTS of the frame
1252              // following the last frame in this buffer.  If the stream is sparse
1253              // (ie, there are deliberate gaps left in the stream which should be
1254              // filled with silence by the TimedAudioTrack), then this can result
1255              // in one extra buffer being left un-trimmed when it could have
1256              // been.  In general, this is not typical, and we would rather
1257              // optimize away the TS calculation below for the more common case
1258              // where PTSes are contiguous.
1259              bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
1260          } else {
1261              // We have no next buffer.  Compute the PTS of the frame following
1262              // the last frame in this buffer by computing the duration of
1263              // this frame in media time units and adding it to the PTS of the
1264              // buffer.
1265              int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
1266                                 / mFrameSize;
1267  
1268              if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
1269                                                                  &bufEnd)) {
1270                  ALOGE("Failed to convert frame count of %lld to media time"
1271                        " duration" " (scale factor %d/%u) in %s",
1272                        frameCount,
1273                        mMediaTimeToSampleTransform.a_to_b_numer,
1274                        mMediaTimeToSampleTransform.a_to_b_denom,
1275                        __PRETTY_FUNCTION__);
1276                  break;
1277              }
1278              bufEnd += mTimedBufferQueue[trimEnd].pts();
1279          }
1280  
1281          if (bufEnd > mediaTimeNow)
1282              break;
1283  
1284          // Is the buffer we want to use in the middle of a mix operation right
1285          // now?  If so, don't actually trim it.  Just wait for the releaseBuffer
1286          // from the mixer which should be coming back shortly.
1287          if (!trimEnd && mQueueHeadInFlight) {
1288              mTrimQueueHeadOnRelease = true;
1289          }
1290      }
1291  
1292      size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
1293      if (trimStart < trimEnd) {
1294          // Update the bookkeeping for framesReady()
1295          for (size_t i = trimStart; i < trimEnd; ++i) {
1296              updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
1297          }
1298  
1299          // Now actually remove the buffers from the queue (count, not end index).
1300          mTimedBufferQueue.removeItemsAt(trimStart, trimEnd - trimStart);
1301      }
1302  }
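// Illustrative sketch (editor's addition, not part of the original file; fenced
// off with #if 0): how the "no next buffer" branch above turns a frame count
// back into a media-time duration.  The forward direction of
// mMediaTimeToSampleTransform maps media time to samples, so the reverse
// transform maps samples back to media time.  The 48000/1000000 scale factor
// and the function name are assumptions chosen only for this example.
#if 0
static int64_t exampleBufferEndPts(int64_t bufferPts, int64_t frameCount) {
    LinearTransform mediaTimeToSample;
    mediaTimeToSample.a_zero = 0;               // media-time origin
    mediaTimeToSample.b_zero = 0;               // sample origin
    mediaTimeToSample.a_to_b_numer = 48000;     // 48000 samples...
    mediaTimeToSample.a_to_b_denom = 1000000;   // ...per 1000000 media-time units (microseconds)

    int64_t durationMediaTime;
    // reverse transform: samples -> media time; returns false on overflow
    if (!mediaTimeToSample.doReverseTransform(frameCount, &durationMediaTime)) {
        return bufferPts;   // conversion failed; the caller above stops trimming here
    }
    return bufferPts + durationMediaTime;       // PTS of the frame following the buffer
}
#endif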
1303  
1304  void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
1305          const char* logTag) {
1306      ALOG_ASSERT(mTimedBufferQueue.size() > 0,
1307                  "%s called (reason \"%s\"), but timed buffer queue has no"
1308                  " elements to trim.", __FUNCTION__, logTag);
1309  
1310      updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
1311      mTimedBufferQueue.removeAt(0);
1312  }
1313  
1314  void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
1315          const TimedBuffer& buf,
1316          const char* logTag __unused) {
1317      uint32_t bufBytes        = buf.buffer()->size();
1318      uint32_t consumedAlready = buf.position();
1319  
1320      ALOG_ASSERT(consumedAlready <= bufBytes,
1321                  "Bad bookkeeping while updating frames pending.  Timed buffer is"
1322                  " only %u bytes long, but claims to have consumed %u"
1323                  " bytes.  (update reason: \"%s\")",
1324                  bufBytes, consumedAlready, logTag);
1325  
1326      uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
1327      ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
1328                  "Bad bookkeeping while updating frames pending.  Should have at"
1329                  " least %u queued frames, but we think we have only %u.  (update"
1330                  " reason: \"%s\")",
1331                  bufFrames, mFramesPendingInQueue, logTag);
1332  
1333      mFramesPendingInQueue -= bufFrames;
1334  }
1335  
1336  status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
1337      const sp<IMemory>& buffer, int64_t pts) {
1338  
1339      {
1340          Mutex::Autolock mttLock(mMediaTimeTransformLock);
1341          if (!mMediaTimeTransformValid)
1342              return INVALID_OPERATION;
1343      }
1344  
1345      Mutex::Autolock _l(mTimedBufferQueueLock);
1346  
1347      uint32_t bufFrames = buffer->size() / mFrameSize;
1348      mFramesPendingInQueue += bufFrames;
1349      mTimedBufferQueue.add(TimedBuffer(buffer, pts));
1350  
1351      return NO_ERROR;
1352  }
1353  
1354  status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
1355      const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
1356  
1357      ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
1358             xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
1359             target);
1360  
1361      if (!(target == TimedAudioTrack::LOCAL_TIME ||
1362            target == TimedAudioTrack::COMMON_TIME)) {
1363          return BAD_VALUE;
1364      }
1365  
1366      Mutex::Autolock lock(mMediaTimeTransformLock);
1367      mMediaTimeTransform = xform;
1368      mMediaTimeTransformTarget = target;
1369      mMediaTimeTransformValid = true;
1370  
1371      return NO_ERROR;
1372  }
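// Illustrative sketch (editor's addition, not part of the original file; fenced
// off with #if 0): one way a client could build the LinearTransform passed to
// setMediaTimeTransform().  a_zero/b_zero anchor a media-time value to a point
// on the target timeline, and a_to_b_numer/a_to_b_denom give the rate ratio
// between the two clocks.  As the pause check in getNextBuffer() below shows,
// a zero denominator is treated as "playback paused".  The 1:1 rate and the
// names here are assumptions for the example only.
#if 0
static LinearTransform exampleMediaToTargetTransform(int64_t mediaTimeNow,
                                                     int64_t targetTimeNow,
                                                     bool paused) {
    LinearTransform xform;
    xform.a_zero = mediaTimeNow;          // this media-time value...
    xform.b_zero = targetTimeNow;         // ...corresponds to this target-time value
    xform.a_to_b_numer = 1;               // assume both timelines tick at the same rate
    xform.a_to_b_denom = paused ? 0 : 1;  // zero denominator == paused (see getNextBuffer)
    return xform;
}
#endif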
1373  
1374  #define min(a, b) ((a) < (b) ? (a) : (b))
1375  
1376  // implementation of getNextBuffer for tracks whose buffers have timestamps
1377  status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
1378      AudioBufferProvider::Buffer* buffer, int64_t pts)
1379  {
1380      if (pts == AudioBufferProvider::kInvalidPTS) {
1381          buffer->raw = NULL;
1382          buffer->frameCount = 0;
1383          mTimedAudioOutputOnTime = false;
1384          return INVALID_OPERATION;
1385      }
1386  
1387      Mutex::Autolock _l(mTimedBufferQueueLock);
1388  
1389      ALOG_ASSERT(!mQueueHeadInFlight,
1390                  "getNextBuffer called without releaseBuffer!");
1391  
1392      while (true) {
1393  
1394          // if we have no timed buffers, then fail
1395          if (mTimedBufferQueue.isEmpty()) {
1396              buffer->raw = NULL;
1397              buffer->frameCount = 0;
1398              return NOT_ENOUGH_DATA;
1399          }
1400  
1401          TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1402  
1403          // calculate the PTS of the head of the timed buffer queue expressed in
1404          // local time
1405          int64_t headLocalPTS;
1406          {
1407              Mutex::Autolock mttLock(mMediaTimeTransformLock);
1408  
1409              ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
1410  
1411              if (mMediaTimeTransform.a_to_b_denom == 0) {
1412                  // the transform represents a pause, so yield silence
1413                  timedYieldSilence_l(buffer->frameCount, buffer);
1414                  return NO_ERROR;
1415              }
1416  
1417              int64_t transformedPTS;
1418              if (!mMediaTimeTransform.doForwardTransform(head.pts(),
1419                                                          &transformedPTS)) {
1420                  // the transform failed.  this shouldn't happen, but if it does
1421                  // then just drop this buffer
1422                  ALOGW("timedGetNextBuffer transform failed");
1423                  buffer->raw = NULL;
1424                  buffer->frameCount = 0;
1425                  trimTimedBufferQueueHead_l("getNextBuffer; no transform");
1426                  return NO_ERROR;
1427              }
1428  
1429              if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
1430                  if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
1431                                                            &headLocalPTS)) {
1432                      buffer->raw = NULL;
1433                      buffer->frameCount = 0;
1434                      return INVALID_OPERATION;
1435                  }
1436              } else {
1437                  headLocalPTS = transformedPTS;
1438              }
1439          }
1440  
1441          uint32_t sr = sampleRate();
1442  
1443          // adjust the head buffer's PTS to reflect the portion of the head buffer
1444          // that has already been consumed
1445          int64_t effectivePTS = headLocalPTS +
1446                  ((head.position() / mFrameSize) * mLocalTimeFreq / sr);
1447  
1448          // Calculate the delta in samples between the head of the input buffer
1449          // queue and the start of the next output buffer that will be written.
1450          // If the transformation fails because of over or underflow, it means
1451          // that the sample's position in the output stream is so far out of
1452          // whack that it should just be dropped.
1453          int64_t sampleDelta;
1454          if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
1455              ALOGV("*** head buffer is too far from PTS: dropped buffer");
1456              trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
1457                                         " mix");
1458              continue;
1459          }
1460          if (!mLocalTimeToSampleTransform.doForwardTransform(
1461                  (effectivePTS - pts) << 32, &sampleDelta)) {
1462              ALOGV("*** too late during sample rate transform: dropped buffer");
1463              trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
1464              continue;
1465          }
1466  
1467          ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
1468                 " sampleDelta=[%d.%08x]",
1469                 head.pts(), head.position(), pts,
1470                 static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
1471                     + (sampleDelta >> 32)),
1472                 static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
1473  
1474          // if the delta between the ideal placement for the next input sample and
1475          // the current output position is within this threshold, then we will
1476          // concatenate the next input samples to the previous output
1477          const int64_t kSampleContinuityThreshold =
1478                  (static_cast<int64_t>(sr) << 32) / 250;
1479  
1480          // if this is the first buffer of audio that we're emitting from this track
1481          // then it should be almost exactly on time.
1482          const int64_t kSampleStartupThreshold = 1LL << 32;
1483  
1484          if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
1485             (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
1486              // the next input is close enough to being on time, so concatenate it
1487              // with the last output
1488              timedYieldSamples_l(buffer);
1489  
1490              ALOGVV("*** on time: head.pos=%d frameCount=%u",
1491                      head.position(), buffer->frameCount);
1492              return NO_ERROR;
1493          }
1494  
1495          // Looks like our output is not on time.  Reset our on-time status.
1496          // Next time we mix samples from our input queue, they should be within
1497          // the StartupThreshold.
1498          mTimedAudioOutputOnTime = false;
1499          if (sampleDelta > 0) {
1500              // the gap between the current output position and the proper start of
1501              // the next input sample is too big, so fill it with silence
1502              uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
1503  
1504              timedYieldSilence_l(framesUntilNextInput, buffer);
1505              ALOGV("*** silence: frameCount=%u", buffer->frameCount);
1506              return NO_ERROR;
1507          } else {
1508              // the next input sample is late
1509              uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
1510              size_t onTimeSamplePosition =
1511                      head.position() + lateFrames * mFrameSize;
1512  
1513              if (onTimeSamplePosition > head.buffer()->size()) {
1514                  // all the remaining samples in the head are too late, so
1515                  // drop it and move on
1516                  ALOGV("*** too late: dropped buffer");
1517                  trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
1518                  continue;
1519              } else {
1520                  // skip over the late samples
1521                  head.setPosition(onTimeSamplePosition);
1522  
1523                  // yield the available samples
1524                  timedYieldSamples_l(buffer);
1525  
1526                  ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
1527                  return NO_ERROR;
1528              }
1529          }
1530      }
1531  }
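// Illustrative sketch (editor's addition, not part of the original file; fenced
// off with #if 0): the sample deltas above are signed Q32.32 fixed point
// (upper 32 bits = whole frames, lower 32 bits = fractional frame).  Rounding
// to the nearest whole frame adds half a frame (0x80000000) before shifting,
// and the continuity threshold of (sr << 32) / 250 is simply 1/250 s, i.e.
// 4 ms worth of frames at the current sample rate.  Values below are assumed
// for the worked example.
#if 0
static void exampleQ32Dot32Math() {
    const uint32_t sr = 48000;

    // 4 ms continuity window at 48 kHz = 192 frames, expressed in Q32.32
    const int64_t continuityThreshold = (static_cast<int64_t>(sr) << 32) / 250;

    // a delta of 2.5 frames, rounded to the nearest whole frame (= 3)
    const int64_t sampleDelta = (static_cast<int64_t>(2) << 32) + (1LL << 31);
    const uint32_t wholeFrames = static_cast<uint32_t>((sampleDelta + 0x80000000) >> 32);

    (void) continuityThreshold;   // == 192LL << 32
    (void) wholeFrames;           // == 3
}
#endif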
1532  
1533  // Yield samples from the timed buffer queue head up to the given output
1534  // buffer's capacity.
1535  //
1536  // Caller must hold mTimedBufferQueueLock
1537  void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
1538      AudioBufferProvider::Buffer* buffer) {
1539  
1540      const TimedBuffer& head = mTimedBufferQueue[0];
1541  
1542      buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
1543                     head.position());
1544  
1545      uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
1546                                   mFrameSize);
1547      size_t framesRequested = buffer->frameCount;
1548      buffer->frameCount = min(framesLeftInHead, framesRequested);
1549  
1550      mQueueHeadInFlight = true;
1551      mTimedAudioOutputOnTime = true;
1552  }
1553  
1554  // Yield samples of silence up to the given output buffer's capacity
1555  //
1556  // Caller must hold mTimedBufferQueueLock
1557  void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
1558      uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
1559  
1560      // lazily allocate a buffer filled with silence
1561      if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
1562          delete [] mTimedSilenceBuffer;
1563          mTimedSilenceBufferSize = numFrames * mFrameSize;
1564          mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
1565          memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
1566      }
1567  
1568      buffer->raw = mTimedSilenceBuffer;
1569      size_t framesRequested = buffer->frameCount;
1570      buffer->frameCount = min(numFrames, framesRequested);
1571  
1572      mTimedAudioOutputOnTime = false;
1573  }
1574  
1575  // AudioBufferProvider interface
1576  void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
1577      AudioBufferProvider::Buffer* buffer) {
1578  
1579      Mutex::Autolock _l(mTimedBufferQueueLock);
1580  
1581      // If the buffer which was just released is part of the buffer at the head
1582      // of the queue, be sure to update the amount of the buffer which has been
1583      // consumed.  If the buffer being returned is not part of the head of the
1584      // queue, it's either because the buffer is part of the silence buffer, or
1585      // because the head of the timed queue was trimmed after the mixer called
1586      // getNextBuffer but before the mixer called releaseBuffer.
1587      if (buffer->raw == mTimedSilenceBuffer) {
1588          ALOG_ASSERT(!mQueueHeadInFlight,
1589                      "Queue head in flight during release of silence buffer!");
1590          goto done;
1591      }
1592  
1593      ALOG_ASSERT(mQueueHeadInFlight,
1594                  "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
1595                  " head in flight.");
1596  
1597      if (mTimedBufferQueue.size()) {
1598          TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1599  
1600          void* start = head.buffer()->pointer();
1601          void* end   = reinterpret_cast<void*>(
1602                          reinterpret_cast<uint8_t*>(head.buffer()->pointer())
1603                          + head.buffer()->size());
1604  
1605          ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
1606                      "released buffer not within the head of the timed buffer"
1607                      " queue; qHead = [%p, %p], released buffer = %p",
1608                      start, end, buffer->raw);
1609  
1610          head.setPosition(head.position() +
1611                  (buffer->frameCount * mFrameSize));
1612          mQueueHeadInFlight = false;
1613  
1614          ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
1615                      "Bad bookkeeping during releaseBuffer!  Should have at"
1616                      " least %u queued frames, but we think we have only %u",
1617                      buffer->frameCount, mFramesPendingInQueue);
1618  
1619          mFramesPendingInQueue -= buffer->frameCount;
1620  
1621          if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
1622              || mTrimQueueHeadOnRelease) {
1623              trimTimedBufferQueueHead_l("releaseBuffer");
1624              mTrimQueueHeadOnRelease = false;
1625          }
1626      } else {
1627          LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
1628                    " buffers in the timed buffer queue");
1629      }
1630  
1631  done:
1632      buffer->raw = 0;
1633      buffer->frameCount = 0;
1634  }
1635  
1636  size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
1637      Mutex::Autolock _l(mTimedBufferQueueLock);
1638      return mFramesPendingInQueue;
1639  }
1640  
1641  AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
1642          : mPTS(0), mPosition(0) {}
1643  
1644  AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
1645      const sp<IMemory>& buffer, int64_t pts)
1646          : mBuffer(buffer), mPTS(pts), mPosition(0) {}
1647  
1648  
1649  // ----------------------------------------------------------------------------
1650  
1651  AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1652              PlaybackThread *playbackThread,
1653              DuplicatingThread *sourceThread,
1654              uint32_t sampleRate,
1655              audio_format_t format,
1656              audio_channel_mask_t channelMask,
1657              size_t frameCount,
1658              int uid)
1659      :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
1660                sampleRate, format, channelMask, frameCount,
1661                NULL, 0, 0, uid, IAudioFlinger::TRACK_DEFAULT, TYPE_OUTPUT),
1662      mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
1663  {
1664  
1665      if (mCblk != NULL) {
1666          mOutBuffer.frameCount = 0;
1667          playbackThread->mTracks.add(this);
1668          ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
1669                  "frameCount %u, mChannelMask 0x%08x",
1670                  mCblk, mBuffer,
1671                  frameCount, mChannelMask);
1672          // since client and server are in the same process,
1673          // the buffer has the same virtual address on both sides
1674          mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
1675                  true /*clientInServer*/);
1676          mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
1677          mClientProxy->setSendLevel(0.0);
1678          mClientProxy->setSampleRate(sampleRate);
1679      } else {
1680          ALOGW("Error creating output track on thread %p", playbackThread);
1681      }
1682  }
1683  
1684  AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1685  {
1686      clearBufferQueue();
1687      delete mClientProxy;
1688      // superclass destructor will now delete the server proxy and the shared memory both proxies refer to
1689  }
1690  
1691  status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
1692                                                            int triggerSession)
1693  {
1694      status_t status = Track::start(event, triggerSession);
1695      if (status != NO_ERROR) {
1696          return status;
1697      }
1698  
1699      mActive = true;
1700      mRetryCount = 127;
1701      return status;
1702  }
1703  
1704  void AudioFlinger::PlaybackThread::OutputTrack::stop()
1705  {
1706      Track::stop();
1707      clearBufferQueue();
1708      mOutBuffer.frameCount = 0;
1709      mActive = false;
1710  }
1711  
1712  bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
1713  {
1714      Buffer *pInBuffer;
1715      Buffer inBuffer;
1716      uint32_t channelCount = mChannelCount;
1717      bool outputBufferFull = false;
1718      inBuffer.frameCount = frames;
1719      inBuffer.i16 = data;
1720  
1721      uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
1722  
1723      if (!mActive && frames != 0) {
1724          start();
1725          sp<ThreadBase> thread = mThread.promote();
1726          if (thread != 0) {
1727              MixerThread *mixerThread = (MixerThread *)thread.get();
1728              if (mFrameCount > frames) {
1729                  if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1730                      uint32_t startFrames = (mFrameCount - frames);
1731                      pInBuffer = new Buffer;
1732                      pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
1733                      pInBuffer->frameCount = startFrames;
1734                      pInBuffer->i16 = pInBuffer->mBuffer;
1735                      memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
1736                      mBufferQueue.add(pInBuffer);
1737                  } else {
1738                      ALOGW("OutputTrack::write() %p no more buffers in queue", this);
1739                  }
1740              }
1741          }
1742      }
1743  
1744      while (waitTimeLeftMs) {
1745          // First write pending buffers, then new data
1746          if (mBufferQueue.size()) {
1747              pInBuffer = mBufferQueue.itemAt(0);
1748          } else {
1749              pInBuffer = &inBuffer;
1750          }
1751  
1752          if (pInBuffer->frameCount == 0) {
1753              break;
1754          }
1755  
1756          if (mOutBuffer.frameCount == 0) {
1757              mOutBuffer.frameCount = pInBuffer->frameCount;
1758              nsecs_t startTime = systemTime();
1759              status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
1760              if (status != NO_ERROR) {
1761                  ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
1762                          mThread.unsafe_get(), status);
1763                  outputBufferFull = true;
1764                  break;
1765              }
1766              uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
1767              if (waitTimeLeftMs >= waitTimeMs) {
1768                  waitTimeLeftMs -= waitTimeMs;
1769              } else {
1770                  waitTimeLeftMs = 0;
1771              }
1772          }
1773  
1774          uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
1775                  pInBuffer->frameCount;
1776          memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
1777          Proxy::Buffer buf;
1778          buf.mFrameCount = outFrames;
1779          buf.mRaw = NULL;
1780          mClientProxy->releaseBuffer(&buf);
1781          pInBuffer->frameCount -= outFrames;
1782          pInBuffer->i16 += outFrames * channelCount;
1783          mOutBuffer.frameCount -= outFrames;
1784          mOutBuffer.i16 += outFrames * channelCount;
1785  
1786          if (pInBuffer->frameCount == 0) {
1787              if (mBufferQueue.size()) {
1788                  mBufferQueue.removeAt(0);
1789                  delete [] pInBuffer->mBuffer;
1790                  delete pInBuffer;
1791                  ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
1792                          mThread.unsafe_get(), mBufferQueue.size());
1793              } else {
1794                  break;
1795              }
1796          }
1797      }
1798  
1799      // If we could not write all frames, allocate a buffer and queue it for next time.
1800      if (inBuffer.frameCount) {
1801          sp<ThreadBase> thread = mThread.promote();
1802          if (thread != 0 && !thread->standby()) {
1803              if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1804                  pInBuffer = new Buffer;
1805                  pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
1806                  pInBuffer->frameCount = inBuffer.frameCount;
1807                  pInBuffer->i16 = pInBuffer->mBuffer;
1808                  memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
1809                          sizeof(int16_t));
1810                  mBufferQueue.add(pInBuffer);
1811                  ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
1812                          mThread.unsafe_get(), mBufferQueue.size());
1813              } else {
1814                  ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
1815                          this, mThread.unsafe_get());
1816              }
1817          }
1818      }
1819  
1820      // Calling write() with a 0 length buffer means that no more data will be written:
1821      // if no more buffers are pending, fill the output track buffer to make sure it is
1822      // started by the output mixer.
1823      if (frames == 0 && mBufferQueue.size() == 0) {
1824          // FIXME broken, replace by getting framesReady() from proxy
1825          size_t user = 0;    // was mCblk->user
1826          if (user < mFrameCount) {
1827              frames = mFrameCount - user;
1828              pInBuffer = new Buffer;
1829              pInBuffer->mBuffer = new int16_t[frames * channelCount];
1830              pInBuffer->frameCount = frames;
1831              pInBuffer->i16 = pInBuffer->mBuffer;
1832              memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
1833              mBufferQueue.add(pInBuffer);
1834          } else if (mActive) {
1835              stop();
1836          }
1837      }
1838  
1839      return outputBufferFull;
1840  }
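// Illustrative sketch (editor's addition, not part of the original file; fenced
// off with #if 0): the inner step of write() above in isolation -- copy as many
// 16-bit frames as both the input and the output buffer allow, then advance
// each side by the amount consumed.  The names are hypothetical.
#if 0
static uint32_t exampleCopyFrames(int16_t*& in, uint32_t& inFrames,
                                  int16_t*& out, uint32_t& outFrames,
                                  uint32_t channelCount) {
    uint32_t frames = (inFrames > outFrames) ? outFrames : inFrames;
    memcpy(out, in, frames * channelCount * sizeof(int16_t));
    in  += frames * channelCount;   // advance the input by the samples consumed
    out += frames * channelCount;   // advance the output by the samples produced
    inFrames  -= frames;
    outFrames -= frames;
    return frames;
}
#endif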
1841  
1842  status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
1843          AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
1844  {
1845      ClientProxy::Buffer buf;
1846      buf.mFrameCount = buffer->frameCount;
1847      struct timespec timeout;
1848      timeout.tv_sec = waitTimeMs / 1000;
1849      timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
1850      status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
1851      buffer->frameCount = buf.mFrameCount;
1852      buffer->raw = buf.mRaw;
1853      return status;
1854  }
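// Illustrative sketch (editor's addition, not part of the original file; fenced
// off with #if 0): the millisecond-to-timespec conversion used for the
// obtainBuffer() timeout above, e.g. 2750 ms -> { tv_sec = 2, tv_nsec = 750000000 }.
#if 0
static struct timespec exampleMsToTimespec(uint32_t waitTimeMs) {
    struct timespec timeout;
    timeout.tv_sec  = waitTimeMs / 1000;                     // whole seconds
    timeout.tv_nsec = (long)(waitTimeMs % 1000) * 1000000;   // remainder as nanoseconds
    return timeout;
}
#endif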
1855  
1856  void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
1857  {
1858      size_t size = mBufferQueue.size();
1859  
1860      for (size_t i = 0; i < size; i++) {
1861          Buffer *pBuffer = mBufferQueue.itemAt(i);
1862          delete [] pBuffer->mBuffer;
1863          delete pBuffer;
1864      }
1865      mBufferQueue.clear();
1866  }
1867  
1868  
1869  AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
1870                                                       uint32_t sampleRate,
1871                                                       audio_channel_mask_t channelMask,
1872                                                       audio_format_t format,
1873                                                       size_t frameCount,
1874                                                       void *buffer,
1875                                                       IAudioFlinger::track_flags_t flags)
1876      :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
1877                sampleRate, format, channelMask, frameCount,
1878                buffer, 0, 0, getuid(), flags, TYPE_PATCH),
1879                mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
1880  {
1881      uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
1882                                                                      playbackThread->sampleRate();
1883      mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
1884      mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
1885  
1886      ALOGV("PatchTrack %p sampleRate %d mPeerTimeout %d.%03d sec",
1887                                        this, sampleRate,
1888                                        (int)mPeerTimeout.tv_sec,
1889                                        (int)(mPeerTimeout.tv_nsec / 1000000));
1890  }
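// Illustrative sketch (editor's addition, not part of the original file; fenced
// off with #if 0): the peer timeout above is two mix buffers worth of time.
// Worked example with assumed values: frameCount = 960 and sampleRate = 48000
// give mixBufferNs = 2 * 960 * 1000000000 / 48000 = 40000000 ns, i.e. 0.040 s.
#if 0
static struct timespec examplePeerTimeout(size_t frameCount, uint32_t sampleRate) {
    uint64_t mixBufferNs = ((uint64_t)2 * frameCount * 1000000000) / sampleRate;
    struct timespec peerTimeout;
    peerTimeout.tv_sec  = mixBufferNs / 1000000000;
    peerTimeout.tv_nsec = (long)(mixBufferNs % 1000000000);
    return peerTimeout;
}
#endif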
1891  
1892  AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
1893  {
1894  }
1895  
1896  // AudioBufferProvider interface
1897  status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
1898          AudioBufferProvider::Buffer* buffer, int64_t pts)
1899  {
1900      ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::getNextBuffer() called without peer proxy");
1901      Proxy::Buffer buf;
1902      buf.mFrameCount = buffer->frameCount;
1903      status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
1904      ALOGV_IF(status != NO_ERROR, "PatchTrack() %p getNextBuffer status %d", this, status);
1905      buffer->frameCount = buf.mFrameCount;
1906      if (buf.mFrameCount == 0) {
1907          return WOULD_BLOCK;
1908      }
1909      status = Track::getNextBuffer(buffer, pts);
1910      return status;
1911  }
1912  
1913  void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
1914  {
1915      ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::releaseBuffer() called without peer proxy");
1916      Proxy::Buffer buf;
1917      buf.mFrameCount = buffer->frameCount;
1918      buf.mRaw = buffer->raw;
1919      mPeerProxy->releaseBuffer(&buf);
1920      TrackBase::releaseBuffer(buffer);
1921  }
1922  
1923  status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
1924                                                                  const struct timespec *timeOut)
1925  {
1926      return mProxy->obtainBuffer(buffer, timeOut);
1927  }
1928  
1929  void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
1930  {
1931      mProxy->releaseBuffer(buffer);
1932      if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
1933          ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting");
1934          start();
1935      }
1936      android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1937  }
1938  
1939  // ----------------------------------------------------------------------------
1940  //      Record
1941  // ----------------------------------------------------------------------------
1942  
1943  AudioFlinger::RecordHandle::RecordHandle(
1944          const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
1945      : BnAudioRecord(),
1946      mRecordTrack(recordTrack)
1947  {
1948  }
1949  
1950  AudioFlinger::RecordHandle::~RecordHandle() {
1951      stop_nonvirtual();
1952      mRecordTrack->destroy();
1953  }
1954  
1955  status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
1956          int triggerSession) {
1957      ALOGV("RecordHandle::start()");
1958      return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
1959  }
1960  
1961  void AudioFlinger::RecordHandle::stop() {
1962      stop_nonvirtual();
1963  }
1964  
1965  void AudioFlinger::RecordHandle::stop_nonvirtual() {
1966      ALOGV("RecordHandle::stop()");
1967      mRecordTrack->stop();
1968  }
1969  
1970  status_t AudioFlinger::RecordHandle::onTransact(
1971      uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
1972  {
1973      return BnAudioRecord::onTransact(code, data, reply, flags);
1974  }
1975  
1976  // ----------------------------------------------------------------------------
1977  
1978  // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
1979  AudioFlinger::RecordThread::RecordTrack::RecordTrack(
1980              RecordThread *thread,
1981              const sp<Client>& client,
1982              uint32_t sampleRate,
1983              audio_format_t format,
1984              audio_channel_mask_t channelMask,
1985              size_t frameCount,
1986              void *buffer,
1987              int sessionId,
1988              int uid,
1989              IAudioFlinger::track_flags_t flags,
1990              track_type type)
1991      :   TrackBase(thread, client, sampleRate, format,
1992                    channelMask, frameCount, buffer, sessionId, uid,
1993                    flags, false /*isOut*/,
1994                    (type == TYPE_DEFAULT) ?
1995                            ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
1996                            ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
1997                    type),
1998          mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0),
1999          // See real initialization of mRsmpInFront at RecordThread::start()
2000          mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL)
2001  {
2002      if (mCblk == NULL) {
2003          return;
2004      }
2005  
2006      mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
2007                                                mFrameSize, !isExternalTrack());
2008  
2009      uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
2010      // FIXME I don't understand either of the channel count checks
2011      if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 &&
2012              channelCount <= FCC_2) {
2013          // sink SR
2014          mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT,
2015                  thread->mChannelCount, sampleRate);
2016          // source SR
2017          mResampler->setSampleRate(thread->mSampleRate);
2018          mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
2019          mResamplerBufferProvider = new ResamplerBufferProvider(this);
2020      }
2021  
2022      if (flags & IAudioFlinger::TRACK_FAST) {
2023          ALOG_ASSERT(thread->mFastTrackAvail);
2024          thread->mFastTrackAvail = false;
2025      }
2026  }
2027  
2028  AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
2029  {
2030      ALOGV("%s", __func__);
2031      delete mResampler;
2032      delete[] mRsmpOutBuffer;
2033      delete mResamplerBufferProvider;
2034  }
2035  
2036  // AudioBufferProvider interface
2037  status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
2038          int64_t pts __unused)
2039  {
2040      ServerProxy::Buffer buf;
2041      buf.mFrameCount = buffer->frameCount;
2042      status_t status = mServerProxy->obtainBuffer(&buf);
2043      buffer->frameCount = buf.mFrameCount;
2044      buffer->raw = buf.mRaw;
2045      if (buf.mFrameCount == 0) {
2046          // FIXME also wake futex so that overrun is noticed more quickly
2047          (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
2048      }
2049      return status;
2050  }
2051  
2052  status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
2053                                                          int triggerSession)
2054  {
2055      sp<ThreadBase> thread = mThread.promote();
2056      if (thread != 0) {
2057          RecordThread *recordThread = (RecordThread *)thread.get();
2058          return recordThread->start(this, event, triggerSession);
2059      } else {
2060          return BAD_VALUE;
2061      }
2062  }
2063  
2064  void AudioFlinger::RecordThread::RecordTrack::stop()
2065  {
2066      sp<ThreadBase> thread = mThread.promote();
2067      if (thread != 0) {
2068          RecordThread *recordThread = (RecordThread *)thread.get();
2069          if (recordThread->stop(this) && isExternalTrack()) {
2070              AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
2071          }
2072      }
2073  }
2074  
2075  void AudioFlinger::RecordThread::RecordTrack::destroy()
2076  {
2077      // see comments at AudioFlinger::PlaybackThread::Track::destroy()
2078      sp<RecordTrack> keep(this);
2079      {
2080          if (isExternalTrack()) {
2081              if (mState == ACTIVE || mState == RESUMING) {
2082                  AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
2083              }
2084              AudioSystem::releaseInput(mThreadIoHandle, (audio_session_t)mSessionId);
2085          }
2086          sp<ThreadBase> thread = mThread.promote();
2087          if (thread != 0) {
2088              Mutex::Autolock _l(thread->mLock);
2089              RecordThread *recordThread = (RecordThread *) thread.get();
2090              recordThread->destroyTrack_l(this);
2091          }
2092      }
2093  }
2094  
2095  void AudioFlinger::RecordThread::RecordTrack::invalidate()
2096  {
2097      // FIXME should use proxy, and needs work
2098      audio_track_cblk_t* cblk = mCblk;
2099      android_atomic_or(CBLK_INVALID, &cblk->mFlags);
2100      android_atomic_release_store(0x40000000, &cblk->mFutex);
2101      // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
2102      (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
2103  }
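// Illustrative sketch (editor's addition, not part of the original file; fenced
// off with #if 0): the raw futex wake used by invalidate() above.  Because the
// client lives in a different process from the server, the shared futex word
// must be woken with FUTEX_WAKE; FUTEX_WAKE_PRIVATE only reaches waiters in the
// same address space.  The stored value mirrors the release store above.
#if 0
static void exampleWakeSharedFutex(volatile int32_t* futexWord) {
    android_atomic_release_store(0x40000000, futexWord);           // publish the new state
    (void) syscall(__NR_futex, futexWord, FUTEX_WAKE, INT_MAX);    // wake cross-process waiters
}
#endif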
2104  
2105  
2106  /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
2107  {
2108      result.append("    Active Client Fmt Chn mask Session S   Server fCount SRate\n");
2109  }
2110  
2111  void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
2112  {
2113      snprintf(buffer, size, "    %6s %6u %3u %08X %7u %1d %08X %6zu %5u\n",
2114              active ? "yes" : "no",
2115              (mClient == 0) ? getpid_cached : mClient->pid(),
2116              mFormat,
2117              mChannelMask,
2118              mSessionId,
2119              mState,
2120              mCblk->mServer,
2121              mFrameCount,
2122              mSampleRate);
2123  
2124  }
2125  
2126  void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
2127  {
2128      if (event == mSyncStartEvent) {
2129          ssize_t framesToDrop = 0;
2130          sp<ThreadBase> threadBase = mThread.promote();
2131          if (threadBase != 0) {
2132              // TODO: use actual buffer filling status instead of 2 buffers when info is available
2133              // from audio HAL
2134              framesToDrop = threadBase->mFrameCount * 2;
2135          }
2136          mFramesToDrop = framesToDrop;
2137      }
2138  }
2139  
2140  void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
2141  {
2142      if (mSyncStartEvent != 0) {
2143          mSyncStartEvent->cancel();
2144          mSyncStartEvent.clear();
2145      }
2146      mFramesToDrop = 0;
2147  }
2148  
2149  
2150  AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
2151                                                       uint32_t sampleRate,
2152                                                       audio_channel_mask_t channelMask,
2153                                                       audio_format_t format,
2154                                                       size_t frameCount,
2155                                                       void *buffer,
2156                                                       IAudioFlinger::track_flags_t flags)
2157      :   RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
2158                  buffer, 0, getuid(), flags, TYPE_PATCH),
2159                  mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
2160  {
2161      uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
2162                                                                  recordThread->sampleRate();
2163      mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
2164      mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
2165  
2166      ALOGV("PatchRecord %p sampleRate %d mPeerTimeout %d.%03d sec",
2167                                        this, sampleRate,
2168                                        (int)mPeerTimeout.tv_sec,
2169                                        (int)(mPeerTimeout.tv_nsec / 1000000));
2170  }
2171  
2172  AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
2173  {
2174  }
2175  
2176  // AudioBufferProvider interface
2177  status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
2178                                                    AudioBufferProvider::Buffer* buffer, int64_t pts)
2179  {
2180      ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::getNextBuffer() called without peer proxy");
2181      Proxy::Buffer buf;
2182      buf.mFrameCount = buffer->frameCount;
2183      status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2184      ALOGV_IF(status != NO_ERROR,
2185               "PatchRecord() %p mPeerProxy->obtainBuffer status %d", this, status);
2186      buffer->frameCount = buf.mFrameCount;
2187      if (buf.mFrameCount == 0) {
2188          return WOULD_BLOCK;
2189      }
2190      status = RecordTrack::getNextBuffer(buffer, pts);
2191      return status;
2192  }
2193  
2194  void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2195  {
2196      ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::releaseBuffer() called without peer proxy");
2197      Proxy::Buffer buf;
2198      buf.mFrameCount = buffer->frameCount;
2199      buf.mRaw = buffer->raw;
2200      mPeerProxy->releaseBuffer(&buf);
2201      TrackBase::releaseBuffer(buffer);
2202  }
2203  
2204  status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
2205                                                                 const struct timespec *timeOut)
2206  {
2207      return mProxy->obtainBuffer(buffer, timeOut);
2208  }
2209  
2210  void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
2211  {
2212      mProxy->releaseBuffer(buffer);
2213  }
2214  
2215  }; // namespace android
2216