      1 /*
      2 **
      3 ** Copyright 2012, The Android Open Source Project
      4 **
      5 ** Licensed under the Apache License, Version 2.0 (the "License");
      6 ** you may not use this file except in compliance with the License.
      7 ** You may obtain a copy of the License at
      8 **
      9 **     http://www.apache.org/licenses/LICENSE-2.0
     10 **
     11 ** Unless required by applicable law or agreed to in writing, software
     12 ** distributed under the License is distributed on an "AS IS" BASIS,
     13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     14 ** See the License for the specific language governing permissions and
     15 ** limitations under the License.
     16 */
     17 
     18 
     19 #define LOG_TAG "AudioFlinger"
     20 //#define LOG_NDEBUG 0
     21 
     22 #include "Configuration.h"
     23 #include <math.h>
     24 #include <sys/syscall.h>
     25 #include <utils/Log.h>
     26 
     27 #include <private/media/AudioTrackShared.h>
     28 
     29 #include <common_time/cc_helper.h>
     30 #include <common_time/local_clock.h>
     31 
     32 #include "AudioMixer.h"
     33 #include "AudioFlinger.h"
     34 #include "ServiceUtilities.h"
     35 
     36 #include <media/nbaio/Pipe.h>
     37 #include <media/nbaio/PipeReader.h>
     38 #include <audio_utils/minifloat.h>
     39 
     40 // ----------------------------------------------------------------------------
     41 
     42 // Note: the following macro is used for extremely verbose logging messages.  In
     43 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
     44 // 0; but one side effect of this is to turn on all ALOGVs as well.  Some messages
     45 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
     46 // turned on.  Do not uncomment the #define below unless you really know what you
     47 // are doing and want to see all of the extremely verbose messages.
     48 //#define VERY_VERY_VERBOSE_LOGGING
     49 #ifdef VERY_VERY_VERBOSE_LOGGING
     50 #define ALOGVV ALOGV
     51 #else
     52 #define ALOGVV(a...) do { } while(0)
     53 #endif
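// Illustrative usage sketch (not part of the original file): with
// VERY_VERY_VERBOSE_LOGGING left undefined, a call such as
//     ALOGVV("obtainBuffer returned %zu frames", buf.mFrameCount);
// compiles to the empty do/while above and costs nothing, regardless of
// LOG_NDEBUG; defining the macro turns the same call into a regular ALOGV.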
     54 
     55 namespace android {
     56 
     57 // ----------------------------------------------------------------------------
     58 //      TrackBase
     59 // ----------------------------------------------------------------------------
     60 
     61 static volatile int32_t nextTrackId = 55;
     62 
     63 // TrackBase constructor must be called with AudioFlinger::mLock held
     64 AudioFlinger::ThreadBase::TrackBase::TrackBase(
     65             ThreadBase *thread,
     66             const sp<Client>& client,
     67             uint32_t sampleRate,
     68             audio_format_t format,
     69             audio_channel_mask_t channelMask,
     70             size_t frameCount,
     71             void *buffer,
     72             int sessionId,
     73             int clientUid,
     74             IAudioFlinger::track_flags_t flags,
     75             bool isOut,
     76             alloc_type alloc,
     77             track_type type)
     78     :   RefBase(),
     79         mThread(thread),
     80         mClient(client),
     81         mCblk(NULL),
     82         // mBuffer
     83         mState(IDLE),
     84         mSampleRate(sampleRate),
     85         mFormat(format),
     86         mChannelMask(channelMask),
     87         mChannelCount(isOut ?
     88                 audio_channel_count_from_out_mask(channelMask) :
     89                 audio_channel_count_from_in_mask(channelMask)),
     90         mFrameSize(audio_is_linear_pcm(format) ?
     91                 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
     92         mFrameCount(frameCount),
     93         mSessionId(sessionId),
     94         mFlags(flags),
     95         mIsOut(isOut),
     96         mServerProxy(NULL),
     97         mId(android_atomic_inc(&nextTrackId)),
     98         mTerminated(false),
     99         mType(type),
    100         mThreadIoHandle(thread->id())
    101 {
    102     // if the caller is us, trust the specified uid
    103     if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
    104         int newclientUid = IPCThreadState::self()->getCallingUid();
    105         if (clientUid != -1 && clientUid != newclientUid) {
    106             ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid);
    107         }
    108         clientUid = newclientUid;
    109     }
    110     // clientUid contains the uid of the app that is responsible for this track, so we can blame
    111     // battery usage on it.
    112     mUid = clientUid;
    113 
    114     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
    115     size_t size = sizeof(audio_track_cblk_t);
    116     size_t bufferSize = (buffer == NULL ? roundup(frameCount) : frameCount) * mFrameSize;
    117     if (buffer == NULL && alloc == ALLOC_CBLK) {
    118         size += bufferSize;
    119     }
    120 
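// Layout sketch (illustration, not normative): for ALLOC_CBLK with no
// caller-supplied buffer, a single shared-memory allocation holds
//     [ audio_track_cblk_t | roundup(frameCount) * mFrameSize data bytes ]
// and mBuffer is later pointed just past the control block (see the
// ALLOC_CBLK case below).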
    121     if (client != 0) {
    122         mCblkMemory = client->heap()->allocate(size);
    123         if (mCblkMemory == 0 ||
    124                 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
    125             ALOGE("not enough memory for AudioTrack size=%zu", size);
    126             client->heap()->dump("AudioTrack");
    127             mCblkMemory.clear();
    128             return;
    129         }
    130     } else {
    131         // this syntax avoids calling the audio_track_cblk_t constructor twice
    132         mCblk = (audio_track_cblk_t *) new uint8_t[size];
    133         // assume mCblk != NULL
    134     }
    135 
    136     // construct the shared structure in-place.
    137     if (mCblk != NULL) {
    138         new(mCblk) audio_track_cblk_t();
    139         switch (alloc) {
    140         case ALLOC_READONLY: {
    141             const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
    142             if (roHeap == 0 ||
    143                     (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
    144                     (mBuffer = mBufferMemory->pointer()) == NULL) {
    145                 ALOGE("not enough memory for read-only buffer size=%zu", bufferSize);
    146                 if (roHeap != 0) {
    147                     roHeap->dump("buffer");
    148                 }
    149                 mCblkMemory.clear();
    150                 mBufferMemory.clear();
    151                 return;
    152             }
    153             memset(mBuffer, 0, bufferSize);
    154             } break;
    155         case ALLOC_PIPE:
    156             mBufferMemory = thread->pipeMemory();
    157             // mBuffer is the virtual address as seen from current process (mediaserver),
    158             // and should normally be coming from mBufferMemory->pointer().
    159             // However in this case the TrackBase does not reference the buffer directly.
    160             // It should reference the buffer via the pipe.
    161             // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
    162             mBuffer = NULL;
    163             break;
    164         case ALLOC_CBLK:
    165             // clear all buffers
    166             if (buffer == NULL) {
    167                 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
    168                 memset(mBuffer, 0, bufferSize);
    169             } else {
    170                 mBuffer = buffer;
    171 #if 0
    172                 mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
    173 #endif
    174             }
    175             break;
    176         case ALLOC_LOCAL:
    177             mBuffer = calloc(1, bufferSize);
    178             break;
    179         case ALLOC_NONE:
    180             mBuffer = buffer;
    181             break;
    182         }
    183 
    184 #ifdef TEE_SINK
    185         if (mTeeSinkTrackEnabled) {
    186             NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat);
    187             if (Format_isValid(pipeFormat)) {
    188                 Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
    189                 size_t numCounterOffers = 0;
    190                 const NBAIO_Format offers[1] = {pipeFormat};
    191                 ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
    192                 ALOG_ASSERT(index == 0);
    193                 PipeReader *pipeReader = new PipeReader(*pipe);
    194                 numCounterOffers = 0;
    195                 index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
    196                 ALOG_ASSERT(index == 0);
    197                 mTeeSink = pipe;
    198                 mTeeSource = pipeReader;
    199             }
    200         }
    201 #endif
    202 
    203     }
    204 }
    205 
    206 status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
    207 {
    208     status_t status;
    209     if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
    210         status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
    211     } else {
    212         status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
    213     }
    214     return status;
    215 }
    216 
    217 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
    218 {
    219 #ifdef TEE_SINK
    220     dumpTee(-1, mTeeSource, mId);
    221 #endif
    222     // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
    223     delete mServerProxy;
    224     if (mCblk != NULL) {
    225         if (mClient == 0) {
    226             delete mCblk;
    227         } else {
    228             mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
    229         }
    230     }
    231     mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
    232     if (mClient != 0) {
    233         // Client destructor must run with AudioFlinger client mutex locked
    234         Mutex::Autolock _l(mClient->audioFlinger()->mClientLock);
    235         // If the client's reference count drops to zero, the associated destructor
    236         // must run with AudioFlinger lock held. Thus the explicit clear() rather than
    237         // relying on the automatic clear() at end of scope.
    238         mClient.clear();
    239     }
    240     // flush the binder command buffer
    241     IPCThreadState::self()->flushCommands();
    242 }
    243 
    244 // AudioBufferProvider interface
    245 // getNextBuffer() = 0;
    246 // This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
    247 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
    248 {
    249 #ifdef TEE_SINK
    250     if (mTeeSink != 0) {
    251         (void) mTeeSink->write(buffer->raw, buffer->frameCount);
    252     }
    253 #endif
    254 
    255     ServerProxy::Buffer buf;
    256     buf.mFrameCount = buffer->frameCount;
    257     buf.mRaw = buffer->raw;
    258     buffer->frameCount = 0;
    259     buffer->raw = NULL;
    260     mServerProxy->releaseBuffer(&buf);
    261 }
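// Illustrative caller-side pairing (hypothetical, for clarity only): a consumer of
// the AudioBufferProvider interface typically does
//     AudioBufferProvider::Buffer b;
//     b.frameCount = framesWanted;
//     if (track->getNextBuffer(&b, pts) == NO_ERROR && b.frameCount > 0) {
//         /* mix or copy b.frameCount frames from b.raw ... */
//         track->releaseBuffer(&b);   // clears b.frameCount / b.raw and advances the server proxy
//     }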
    262 
    263 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
    264 {
    265     mSyncEvents.add(event);
    266     return NO_ERROR;
    267 }
    268 
    269 // ----------------------------------------------------------------------------
    270 //      Playback
    271 // ----------------------------------------------------------------------------
    272 
    273 AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
    274     : BnAudioTrack(),
    275       mTrack(track)
    276 {
    277 }
    278 
    279 AudioFlinger::TrackHandle::~TrackHandle() {
    280     // just stop the track on deletion; associated resources
    281     // will be freed from the main thread once all pending buffers have
    282     // been played. If the track is not in the active track list,
    283     // everything is freed immediately instead.
    284     mTrack->destroy();
    285 }
    286 
    287 sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
    288     return mTrack->getCblk();
    289 }
    290 
    291 status_t AudioFlinger::TrackHandle::start() {
    292     return mTrack->start();
    293 }
    294 
    295 void AudioFlinger::TrackHandle::stop() {
    296     mTrack->stop();
    297 }
    298 
    299 void AudioFlinger::TrackHandle::flush() {
    300     mTrack->flush();
    301 }
    302 
    303 void AudioFlinger::TrackHandle::pause() {
    304     mTrack->pause();
    305 }
    306 
    307 status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
    308 {
    309     return mTrack->attachAuxEffect(EffectId);
    310 }
    311 
    312 status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
    313                                                          sp<IMemory>* buffer) {
    314     if (!mTrack->isTimedTrack())
    315         return INVALID_OPERATION;
    316 
    317     PlaybackThread::TimedTrack* tt =
    318             reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    319     return tt->allocateTimedBuffer(size, buffer);
    320 }
    321 
    322 status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
    323                                                      int64_t pts) {
    324     if (!mTrack->isTimedTrack())
    325         return INVALID_OPERATION;
    326 
    327     if (buffer == 0 || buffer->pointer() == NULL) {
    328         ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()");
    329         return BAD_VALUE;
    330     }
    331 
    332     PlaybackThread::TimedTrack* tt =
    333             reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    334     return tt->queueTimedBuffer(buffer, pts);
    335 }
    336 
    337 status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
    338     const LinearTransform& xform, int target) {
    339 
    340     if (!mTrack->isTimedTrack())
    341         return INVALID_OPERATION;
    342 
    343     PlaybackThread::TimedTrack* tt =
    344             reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    345     return tt->setMediaTimeTransform(
    346         xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
    347 }
    348 
    349 status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
    350     return mTrack->setParameters(keyValuePairs);
    351 }
    352 
    353 status_t AudioFlinger::TrackHandle::getTimestamp(AudioTimestamp& timestamp)
    354 {
    355     return mTrack->getTimestamp(timestamp);
    356 }
    357 
    358 
    359 void AudioFlinger::TrackHandle::signal()
    360 {
    361     return mTrack->signal();
    362 }
    363 
    364 status_t AudioFlinger::TrackHandle::onTransact(
    365     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
    366 {
    367     return BnAudioTrack::onTransact(code, data, reply, flags);
    368 }
    369 
    370 // ----------------------------------------------------------------------------
    371 
    372 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
    373 AudioFlinger::PlaybackThread::Track::Track(
    374             PlaybackThread *thread,
    375             const sp<Client>& client,
    376             audio_stream_type_t streamType,
    377             uint32_t sampleRate,
    378             audio_format_t format,
    379             audio_channel_mask_t channelMask,
    380             size_t frameCount,
    381             void *buffer,
    382             const sp<IMemory>& sharedBuffer,
    383             int sessionId,
    384             int uid,
    385             IAudioFlinger::track_flags_t flags,
    386             track_type type)
    387     :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
    388                   (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
    389                   sessionId, uid, flags, true /*isOut*/,
    390                   (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
    391                   type),
    392     mFillingUpStatus(FS_INVALID),
    393     // mRetryCount initialized later when needed
    394     mSharedBuffer(sharedBuffer),
    395     mStreamType(streamType),
    396     mName(-1),  // see note below
    397     mMainBuffer(thread->mixBuffer()),
    398     mAuxBuffer(NULL),
    399     mAuxEffectId(0), mHasVolumeController(false),
    400     mPresentationCompleteFrames(0),
    401     mFastIndex(-1),
    402     mCachedVolume(1.0),
    403     mIsInvalid(false),
    404     mAudioTrackServerProxy(NULL),
    405     mResumeToStopping(false),
    406     mFlushHwPending(false),
    407     mPreviousValid(false),
    408     mPreviousFramesWritten(0)
    409     // mPreviousTimestamp
    410 {
    411     // client == 0 implies sharedBuffer == 0
    412     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
    413 
    414     ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
    415             sharedBuffer->size());
    416 
    417     if (mCblk == NULL) {
    418         return;
    419     }
    420 
    421     if (sharedBuffer == 0) {
    422         mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
    423                 mFrameSize, !isExternalTrack(), sampleRate);
    424     } else {
    425         mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
    426                 mFrameSize);
    427     }
    428     mServerProxy = mAudioTrackServerProxy;
    429 
    430     mName = thread->getTrackName_l(channelMask, format, sessionId);
    431     if (mName < 0) {
    432         ALOGE("no more track names available");
    433         return;
    434     }
    435     // only allocate a fast track index if we were able to allocate a normal track name
    436     if (flags & IAudioFlinger::TRACK_FAST) {
    437         mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
    438         ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
    439         int i = __builtin_ctz(thread->mFastTrackAvailMask);
    440         ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
    441         // FIXME This is too eager.  We allocate a fast track index before the
    442         //       fast track becomes active.  Since fast tracks are a scarce resource,
    443         //       this means we are potentially denying other more important fast tracks from
    444         //       being created.  It would be better to allocate the index dynamically.
    445         mFastIndex = i;
    446         // Read the initial underruns because this field is never cleared by the fast mixer
    447         mObservedUnderruns = thread->getFastTrackUnderruns(i);
    448         thread->mFastTrackAvailMask &= ~(1 << i);
    449     }
    450 }
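// Fast-track slot bookkeeping, illustrated (assumption, not part of the original
// source): mFastTrackAvailMask is a bitmask of free fast-mixer slots.
//     int i = __builtin_ctz(mask);    // lowest set bit = lowest free index
//     mask &= ~(1 << i);              // mark slot i as in use (as done above)
//     mask |= 1 << i;                 // hypothetical release when the track goes away
// The assert above excludes index 0, which appears to be reserved for the normal
// mixer's output within the fast mixer.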
    451 
    452 AudioFlinger::PlaybackThread::Track::~Track()
    453 {
    454     ALOGV("PlaybackThread::Track destructor");
    455 
    456     // The destructor would clear mSharedBuffer,
    457     // but it will not push the decremented reference count,
    458     // leaving the client's IMemory dangling indefinitely.
    459     // This prevents that leak.
    460     if (mSharedBuffer != 0) {
    461         mSharedBuffer.clear();
    462     }
    463 }
    464 
    465 status_t AudioFlinger::PlaybackThread::Track::initCheck() const
    466 {
    467     status_t status = TrackBase::initCheck();
    468     if (status == NO_ERROR && mName < 0) {
    469         status = NO_MEMORY;
    470     }
    471     return status;
    472 }
    473 
    474 void AudioFlinger::PlaybackThread::Track::destroy()
    475 {
    476     // NOTE: destroyTrack_l() can remove a strong reference to this Track
    477     // by removing it from mTracks vector, so there is a risk that this Track's
    478     // destructor is called. As the destructor needs to lock mLock,
    479     // we must acquire a strong reference on this Track before locking mLock
    480     // here so that the destructor is called only when exiting this function.
    481     // On the other hand, as long as Track::destroy() is only called by
    482     // TrackHandle destructor, the TrackHandle still holds a strong ref on
    483     // this Track with its member mTrack.
    484     sp<Track> keep(this);
    485     { // scope for mLock
    486         bool wasActive = false;
    487         sp<ThreadBase> thread = mThread.promote();
    488         if (thread != 0) {
    489             Mutex::Autolock _l(thread->mLock);
    490             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    491             wasActive = playbackThread->destroyTrack_l(this);
    492         }
    493         if (isExternalTrack() && !wasActive) {
    494             AudioSystem::releaseOutput(mThreadIoHandle);
    495         }
    496     }
    497 }
    498 
    499 /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
    500 {
    501     result.append("    Name Active Client Type      Fmt Chn mask Session fCount S F SRate  "
    502                   "L dB  R dB    Server Main buf  Aux Buf Flags UndFrmCnt\n");
    503 }
    504 
    505 void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size, bool active)
    506 {
    507     gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
    508     if (isFastTrack()) {
    509         sprintf(buffer, "    F %2d", mFastIndex);
    510     } else if (mName >= AudioMixer::TRACK0) {
    511         sprintf(buffer, "    %4d", mName - AudioMixer::TRACK0);
    512     } else {
    513         sprintf(buffer, "    none");
    514     }
    515     track_state state = mState;
    516     char stateChar;
    517     if (isTerminated()) {
    518         stateChar = 'T';
    519     } else {
    520         switch (state) {
    521         case IDLE:
    522             stateChar = 'I';
    523             break;
    524         case STOPPING_1:
    525             stateChar = 's';
    526             break;
    527         case STOPPING_2:
    528             stateChar = '5';
    529             break;
    530         case STOPPED:
    531             stateChar = 'S';
    532             break;
    533         case RESUMING:
    534             stateChar = 'R';
    535             break;
    536         case ACTIVE:
    537             stateChar = 'A';
    538             break;
    539         case PAUSING:
    540             stateChar = 'p';
    541             break;
    542         case PAUSED:
    543             stateChar = 'P';
    544             break;
    545         case FLUSHED:
    546             stateChar = 'F';
    547             break;
    548         default:
    549             stateChar = '?';
    550             break;
    551         }
    552     }
    553     char nowInUnderrun;
    554     switch (mObservedUnderruns.mBitFields.mMostRecent) {
    555     case UNDERRUN_FULL:
    556         nowInUnderrun = ' ';
    557         break;
    558     case UNDERRUN_PARTIAL:
    559         nowInUnderrun = '<';
    560         break;
    561     case UNDERRUN_EMPTY:
    562         nowInUnderrun = '*';
    563         break;
    564     default:
    565         nowInUnderrun = '?';
    566         break;
    567     }
    568     snprintf(&buffer[8], size-8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g  "
    569                                  "%08X %p %p 0x%03X %9u%c\n",
    570             active ? "yes" : "no",
    571             (mClient == 0) ? getpid_cached : mClient->pid(),
    572             mStreamType,
    573             mFormat,
    574             mChannelMask,
    575             mSessionId,
    576             mFrameCount,
    577             stateChar,
    578             mFillingUpStatus,
    579             mAudioTrackServerProxy->getSampleRate(),
    580             20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
    581             20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
    582             mCblk->mServer,
    583             mMainBuffer,
    584             mAuxBuffer,
    585             mCblk->mFlags,
    586             mAudioTrackServerProxy->getUnderrunFrames(),
    587             nowInUnderrun);
    588 }
    589 
    590 uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
    591     return mAudioTrackServerProxy->getSampleRate();
    592 }
    593 
    594 // AudioBufferProvider interface
    595 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
    596         AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
    597 {
    598     ServerProxy::Buffer buf;
    599     size_t desiredFrames = buffer->frameCount;
    600     buf.mFrameCount = desiredFrames;
    601     status_t status = mServerProxy->obtainBuffer(&buf);
    602     buffer->frameCount = buf.mFrameCount;
    603     buffer->raw = buf.mRaw;
    604     if (buf.mFrameCount == 0) {
    605         mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
    606     }
    607     return status;
    608 }
    609 
    610 // releaseBuffer() is not overridden
    611 
    612 // ExtendedAudioBufferProvider interface
    613 
    614 // Note that framesReady() takes a mutex on the control block using tryLock().
    615 // This could result in priority inversion if framesReady() is called by the normal mixer,
    616 // as the normal mixer thread runs at lower
    617 // priority than the client's callback thread:  there is a short window within framesReady()
    618 // during which the normal mixer could be preempted, and the client callback would block.
    619 // Another problem can occur if framesReady() is called by the fast mixer:
    620 // the tryLock() could block for up to 1 ms, and a sequence of these could delay the fast mixer.
    621 // FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
    622 size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
    623     return mAudioTrackServerProxy->framesReady();
    624 }
    625 
    626 size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
    627 {
    628     return mAudioTrackServerProxy->framesReleased();
    629 }
    630 
    631 // Don't call for fast tracks; the framesReady() could result in priority inversion
    632 bool AudioFlinger::PlaybackThread::Track::isReady() const {
    633     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
    634         return true;
    635     }
    636 
    637     if (isStopping()) {
    638         if (framesReady() > 0) {
    639             mFillingUpStatus = FS_FILLED;
    640         }
    641         return true;
    642     }
    643 
    644     if (framesReady() >= mFrameCount ||
    645             (mCblk->mFlags & CBLK_FORCEREADY)) {
    646         mFillingUpStatus = FS_FILLED;
    647         android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
    648         return true;
    649     }
    650     return false;
    651 }
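// Readiness illustration (informal): a freshly started streaming track sits in
// FS_FILLING and is not "ready" until the client has written at least mFrameCount
// frames (one full client buffer), or the CBLK_FORCEREADY flag is set; once ready
// it moves to FS_FILLED and stays ready until reset() puts it back to FS_FILLING.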
    652 
    653 status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
    654                                                     int triggerSession __unused)
    655 {
    656     status_t status = NO_ERROR;
    657     ALOGV("start(%d), calling pid %d session %d",
    658             mName, IPCThreadState::self()->getCallingPid(), mSessionId);
    659 
    660     sp<ThreadBase> thread = mThread.promote();
    661     if (thread != 0) {
    662         if (isOffloaded()) {
    663             Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
    664             Mutex::Autolock _lth(thread->mLock);
    665             sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
    666             if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
    667                     (ec != 0 && ec->isNonOffloadableEnabled())) {
    668                 invalidate();
    669                 return PERMISSION_DENIED;
    670             }
    671         }
    672         Mutex::Autolock _lth(thread->mLock);
    673         track_state state = mState;
    674         // here the track could be either new, or restarted
    675         // in both cases "unstop" the track
    676 
    677         // A track that was stopping (STOPPING_1) and was then paused needs special
    678         // handling when resumed: it must go back to STOPPING_1 rather than RESUMING (see below).
    679 
    680         if (state == PAUSED || state == PAUSING) {
    681             if (mResumeToStopping) {
    682                 // the track was stopping before the pause, so resume back to STOPPING_1
    683                 mState = TrackBase::STOPPING_1;
    684                 ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
    685             } else {
    686                 mState = TrackBase::RESUMING;
    687                 ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
    688             }
    689         } else {
    690             mState = TrackBase::ACTIVE;
    691             ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
    692         }
    693 
    694         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    695         status = playbackThread->addTrack_l(this);
    696         if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
    697             triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
    698             //  restore previous state if start was rejected by policy manager
    699             if (status == PERMISSION_DENIED) {
    700                 mState = state;
    701             }
    702         }
    703         // track was already in the active list, not a problem
    704         if (status == ALREADY_EXISTS) {
    705             status = NO_ERROR;
    706         } else {
    707             // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
    708             // It is usually unsafe to access the server proxy from a binder thread.
    709             // But in this case we know the mixer thread (whether normal mixer or fast mixer)
    710             // isn't looking at this track yet:  we still hold the normal mixer thread lock,
    711             // and for fast tracks the track is not yet in the fast mixer thread's active set.
    712             ServerProxy::Buffer buffer;
    713             buffer.mFrameCount = 1;
    714             (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
    715         }
    716     } else {
    717         status = BAD_VALUE;
    718     }
    719     return status;
    720 }
    721 
    722 void AudioFlinger::PlaybackThread::Track::stop()
    723 {
    724     ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    725     sp<ThreadBase> thread = mThread.promote();
    726     if (thread != 0) {
    727         Mutex::Autolock _l(thread->mLock);
    728         track_state state = mState;
    729         if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
    730             // If the track is not active (PAUSED and buffers full), flush buffers
    731             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    732             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
    733                 reset();
    734                 mState = STOPPED;
    735             } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
    736                 mState = STOPPED;
    737             } else {
    738                 // For fast tracks prepareTracks_l() will set state to STOPPING_2
    739                 // when presentation is complete
    740                 // For an offloaded track this starts a drain and state will
    741                 // move to STOPPING_2 when drain completes and then STOPPED
    742                 mState = STOPPING_1;
    743             }
    744             ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
    745                     playbackThread);
    746         }
    747     }
    748 }
    749 
    750 void AudioFlinger::PlaybackThread::Track::pause()
    751 {
    752     ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    753     sp<ThreadBase> thread = mThread.promote();
    754     if (thread != 0) {
    755         Mutex::Autolock _l(thread->mLock);
    756         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    757         switch (mState) {
    758         case STOPPING_1:
    759         case STOPPING_2:
    760             if (!isOffloaded()) {
    761                 /* nothing to do if track is not offloaded */
    762                 break;
    763             }
    764 
    765             // Offloaded track was draining, we need to carry on draining when resumed
    766             mResumeToStopping = true;
    767             // fall through...
    768         case ACTIVE:
    769         case RESUMING:
    770             mState = PAUSING;
    771             ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
    772             playbackThread->broadcast_l();
    773             break;
    774 
    775         default:
    776             break;
    777         }
    778     }
    779 }
    780 
    781 void AudioFlinger::PlaybackThread::Track::flush()
    782 {
    783     ALOGV("flush(%d)", mName);
    784     sp<ThreadBase> thread = mThread.promote();
    785     if (thread != 0) {
    786         Mutex::Autolock _l(thread->mLock);
    787         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    788 
    789         if (isOffloaded()) {
    790             // If offloaded we allow flush during any state except terminated
    791             // and keep the track active to avoid problems if the user is seeking
    792             // rapidly and the underlying hardware has a significant delay handling
    793             // a pause
    794             if (isTerminated()) {
    795                 return;
    796             }
    797 
    798             ALOGV("flush: offload flush");
    799             reset();
    800 
    801             if (mState == STOPPING_1 || mState == STOPPING_2) {
    802                 ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
    803                 mState = ACTIVE;
    804             }
    805 
    806             if (mState == ACTIVE) {
    807                 ALOGV("flush called in active state, resetting buffer time out retry count");
    808                 mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
    809             }
    810 
    811             mFlushHwPending = true;
    812             mResumeToStopping = false;
    813         } else {
    814             if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
    815                     mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
    816                 return;
    817             }
    818             // No point remaining in PAUSED state after a flush => go to
    819             // FLUSHED state
    820             mState = FLUSHED;
    821             // do not reset the track if it is still in the process of being stopped or paused.
    822             // this will be done by prepareTracks_l() when the track is stopped.
    823             // prepareTracks_l() will see mState == FLUSHED, then
    824             // remove from active track list, reset(), and trigger presentation complete
    825             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
    826                 reset();
    827                 if (thread->type() == ThreadBase::DIRECT) {
    828                     DirectOutputThread *t = (DirectOutputThread *)playbackThread;
    829                     t->flushHw_l();
    830                 }
    831             }
    832         }
    833         // Prevent flush being lost if the track is flushed and then resumed
    834         // before mixer thread can run. This is important when offloading
    835         // because the hardware buffer could hold a large amount of audio
    836         playbackThread->broadcast_l();
    837     }
    838 }
    839 
    840 // must be called with thread lock held
    841 void AudioFlinger::PlaybackThread::Track::flushAck()
    842 {
    843     if (!isOffloaded())
    844         return;
    845 
    846     mFlushHwPending = false;
    847 }
    848 
    849 void AudioFlinger::PlaybackThread::Track::reset()
    850 {
    851     // Do not reset twice to avoid discarding data written just after a flush and before
    852     // the audioflinger thread detects the track is stopped.
    853     if (!mResetDone) {
    854         // Force underrun condition to avoid false underrun callback until first data is
    855         // written to buffer
    856         android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
    857         mFillingUpStatus = FS_FILLING;
    858         mResetDone = true;
    859         if (mState == FLUSHED) {
    860             mState = IDLE;
    861         }
    862     }
    863 }
    864 
    865 status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
    866 {
    867     sp<ThreadBase> thread = mThread.promote();
    868     if (thread == 0) {
    869         ALOGE("thread is dead");
    870         return FAILED_TRANSACTION;
    871     } else if ((thread->type() == ThreadBase::DIRECT) ||
    872                     (thread->type() == ThreadBase::OFFLOAD)) {
    873         return thread->setParameters(keyValuePairs);
    874     } else {
    875         return PERMISSION_DENIED;
    876     }
    877 }
    878 
    879 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
    880 {
    881     // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
    882     if (isFastTrack()) {
    883         // FIXME no lock held to set mPreviousValid = false
    884         return INVALID_OPERATION;
    885     }
    886     sp<ThreadBase> thread = mThread.promote();
    887     if (thread == 0) {
    888         // FIXME no lock held to set mPreviousValid = false
    889         return INVALID_OPERATION;
    890     }
    891     Mutex::Autolock _l(thread->mLock);
    892     PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    893     if (!isOffloaded() && !isDirect()) {
    894         if (!playbackThread->mLatchQValid) {
    895             mPreviousValid = false;
    896             return INVALID_OPERATION;
    897         }
    898         uint32_t unpresentedFrames =
    899                 ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
    900                 playbackThread->mSampleRate;
    901         // FIXME Since we're using a raw pointer as the key, it is theoretically possible
    902         //       for a brand new track to share the same address as a recently destroyed
    903         //       track, and thus for us to get the frames released of the wrong track.
    904         //       It is unlikely that we would be able to call getTimestamp() so quickly
    905         //       right after creating a new track.  Nevertheless, the index here should
    906         //       be changed to something that is unique.  Or use a completely different strategy.
    907         ssize_t i = playbackThread->mLatchQ.mFramesReleased.indexOfKey(this);
    908         uint32_t framesWritten = i >= 0 ?
    909                 playbackThread->mLatchQ.mFramesReleased[i] :
    910                 mAudioTrackServerProxy->framesReleased();
    911         bool checkPreviousTimestamp = mPreviousValid && framesWritten >= mPreviousFramesWritten;
    912         if (framesWritten < unpresentedFrames) {
    913             mPreviousValid = false;
    914             return INVALID_OPERATION;
    915         }
    916         mPreviousFramesWritten = framesWritten;
    917         uint32_t position = framesWritten - unpresentedFrames;
    918         struct timespec time = playbackThread->mLatchQ.mTimestamp.mTime;
    919         if (checkPreviousTimestamp) {
    920             if (time.tv_sec < mPreviousTimestamp.mTime.tv_sec ||
    921                     (time.tv_sec == mPreviousTimestamp.mTime.tv_sec &&
    922                     time.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) {
    923                 ALOGW("Time is going backwards");
    924             }
    925             // position can bobble slightly as an artifact; this hides the bobble
    926             static const uint32_t MINIMUM_POSITION_DELTA = 8u;
    927             if ((position <= mPreviousTimestamp.mPosition) ||
    928                     (position - mPreviousTimestamp.mPosition) < MINIMUM_POSITION_DELTA) {
    929                 position = mPreviousTimestamp.mPosition;
    930                 time = mPreviousTimestamp.mTime;
    931             }
    932         }
    933         timestamp.mPosition = position;
    934         timestamp.mTime = time;
    935         mPreviousTimestamp = timestamp;
    936         mPreviousValid = true;
    937         return NO_ERROR;
    938     }
    939 
    940     return playbackThread->getTimestamp_l(timestamp);
    941 }
    942 
    943 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
    944 {
    945     status_t status = DEAD_OBJECT;
    946     sp<ThreadBase> thread = mThread.promote();
    947     if (thread != 0) {
    948         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    949         sp<AudioFlinger> af = mClient->audioFlinger();
    950 
    951         Mutex::Autolock _l(af->mLock);
    952 
    953         sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
    954 
    955         if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
    956             Mutex::Autolock _dl(playbackThread->mLock);
    957             Mutex::Autolock _sl(srcThread->mLock);
    958             sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
    959             if (chain == 0) {
    960                 return INVALID_OPERATION;
    961             }
    962 
    963             sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
    964             if (effect == 0) {
    965                 return INVALID_OPERATION;
    966             }
    967             srcThread->removeEffect_l(effect);
    968             status = playbackThread->addEffect_l(effect);
    969             if (status != NO_ERROR) {
    970                 srcThread->addEffect_l(effect);
    971                 return INVALID_OPERATION;
    972             }
    973             // removeEffect_l() has stopped the effect if it was active so it must be restarted
    974             if (effect->state() == EffectModule::ACTIVE ||
    975                     effect->state() == EffectModule::STOPPING) {
    976                 effect->start();
    977             }
    978 
    979             sp<EffectChain> dstChain = effect->chain().promote();
    980             if (dstChain == 0) {
    981                 srcThread->addEffect_l(effect);
    982                 return INVALID_OPERATION;
    983             }
    984             AudioSystem::unregisterEffect(effect->id());
    985             AudioSystem::registerEffect(&effect->desc(),
    986                                         srcThread->id(),
    987                                         dstChain->strategy(),
    988                                         AUDIO_SESSION_OUTPUT_MIX,
    989                                         effect->id());
    990             AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
    991         }
    992         status = playbackThread->attachAuxEffect(this, EffectId);
    993     }
    994     return status;
    995 }
    996 
    997 void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
    998 {
    999     mAuxEffectId = EffectId;
   1000     mAuxBuffer = buffer;
   1001 }
   1002 
   1003 bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
   1004                                                          size_t audioHalFrames)
   1005 {
   1006     // a track is considered presented when the total number of frames written to audio HAL
   1007     // corresponds to the number of frames written when presentationComplete() is called for the
   1008     // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
   1009     // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
   1010     // to detect when all frames have been played. In this case framesWritten isn't
   1011     // useful because it doesn't always reflect whether there is data in the h/w
   1012     // buffers, particularly if a track has been paused and resumed during draining
   1013     ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
   1014                       mPresentationCompleteFrames, framesWritten);
   1015     if (mPresentationCompleteFrames == 0) {
   1016         mPresentationCompleteFrames = framesWritten + audioHalFrames;
   1017         ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
   1018                   mPresentationCompleteFrames, audioHalFrames);
   1019     }
   1020 
   1021     if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
   1022         triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
   1023         mAudioTrackServerProxy->setStreamEndDone();
   1024         return true;
   1025     }
   1026     return false;
   1027 }
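// Worked example (illustrative numbers): on the first call, with framesWritten == 960
// and audioHalFrames == 1920, mPresentationCompleteFrames becomes 2880; presentation
// is then reported complete once framesWritten reaches 2880 (or immediately for an
// offloaded track, which relies on the HAL drain rather than the frame count).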
   1028 
   1029 void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
   1030 {
   1031     for (size_t i = 0; i < mSyncEvents.size(); i++) {
   1032         if (mSyncEvents[i]->type() == type) {
   1033             mSyncEvents[i]->trigger();
   1034             mSyncEvents.removeAt(i);
   1035             i--;
   1036         }
   1037     }
   1038 }
   1039 
   1040 // implement VolumeBufferProvider interface
   1041 
   1042 gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
   1043 {
   1044     // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
   1045     ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
   1046     gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
   1047     float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
   1048     float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
   1049     // track volumes come from shared memory, so can't be trusted and must be clamped
   1050     if (vl > GAIN_FLOAT_UNITY) {
   1051         vl = GAIN_FLOAT_UNITY;
   1052     }
   1053     if (vr > GAIN_FLOAT_UNITY) {
   1054         vr = GAIN_FLOAT_UNITY;
   1055     }
   1056     // now apply the cached master volume and stream type volume;
   1057     // this is trusted but lacks any synchronization or barrier so may be stale
   1058     float v = mCachedVolume;
   1059     vl *= v;
   1060     vr *= v;
   1061     // re-combine into packed minifloat
   1062     vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
   1063     // FIXME look at mute, pause, and stop flags
   1064     return vlr;
   1065 }
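// Numeric sketch (illustration only): if the shared-memory minifloat decodes to
// vl == 1.3, it is clamped to GAIN_FLOAT_UNITY (1.0) because client-supplied values
// cannot be trusted; with mCachedVolume == 0.5 the re-packed left gain then encodes
// approximately 0.5 (and the right gain 0.5 * vr).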
   1066 
   1067 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
   1068 {
   1069     if (isTerminated() || mState == PAUSED ||
   1070             ((framesReady() == 0) && ((mSharedBuffer != 0) ||
   1071                                       (mState == STOPPED)))) {
   1072         ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
   1073               mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
   1074         event->cancel();
   1075         return INVALID_OPERATION;
   1076     }
   1077     (void) TrackBase::setSyncEvent(event);
   1078     return NO_ERROR;
   1079 }
   1080 
   1081 void AudioFlinger::PlaybackThread::Track::invalidate()
   1082 {
   1083     // FIXME should use proxy, and needs work
   1084     audio_track_cblk_t* cblk = mCblk;
   1085     android_atomic_or(CBLK_INVALID, &cblk->mFlags);
   1086     android_atomic_release_store(0x40000000, &cblk->mFutex);
   1087     // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
   1088     (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
   1089     mIsInvalid = true;
   1090 }
   1091 
   1092 void AudioFlinger::PlaybackThread::Track::signal()
   1093 {
   1094     sp<ThreadBase> thread = mThread.promote();
   1095     if (thread != 0) {
   1096         PlaybackThread *t = (PlaybackThread *)thread.get();
   1097         Mutex::Autolock _l(t->mLock);
   1098         t->broadcast_l();
   1099     }
   1100 }
   1101 
   1102 // To be called with thread lock held
   1103 bool AudioFlinger::PlaybackThread::Track::isResumePending() {
   1104 
   1105     if (mState == RESUMING)
   1106         return true;
   1107     /* Resume is pending if track was stopping before pause was called */
   1108     if (mState == STOPPING_1 &&
   1109         mResumeToStopping)
   1110         return true;
   1111 
   1112     return false;
   1113 }
   1114 
   1115 // To be called with thread lock held
   1116 void AudioFlinger::PlaybackThread::Track::resumeAck() {
   1117 
   1118 
   1119     if (mState == RESUMING)
   1120         mState = ACTIVE;
   1121 
   1122     // The other possibility of a pending resume is the STOPPING_1 state.
   1123     // Do not update the state from stopping, as doing so would prevent
   1124     // drain from being called.
   1125     if (mState == STOPPING_1) {
   1126         mResumeToStopping = false;
   1127     }
   1128 }
   1129 // ----------------------------------------------------------------------------
   1130 
   1131 sp<AudioFlinger::PlaybackThread::TimedTrack>
   1132 AudioFlinger::PlaybackThread::TimedTrack::create(
   1133             PlaybackThread *thread,
   1134             const sp<Client>& client,
   1135             audio_stream_type_t streamType,
   1136             uint32_t sampleRate,
   1137             audio_format_t format,
   1138             audio_channel_mask_t channelMask,
   1139             size_t frameCount,
   1140             const sp<IMemory>& sharedBuffer,
   1141             int sessionId,
   1142             int uid)
   1143 {
   1144     if (!client->reserveTimedTrack())
   1145         return 0;
   1146 
   1147     return new TimedTrack(
   1148         thread, client, streamType, sampleRate, format, channelMask, frameCount,
   1149         sharedBuffer, sessionId, uid);
   1150 }
   1151 
   1152 AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
   1153             PlaybackThread *thread,
   1154             const sp<Client>& client,
   1155             audio_stream_type_t streamType,
   1156             uint32_t sampleRate,
   1157             audio_format_t format,
   1158             audio_channel_mask_t channelMask,
   1159             size_t frameCount,
   1160             const sp<IMemory>& sharedBuffer,
   1161             int sessionId,
   1162             int uid)
   1163     : Track(thread, client, streamType, sampleRate, format, channelMask,
   1164             frameCount, (sharedBuffer != 0) ? sharedBuffer->pointer() : NULL, sharedBuffer,
   1165                     sessionId, uid, IAudioFlinger::TRACK_TIMED, TYPE_TIMED),
   1166       mQueueHeadInFlight(false),
   1167       mTrimQueueHeadOnRelease(false),
   1168       mFramesPendingInQueue(0),
   1169       mTimedSilenceBuffer(NULL),
   1170       mTimedSilenceBufferSize(0),
   1171       mTimedAudioOutputOnTime(false),
   1172       mMediaTimeTransformValid(false)
   1173 {
   1174     LocalClock lc;
   1175     mLocalTimeFreq = lc.getLocalFreq();
   1176 
   1177     mLocalTimeToSampleTransform.a_zero = 0;
   1178     mLocalTimeToSampleTransform.b_zero = 0;
   1179     mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
   1180     mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
   1181     LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
   1182                             &mLocalTimeToSampleTransform.a_to_b_denom);
   1183 
   1184     mMediaTimeToSampleTransform.a_zero = 0;
   1185     mMediaTimeToSampleTransform.b_zero = 0;
   1186     mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
   1187     mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
   1188     LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
   1189                             &mMediaTimeToSampleTransform.a_to_b_denom);
   1190 }
   1191 
   1192 AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
   1193     mClient->releaseTimedTrack();
   1194     delete [] mTimedSilenceBuffer;
   1195 }
   1196 
   1197 status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
   1198     size_t size, sp<IMemory>* buffer) {
   1199 
   1200     Mutex::Autolock _l(mTimedBufferQueueLock);
   1201 
   1202     trimTimedBufferQueue_l();
   1203 
   1204     // lazily initialize the shared memory heap for timed buffers
   1205     if (mTimedMemoryDealer == NULL) {
   1206         const int kTimedBufferHeapSize = 512 << 10;
   1207 
   1208         mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
   1209                                               "AudioFlingerTimed");
   1210         if (mTimedMemoryDealer == NULL) {
   1211             return NO_MEMORY;
   1212         }
   1213     }
   1214 
   1215     sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
   1216     if (newBuffer == 0 || newBuffer->pointer() == NULL) {
   1217         return NO_MEMORY;
   1218     }
   1219 
   1220     *buffer = newBuffer;
   1221     return NO_ERROR;
   1222 }
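// Note on the lazily created heap above: kTimedBufferHeapSize == 512 << 10, i.e.
// 512 KiB (524288 bytes) per TimedTrack, shared by all of that track's timed buffers
// and handed out by the MemoryDealer on each allocateTimedBuffer() call.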
   1223 
   1224 // caller must hold mTimedBufferQueueLock
   1225 void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
   1226     int64_t mediaTimeNow;
   1227     {
   1228         Mutex::Autolock mttLock(mMediaTimeTransformLock);
   1229         if (!mMediaTimeTransformValid)
   1230             return;
   1231 
   1232         int64_t targetTimeNow;
   1233         status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
   1234             ? mCCHelper.getCommonTime(&targetTimeNow)
   1235             : mCCHelper.getLocalTime(&targetTimeNow);
   1236 
   1237         if (OK != res)
   1238             return;
   1239 
   1240         if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
   1241                                                     &mediaTimeNow)) {
   1242             return;
   1243         }
   1244     }
   1245 
   1246     size_t trimEnd;
   1247     for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
   1248         int64_t bufEnd;
   1249 
   1250         if ((trimEnd + 1) < mTimedBufferQueue.size()) {
   1251             // We have a next buffer.  Just use its PTS as the PTS of the frame
   1252             // following the last frame in this buffer.  If the stream is sparse
   1253             // (i.e., there are deliberate gaps left in the stream which should be
   1254             // filled with silence by the TimedAudioTrack), then this can result
   1255             // in one extra buffer being left un-trimmed when it could have
   1256             // been.  In general, this is not typical, and we would rather
   1257             // optimize away the TS calculation below for the more common case
   1258             // where PTSes are contiguous.
   1259             bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
   1260         } else {
   1261             // We have no next buffer.  Compute the PTS of the frame following
   1262             // the last frame in this buffer by computing the duration of
   1263             // this buffer in media time units and adding it to the PTS of the
   1264             // buffer.
   1265             int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
   1266                                / mFrameSize;
   1267 
   1268             if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
   1269                                                                 &bufEnd)) {
   1270                 ALOGE("Failed to convert frame count of %lld to media time"
   1271                       " duration" " (scale factor %d/%u) in %s",
   1272                       frameCount,
   1273                       mMediaTimeToSampleTransform.a_to_b_numer,
   1274                       mMediaTimeToSampleTransform.a_to_b_denom,
   1275                       __PRETTY_FUNCTION__);
   1276                 break;
   1277             }
   1278             bufEnd += mTimedBufferQueue[trimEnd].pts();
   1279         }
   1280 
   1281         if (bufEnd > mediaTimeNow)
   1282             break;
   1283 
   1284         // Is the buffer we want to use in the middle of a mix operation right
   1285         // now?  If so, don't actually trim it.  Just wait for the releaseBuffer
   1286         // from the mixer which should be coming back shortly.
   1287         if (!trimEnd && mQueueHeadInFlight) {
   1288             mTrimQueueHeadOnRelease = true;
   1289         }
   1290     }
   1291 
   1292     size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
   1293     if (trimStart < trimEnd) {
   1294         // Update the bookkeeping for framesReady()
   1295         for (size_t i = trimStart; i < trimEnd; ++i) {
   1296             updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
   1297         }
   1298 
    1299         // Now remove exactly the trimEnd - trimStart buffers accounted for above.
    1300         mTimedBufferQueue.removeItemsAt(trimStart, trimEnd - trimStart);
   1301     }
   1302 }
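// Exposition sketch (added commentary, not part of the original AOSP file):
// the "no next buffer" branch above derives the PTS one past the end of the
// last queued buffer by converting its frame count back into media-time units
// and adding the buffer's own PTS.  A hypothetical plain-integer version of
// that arithmetic, using the reduced 48 kHz ratio of 6 frames per 125 us:
static inline int64_t examplePtsOnePastEnd(int64_t bufferPts, int64_t frameCount) {
    const int64_t framesPerUnit = 6;    // reduced a_to_b_numer at 48 kHz
    const int64_t usPerUnit = 125;      // reduced a_to_b_denom
    // e.g. 480 frames -> 480 * 125 / 6 == 10000 us of media time past bufferPts
    return bufferPts + (frameCount * usPerUnit) / framesPerUnit;
}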
   1303 
   1304 void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
   1305         const char* logTag) {
   1306     ALOG_ASSERT(mTimedBufferQueue.size() > 0,
   1307                 "%s called (reason \"%s\"), but timed buffer queue has no"
   1308                 " elements to trim.", __FUNCTION__, logTag);
   1309 
   1310     updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
   1311     mTimedBufferQueue.removeAt(0);
   1312 }
   1313 
   1314 void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
   1315         const TimedBuffer& buf,
   1316         const char* logTag __unused) {
   1317     uint32_t bufBytes        = buf.buffer()->size();
   1318     uint32_t consumedAlready = buf.position();
   1319 
   1320     ALOG_ASSERT(consumedAlready <= bufBytes,
   1321                 "Bad bookkeeping while updating frames pending.  Timed buffer is"
   1322                 " only %u bytes long, but claims to have consumed %u"
   1323                 " bytes.  (update reason: \"%s\")",
   1324                 bufBytes, consumedAlready, logTag);
   1325 
   1326     uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
   1327     ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
   1328                 "Bad bookkeeping while updating frames pending.  Should have at"
   1329                 " least %u queued frames, but we think we have only %u.  (update"
   1330                 " reason: \"%s\")",
   1331                 bufFrames, mFramesPendingInQueue, logTag);
   1332 
   1333     mFramesPendingInQueue -= bufFrames;
   1334 }
   1335 
   1336 status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
   1337     const sp<IMemory>& buffer, int64_t pts) {
   1338 
   1339     {
   1340         Mutex::Autolock mttLock(mMediaTimeTransformLock);
   1341         if (!mMediaTimeTransformValid)
   1342             return INVALID_OPERATION;
   1343     }
   1344 
   1345     Mutex::Autolock _l(mTimedBufferQueueLock);
   1346 
   1347     uint32_t bufFrames = buffer->size() / mFrameSize;
   1348     mFramesPendingInQueue += bufFrames;
   1349     mTimedBufferQueue.add(TimedBuffer(buffer, pts));
   1350 
   1351     return NO_ERROR;
   1352 }
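// Exposition note (added commentary, not part of the original AOSP file):
// framesReady() reports mFramesPendingInQueue, which the call above grows by
// buffer->size() / mFrameSize.  For a hypothetical 16-bit stereo track
// (mFrameSize == 4 bytes), queueing a 1920-byte buffer adds 480 frames.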
   1353 
   1354 status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
   1355     const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
   1356 
   1357     ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
   1358            xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
   1359            target);
   1360 
   1361     if (!(target == TimedAudioTrack::LOCAL_TIME ||
   1362           target == TimedAudioTrack::COMMON_TIME)) {
   1363         return BAD_VALUE;
   1364     }
   1365 
   1366     Mutex::Autolock lock(mMediaTimeTransformLock);
   1367     mMediaTimeTransform = xform;
   1368     mMediaTimeTransformTarget = target;
   1369     mMediaTimeTransformValid = true;
   1370 
   1371     return NO_ERROR;
   1372 }
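// Exposition note (added commentary, not part of the original AOSP file): a
// hypothetical client that wants media time to advance one-for-one with the
// local timeline, anchored at the origin, could install an identity transform
// roughly like this (field names taken from the LinearTransform used above):
//
//     LinearTransform xform;
//     xform.a_zero = 0;            // media-time origin
//     xform.b_zero = 0;            // target-timeline origin
//     xform.a_to_b_numer = 1;      // 1:1 rate
//     xform.a_to_b_denom = 1;
//     timedTrack->setMediaTimeTransform(xform, TimedAudioTrack::LOCAL_TIME);
//
// A denominator of zero is treated by getNextBuffer() below as a pause.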
   1373 
   1374 #define min(a, b) ((a) < (b) ? (a) : (b))
   1375 
   1376 // implementation of getNextBuffer for tracks whose buffers have timestamps
   1377 status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
   1378     AudioBufferProvider::Buffer* buffer, int64_t pts)
   1379 {
   1380     if (pts == AudioBufferProvider::kInvalidPTS) {
   1381         buffer->raw = NULL;
   1382         buffer->frameCount = 0;
   1383         mTimedAudioOutputOnTime = false;
   1384         return INVALID_OPERATION;
   1385     }
   1386 
   1387     Mutex::Autolock _l(mTimedBufferQueueLock);
   1388 
   1389     ALOG_ASSERT(!mQueueHeadInFlight,
   1390                 "getNextBuffer called without releaseBuffer!");
   1391 
   1392     while (true) {
   1393 
   1394         // if we have no timed buffers, then fail
   1395         if (mTimedBufferQueue.isEmpty()) {
   1396             buffer->raw = NULL;
   1397             buffer->frameCount = 0;
   1398             return NOT_ENOUGH_DATA;
   1399         }
   1400 
   1401         TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
   1402 
   1403         // calculate the PTS of the head of the timed buffer queue expressed in
   1404         // local time
   1405         int64_t headLocalPTS;
   1406         {
   1407             Mutex::Autolock mttLock(mMediaTimeTransformLock);
   1408 
   1409             ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
   1410 
   1411             if (mMediaTimeTransform.a_to_b_denom == 0) {
   1412                 // the transform represents a pause, so yield silence
   1413                 timedYieldSilence_l(buffer->frameCount, buffer);
   1414                 return NO_ERROR;
   1415             }
   1416 
   1417             int64_t transformedPTS;
   1418             if (!mMediaTimeTransform.doForwardTransform(head.pts(),
   1419                                                         &transformedPTS)) {
   1420                 // the transform failed.  this shouldn't happen, but if it does
   1421                 // then just drop this buffer
   1422                 ALOGW("timedGetNextBuffer transform failed");
   1423                 buffer->raw = NULL;
   1424                 buffer->frameCount = 0;
   1425                 trimTimedBufferQueueHead_l("getNextBuffer; no transform");
   1426                 return NO_ERROR;
   1427             }
   1428 
   1429             if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
   1430                 if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
   1431                                                           &headLocalPTS)) {
   1432                     buffer->raw = NULL;
   1433                     buffer->frameCount = 0;
   1434                     return INVALID_OPERATION;
   1435                 }
   1436             } else {
   1437                 headLocalPTS = transformedPTS;
   1438             }
   1439         }
   1440 
   1441         uint32_t sr = sampleRate();
   1442 
   1443         // adjust the head buffer's PTS to reflect the portion of the head buffer
   1444         // that has already been consumed
   1445         int64_t effectivePTS = headLocalPTS +
   1446                 ((head.position() / mFrameSize) * mLocalTimeFreq / sr);
   1447 
   1448         // Calculate the delta in samples between the head of the input buffer
   1449         // queue and the start of the next output buffer that will be written.
   1450         // If the transformation fails because of over or underflow, it means
   1451         // that the sample's position in the output stream is so far out of
   1452         // whack that it should just be dropped.
   1453         int64_t sampleDelta;
   1454         if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
   1455             ALOGV("*** head buffer is too far from PTS: dropped buffer");
   1456             trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
   1457                                        " mix");
   1458             continue;
   1459         }
   1460         if (!mLocalTimeToSampleTransform.doForwardTransform(
   1461                 (effectivePTS - pts) << 32, &sampleDelta)) {
   1462             ALOGV("*** too late during sample rate transform: dropped buffer");
   1463             trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
   1464             continue;
   1465         }
   1466 
   1467         ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
   1468                " sampleDelta=[%d.%08x]",
   1469                head.pts(), head.position(), pts,
   1470                static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
   1471                    + (sampleDelta >> 32)),
   1472                static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
   1473 
   1474         // if the delta between the ideal placement for the next input sample and
   1475         // the current output position is within this threshold, then we will
   1476         // concatenate the next input samples to the previous output
   1477         const int64_t kSampleContinuityThreshold =
   1478                 (static_cast<int64_t>(sr) << 32) / 250;
   1479 
   1480         // if this is the first buffer of audio that we're emitting from this track
   1481         // then it should be almost exactly on time.
   1482         const int64_t kSampleStartupThreshold = 1LL << 32;
   1483 
   1484         if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
   1485            (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
   1486             // the next input is close enough to being on time, so concatenate it
   1487             // with the last output
   1488             timedYieldSamples_l(buffer);
   1489 
   1490             ALOGVV("*** on time: head.pos=%d frameCount=%u",
   1491                     head.position(), buffer->frameCount);
   1492             return NO_ERROR;
   1493         }
   1494 
    1495         // Looks like our output is not on time.  Reset our on-time status.
    1496         // The next time we mix samples from our input queue, they should be
    1497         // within the StartupThreshold.
   1498         mTimedAudioOutputOnTime = false;
   1499         if (sampleDelta > 0) {
   1500             // the gap between the current output position and the proper start of
   1501             // the next input sample is too big, so fill it with silence
   1502             uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
   1503 
   1504             timedYieldSilence_l(framesUntilNextInput, buffer);
   1505             ALOGV("*** silence: frameCount=%u", buffer->frameCount);
   1506             return NO_ERROR;
   1507         } else {
   1508             // the next input sample is late
   1509             uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
   1510             size_t onTimeSamplePosition =
   1511                     head.position() + lateFrames * mFrameSize;
   1512 
   1513             if (onTimeSamplePosition > head.buffer()->size()) {
   1514                 // all the remaining samples in the head are too late, so
   1515                 // drop it and move on
   1516                 ALOGV("*** too late: dropped buffer");
   1517                 trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
   1518                 continue;
   1519             } else {
   1520                 // skip over the late samples
   1521                 head.setPosition(onTimeSamplePosition);
   1522 
   1523                 // yield the available samples
   1524                 timedYieldSamples_l(buffer);
   1525 
   1526                 ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
   1527                 return NO_ERROR;
   1528             }
   1529         }
   1530     }
   1531 }
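// Exposition sketch (added commentary, not part of the original AOSP file):
// sampleDelta above is a signed Q32.32 fixed-point frame count (upper 32 bits
// are whole frames, lower 32 bits the fraction).  The continuity threshold
// (sr << 32) / 250 is therefore 1/250 s == 4 ms worth of frames, i.e. 192
// frames at 48 kHz, while the startup threshold 1LL << 32 is exactly one
// frame.  The hypothetical helper below repeats the round-to-nearest step the
// code uses when turning such a delta into whole frames.
static inline int64_t exampleWholeFramesFromQ32(int64_t q32FrameDelta) {
    // Add half a frame (0x80000000 in Q32.32) before shifting out the 32
    // fractional bits so the result is rounded rather than truncated.
    return (q32FrameDelta + 0x80000000LL) >> 32;
}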
   1532 
   1533 // Yield samples from the timed buffer queue head up to the given output
   1534 // buffer's capacity.
   1535 //
   1536 // Caller must hold mTimedBufferQueueLock
   1537 void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
   1538     AudioBufferProvider::Buffer* buffer) {
   1539 
   1540     const TimedBuffer& head = mTimedBufferQueue[0];
   1541 
   1542     buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
   1543                    head.position());
   1544 
   1545     uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
   1546                                  mFrameSize);
   1547     size_t framesRequested = buffer->frameCount;
   1548     buffer->frameCount = min(framesLeftInHead, framesRequested);
   1549 
   1550     mQueueHeadInFlight = true;
   1551     mTimedAudioOutputOnTime = true;
   1552 }
   1553 
   1554 // Yield samples of silence up to the given output buffer's capacity
   1555 //
   1556 // Caller must hold mTimedBufferQueueLock
   1557 void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
   1558     uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
   1559 
   1560     // lazily allocate a buffer filled with silence
   1561     if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
   1562         delete [] mTimedSilenceBuffer;
   1563         mTimedSilenceBufferSize = numFrames * mFrameSize;
   1564         mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
   1565         memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
   1566     }
   1567 
   1568     buffer->raw = mTimedSilenceBuffer;
   1569     size_t framesRequested = buffer->frameCount;
   1570     buffer->frameCount = min(numFrames, framesRequested);
   1571 
   1572     mTimedAudioOutputOnTime = false;
   1573 }
   1574 
   1575 // AudioBufferProvider interface
   1576 void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
   1577     AudioBufferProvider::Buffer* buffer) {
   1578 
   1579     Mutex::Autolock _l(mTimedBufferQueueLock);
   1580 
   1581     // If the buffer which was just released is part of the buffer at the head
    1582     // of the queue, be sure to update the amount of the buffer which has been
    1583     // consumed.  If the buffer being returned is not part of the head of the
    1584     // queue, it's either because the buffer is part of the silence buffer, or
   1585     // because the head of the timed queue was trimmed after the mixer called
   1586     // getNextBuffer but before the mixer called releaseBuffer.
   1587     if (buffer->raw == mTimedSilenceBuffer) {
   1588         ALOG_ASSERT(!mQueueHeadInFlight,
   1589                     "Queue head in flight during release of silence buffer!");
   1590         goto done;
   1591     }
   1592 
   1593     ALOG_ASSERT(mQueueHeadInFlight,
   1594                 "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
   1595                 " head in flight.");
   1596 
   1597     if (mTimedBufferQueue.size()) {
   1598         TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
   1599 
   1600         void* start = head.buffer()->pointer();
   1601         void* end   = reinterpret_cast<void*>(
   1602                         reinterpret_cast<uint8_t*>(head.buffer()->pointer())
   1603                         + head.buffer()->size());
   1604 
   1605         ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
   1606                     "released buffer not within the head of the timed buffer"
   1607                     " queue; qHead = [%p, %p], released buffer = %p",
   1608                     start, end, buffer->raw);
   1609 
   1610         head.setPosition(head.position() +
   1611                 (buffer->frameCount * mFrameSize));
   1612         mQueueHeadInFlight = false;
   1613 
   1614         ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
   1615                     "Bad bookkeeping during releaseBuffer!  Should have at"
   1616                     " least %u queued frames, but we think we have only %u",
   1617                     buffer->frameCount, mFramesPendingInQueue);
   1618 
   1619         mFramesPendingInQueue -= buffer->frameCount;
   1620 
   1621         if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
   1622             || mTrimQueueHeadOnRelease) {
   1623             trimTimedBufferQueueHead_l("releaseBuffer");
   1624             mTrimQueueHeadOnRelease = false;
   1625         }
   1626     } else {
   1627         LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
   1628                   " buffers in the timed buffer queue");
   1629     }
   1630 
   1631 done:
   1632     buffer->raw = 0;
   1633     buffer->frameCount = 0;
   1634 }
   1635 
   1636 size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
   1637     Mutex::Autolock _l(mTimedBufferQueueLock);
   1638     return mFramesPendingInQueue;
   1639 }
   1640 
   1641 AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
   1642         : mPTS(0), mPosition(0) {}
   1643 
   1644 AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
   1645     const sp<IMemory>& buffer, int64_t pts)
   1646         : mBuffer(buffer), mPTS(pts), mPosition(0) {}
   1647 
   1648 
   1649 // ----------------------------------------------------------------------------
   1650 
   1651 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
   1652             PlaybackThread *playbackThread,
   1653             DuplicatingThread *sourceThread,
   1654             uint32_t sampleRate,
   1655             audio_format_t format,
   1656             audio_channel_mask_t channelMask,
   1657             size_t frameCount,
   1658             int uid)
   1659     :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
   1660                 NULL, 0, 0, uid, IAudioFlinger::TRACK_DEFAULT, TYPE_OUTPUT),
   1661     mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
   1662 {
   1663 
   1664     if (mCblk != NULL) {
   1665         mOutBuffer.frameCount = 0;
   1666         playbackThread->mTracks.add(this);
   1667         ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
   1668                 "frameCount %u, mChannelMask 0x%08x",
   1669                 mCblk, mBuffer,
   1670                 frameCount, mChannelMask);
   1671         // since client and server are in the same process,
   1672         // the buffer has the same virtual address on both sides
   1673         mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
   1674                 true /*clientInServer*/);
   1675         mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
   1676         mClientProxy->setSendLevel(0.0);
   1677         mClientProxy->setSampleRate(sampleRate);
   1678     } else {
   1679         ALOGW("Error creating output track on thread %p", playbackThread);
   1680     }
   1681 }
   1682 
   1683 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
   1684 {
   1685     clearBufferQueue();
   1686     delete mClientProxy;
    1687     // the superclass destructor will now delete the server proxy and the shared memory that both refer to
   1688 }
   1689 
   1690 status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
   1691                                                           int triggerSession)
   1692 {
   1693     status_t status = Track::start(event, triggerSession);
   1694     if (status != NO_ERROR) {
   1695         return status;
   1696     }
   1697 
   1698     mActive = true;
   1699     mRetryCount = 127;
   1700     return status;
   1701 }
   1702 
   1703 void AudioFlinger::PlaybackThread::OutputTrack::stop()
   1704 {
   1705     Track::stop();
   1706     clearBufferQueue();
   1707     mOutBuffer.frameCount = 0;
   1708     mActive = false;
   1709 }
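// Exposition note (added commentary, not part of the original AOSP file):
// write() below copies "frames" frames of interleaved 16-bit PCM into the
// duplicated output's client proxy.  Previously queued overflow buffers are
// drained first; whatever cannot be written within the source thread's wait
// budget is copied into a new overflow buffer (up to kMaxOverFlowBuffers) for
// the next call.  A hypothetical caller on the duplicating thread would use
// it roughly like this:
//
//     bool outputFull = outputTrack->write(mixBuffer, frameCount);
//     if (outputFull) {
//         // this sink could not keep up; the caller may lengthen its wait time
//     }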
   1710 
   1711 bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
   1712 {
   1713     Buffer *pInBuffer;
   1714     Buffer inBuffer;
   1715     uint32_t channelCount = mChannelCount;
   1716     bool outputBufferFull = false;
   1717     inBuffer.frameCount = frames;
   1718     inBuffer.i16 = data;
   1719 
   1720     uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
   1721 
   1722     if (!mActive && frames != 0) {
   1723         start();
   1724         sp<ThreadBase> thread = mThread.promote();
   1725         if (thread != 0) {
   1726             MixerThread *mixerThread = (MixerThread *)thread.get();
   1727             if (mFrameCount > frames) {
   1728                 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
   1729                     uint32_t startFrames = (mFrameCount - frames);
   1730                     pInBuffer = new Buffer;
   1731                     pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
   1732                     pInBuffer->frameCount = startFrames;
   1733                     pInBuffer->i16 = pInBuffer->mBuffer;
   1734                     memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
   1735                     mBufferQueue.add(pInBuffer);
   1736                 } else {
   1737                     ALOGW("OutputTrack::write() %p no more buffers in queue", this);
   1738                 }
   1739             }
   1740         }
   1741     }
   1742 
   1743     while (waitTimeLeftMs) {
   1744         // First write pending buffers, then new data
   1745         if (mBufferQueue.size()) {
   1746             pInBuffer = mBufferQueue.itemAt(0);
   1747         } else {
   1748             pInBuffer = &inBuffer;
   1749         }
   1750 
   1751         if (pInBuffer->frameCount == 0) {
   1752             break;
   1753         }
   1754 
   1755         if (mOutBuffer.frameCount == 0) {
   1756             mOutBuffer.frameCount = pInBuffer->frameCount;
   1757             nsecs_t startTime = systemTime();
   1758             status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
   1759             if (status != NO_ERROR) {
   1760                 ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
   1761                         mThread.unsafe_get(), status);
   1762                 outputBufferFull = true;
   1763                 break;
   1764             }
   1765             uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
   1766             if (waitTimeLeftMs >= waitTimeMs) {
   1767                 waitTimeLeftMs -= waitTimeMs;
   1768             } else {
   1769                 waitTimeLeftMs = 0;
   1770             }
   1771         }
   1772 
   1773         uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
   1774                 pInBuffer->frameCount;
   1775         memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
   1776         Proxy::Buffer buf;
   1777         buf.mFrameCount = outFrames;
   1778         buf.mRaw = NULL;
   1779         mClientProxy->releaseBuffer(&buf);
   1780         pInBuffer->frameCount -= outFrames;
   1781         pInBuffer->i16 += outFrames * channelCount;
   1782         mOutBuffer.frameCount -= outFrames;
   1783         mOutBuffer.i16 += outFrames * channelCount;
   1784 
   1785         if (pInBuffer->frameCount == 0) {
   1786             if (mBufferQueue.size()) {
   1787                 mBufferQueue.removeAt(0);
   1788                 delete [] pInBuffer->mBuffer;
   1789                 delete pInBuffer;
   1790                 ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
   1791                         mThread.unsafe_get(), mBufferQueue.size());
   1792             } else {
   1793                 break;
   1794             }
   1795         }
   1796     }
   1797 
   1798     // If we could not write all frames, allocate a buffer and queue it for next time.
   1799     if (inBuffer.frameCount) {
   1800         sp<ThreadBase> thread = mThread.promote();
   1801         if (thread != 0 && !thread->standby()) {
   1802             if (mBufferQueue.size() < kMaxOverFlowBuffers) {
   1803                 pInBuffer = new Buffer;
   1804                 pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
   1805                 pInBuffer->frameCount = inBuffer.frameCount;
   1806                 pInBuffer->i16 = pInBuffer->mBuffer;
   1807                 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
   1808                         sizeof(int16_t));
   1809                 mBufferQueue.add(pInBuffer);
   1810                 ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
   1811                         mThread.unsafe_get(), mBufferQueue.size());
   1812             } else {
   1813                 ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
    1814                         this, mThread.unsafe_get());
   1815             }
   1816         }
   1817     }
   1818 
    1819     // Calling write() with a 0 length buffer means that no more data will be written:
   1820     // If no more buffers are pending, fill output track buffer to make sure it is started
   1821     // by output mixer.
   1822     if (frames == 0 && mBufferQueue.size() == 0) {
    1823         // FIXME broken, replace by getting framesReady() from proxy
   1824         size_t user = 0;    // was mCblk->user
   1825         if (user < mFrameCount) {
   1826             frames = mFrameCount - user;
   1827             pInBuffer = new Buffer;
   1828             pInBuffer->mBuffer = new int16_t[frames * channelCount];
   1829             pInBuffer->frameCount = frames;
   1830             pInBuffer->i16 = pInBuffer->mBuffer;
   1831             memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
   1832             mBufferQueue.add(pInBuffer);
   1833         } else if (mActive) {
   1834             stop();
   1835         }
   1836     }
   1837 
   1838     return outputBufferFull;
   1839 }
   1840 
   1841 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
   1842         AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
   1843 {
   1844     ClientProxy::Buffer buf;
   1845     buf.mFrameCount = buffer->frameCount;
   1846     struct timespec timeout;
   1847     timeout.tv_sec = waitTimeMs / 1000;
   1848     timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
   1849     status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
   1850     buffer->frameCount = buf.mFrameCount;
   1851     buffer->raw = buf.mRaw;
   1852     return status;
   1853 }
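// Exposition sketch (added commentary, not part of the original AOSP file):
// obtainBuffer() above converts its millisecond budget into the timespec that
// ClientProxy::obtainBuffer() expects.  A hypothetical stand-alone version of
// that conversion:
static inline struct timespec exampleMsToTimespec(uint32_t waitTimeMs) {
    struct timespec ts;
    ts.tv_sec  = waitTimeMs / 1000;                      // whole seconds
    ts.tv_nsec = (long)(waitTimeMs % 1000) * 1000000;    // remaining ms -> ns
    return ts;   // e.g. 2500 ms -> { tv_sec = 2, tv_nsec = 500000000 }
}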
   1854 
   1855 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
   1856 {
   1857     size_t size = mBufferQueue.size();
   1858 
   1859     for (size_t i = 0; i < size; i++) {
   1860         Buffer *pBuffer = mBufferQueue.itemAt(i);
   1861         delete [] pBuffer->mBuffer;
   1862         delete pBuffer;
   1863     }
   1864     mBufferQueue.clear();
   1865 }
   1866 
   1867 
   1868 AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
   1869                                                      uint32_t sampleRate,
   1870                                                      audio_channel_mask_t channelMask,
   1871                                                      audio_format_t format,
   1872                                                      size_t frameCount,
   1873                                                      void *buffer,
   1874                                                      IAudioFlinger::track_flags_t flags)
   1875     :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
   1876               buffer, 0, 0, getuid(), flags, TYPE_PATCH),
   1877               mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
   1878 {
   1879     uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
   1880                                                                     playbackThread->sampleRate();
   1881     mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
   1882     mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
   1883 
   1884     ALOGV("PatchTrack %p sampleRate %d mPeerTimeout %d.%03d sec",
   1885                                       this, sampleRate,
   1886                                       (int)mPeerTimeout.tv_sec,
   1887                                       (int)(mPeerTimeout.tv_nsec / 1000000));
   1888 }
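// Exposition note (added commentary, not part of the original AOSP file): the
// peer timeout above is sized to two of the playback thread's mix buffers.
// For a hypothetical thread running 960 frames per buffer at 48000 Hz:
//
//     mixBufferNs = 2 * 960 * 1e9 / 48000 = 40,000,000 ns
//
// giving mPeerTimeout = { tv_sec = 0, tv_nsec = 40000000 }, i.e. 0.040 s.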
   1889 
   1890 AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
   1891 {
   1892 }
   1893 
   1894 // AudioBufferProvider interface
   1895 status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
   1896         AudioBufferProvider::Buffer* buffer, int64_t pts)
   1897 {
   1898     ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::getNextBuffer() called without peer proxy");
   1899     Proxy::Buffer buf;
   1900     buf.mFrameCount = buffer->frameCount;
   1901     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
   1902     ALOGV_IF(status != NO_ERROR, "PatchTrack() %p getNextBuffer status %d", this, status);
   1903     buffer->frameCount = buf.mFrameCount;
   1904     if (buf.mFrameCount == 0) {
   1905         return WOULD_BLOCK;
   1906     }
   1907     status = Track::getNextBuffer(buffer, pts);
   1908     return status;
   1909 }
   1910 
   1911 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
   1912 {
   1913     ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::releaseBuffer() called without peer proxy");
   1914     Proxy::Buffer buf;
   1915     buf.mFrameCount = buffer->frameCount;
   1916     buf.mRaw = buffer->raw;
   1917     mPeerProxy->releaseBuffer(&buf);
   1918     TrackBase::releaseBuffer(buffer);
   1919 }
   1920 
   1921 status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
   1922                                                                 const struct timespec *timeOut)
   1923 {
   1924     return mProxy->obtainBuffer(buffer, timeOut);
   1925 }
   1926 
   1927 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
   1928 {
   1929     mProxy->releaseBuffer(buffer);
   1930     if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
   1931         ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting");
   1932         start();
   1933     }
   1934     android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
   1935 }
   1936 
   1937 // ----------------------------------------------------------------------------
   1938 //      Record
   1939 // ----------------------------------------------------------------------------
   1940 
   1941 AudioFlinger::RecordHandle::RecordHandle(
   1942         const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
   1943     : BnAudioRecord(),
   1944     mRecordTrack(recordTrack)
   1945 {
   1946 }
   1947 
   1948 AudioFlinger::RecordHandle::~RecordHandle() {
   1949     stop_nonvirtual();
   1950     mRecordTrack->destroy();
   1951 }
   1952 
   1953 status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
   1954         int triggerSession) {
   1955     ALOGV("RecordHandle::start()");
   1956     return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
   1957 }
   1958 
   1959 void AudioFlinger::RecordHandle::stop() {
   1960     stop_nonvirtual();
   1961 }
   1962 
   1963 void AudioFlinger::RecordHandle::stop_nonvirtual() {
   1964     ALOGV("RecordHandle::stop()");
   1965     mRecordTrack->stop();
   1966 }
   1967 
   1968 status_t AudioFlinger::RecordHandle::onTransact(
   1969     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
   1970 {
   1971     return BnAudioRecord::onTransact(code, data, reply, flags);
   1972 }
   1973 
   1974 // ----------------------------------------------------------------------------
   1975 
   1976 // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
   1977 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
   1978             RecordThread *thread,
   1979             const sp<Client>& client,
   1980             uint32_t sampleRate,
   1981             audio_format_t format,
   1982             audio_channel_mask_t channelMask,
   1983             size_t frameCount,
   1984             void *buffer,
   1985             int sessionId,
   1986             int uid,
   1987             IAudioFlinger::track_flags_t flags,
   1988             track_type type)
   1989     :   TrackBase(thread, client, sampleRate, format,
   1990                   channelMask, frameCount, buffer, sessionId, uid,
   1991                   flags, false /*isOut*/,
   1992                   (type == TYPE_DEFAULT) ?
   1993                           ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
   1994                           ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
   1995                   type),
   1996         mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0),
   1997         // See real initialization of mRsmpInFront at RecordThread::start()
   1998         mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL)
   1999 {
   2000     if (mCblk == NULL) {
   2001         return;
   2002     }
   2003 
   2004     mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
   2005                                               mFrameSize, !isExternalTrack());
   2006 
   2007     uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
   2008     // FIXME I don't understand either of the channel count checks
   2009     if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 &&
   2010             channelCount <= FCC_2) {
    2011         // sink SR: the resampler's output runs at the track's requested sample rate
   2012         mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT,
   2013                 thread->mChannelCount, sampleRate);
    2014         // source SR: the resampler's input runs at the record thread's sample rate
   2015         mResampler->setSampleRate(thread->mSampleRate);
   2016         mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
   2017         mResamplerBufferProvider = new ResamplerBufferProvider(this);
   2018     }
   2019 
   2020     if (flags & IAudioFlinger::TRACK_FAST) {
   2021         ALOG_ASSERT(thread->mFastTrackAvail);
   2022         thread->mFastTrackAvail = false;
   2023     }
   2024 }
   2025 
   2026 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
   2027 {
   2028     ALOGV("%s", __func__);
   2029     delete mResampler;
   2030     delete[] mRsmpOutBuffer;
   2031     delete mResamplerBufferProvider;
   2032 }
   2033 
   2034 // AudioBufferProvider interface
   2035 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
   2036         int64_t pts __unused)
   2037 {
   2038     ServerProxy::Buffer buf;
   2039     buf.mFrameCount = buffer->frameCount;
   2040     status_t status = mServerProxy->obtainBuffer(&buf);
   2041     buffer->frameCount = buf.mFrameCount;
   2042     buffer->raw = buf.mRaw;
   2043     if (buf.mFrameCount == 0) {
   2044         // FIXME also wake futex so that overrun is noticed more quickly
   2045         (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
   2046     }
   2047     return status;
   2048 }
   2049 
   2050 status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
   2051                                                         int triggerSession)
   2052 {
   2053     sp<ThreadBase> thread = mThread.promote();
   2054     if (thread != 0) {
   2055         RecordThread *recordThread = (RecordThread *)thread.get();
   2056         return recordThread->start(this, event, triggerSession);
   2057     } else {
   2058         return BAD_VALUE;
   2059     }
   2060 }
   2061 
   2062 void AudioFlinger::RecordThread::RecordTrack::stop()
   2063 {
   2064     sp<ThreadBase> thread = mThread.promote();
   2065     if (thread != 0) {
   2066         RecordThread *recordThread = (RecordThread *)thread.get();
   2067         if (recordThread->stop(this) && isExternalTrack()) {
   2068             AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
   2069         }
   2070     }
   2071 }
   2072 
   2073 void AudioFlinger::RecordThread::RecordTrack::destroy()
   2074 {
   2075     // see comments at AudioFlinger::PlaybackThread::Track::destroy()
   2076     sp<RecordTrack> keep(this);
   2077     {
   2078         if (isExternalTrack()) {
   2079             if (mState == ACTIVE || mState == RESUMING) {
   2080                 AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
   2081             }
   2082             AudioSystem::releaseInput(mThreadIoHandle, (audio_session_t)mSessionId);
   2083         }
   2084         sp<ThreadBase> thread = mThread.promote();
   2085         if (thread != 0) {
   2086             Mutex::Autolock _l(thread->mLock);
   2087             RecordThread *recordThread = (RecordThread *) thread.get();
   2088             recordThread->destroyTrack_l(this);
   2089         }
   2090     }
   2091 }
   2092 
   2093 void AudioFlinger::RecordThread::RecordTrack::invalidate()
   2094 {
   2095     // FIXME should use proxy, and needs work
   2096     audio_track_cblk_t* cblk = mCblk;
   2097     android_atomic_or(CBLK_INVALID, &cblk->mFlags);
   2098     android_atomic_release_store(0x40000000, &cblk->mFutex);
   2099     // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
   2100     (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
   2101 }
   2102 
   2103 
   2104 /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
   2105 {
   2106     result.append("    Active Client Fmt Chn mask Session S   Server fCount SRate\n");
   2107 }
   2108 
   2109 void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
   2110 {
   2111     snprintf(buffer, size, "    %6s %6u %3u %08X %7u %1d %08X %6zu %5u\n",
   2112             active ? "yes" : "no",
   2113             (mClient == 0) ? getpid_cached : mClient->pid(),
   2114             mFormat,
   2115             mChannelMask,
   2116             mSessionId,
   2117             mState,
   2118             mCblk->mServer,
   2119             mFrameCount,
   2120             mSampleRate);
   2121 
   2122 }
   2123 
   2124 void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
   2125 {
   2126     if (event == mSyncStartEvent) {
   2127         ssize_t framesToDrop = 0;
   2128         sp<ThreadBase> threadBase = mThread.promote();
   2129         if (threadBase != 0) {
   2130             // TODO: use actual buffer filling status instead of 2 buffers when info is available
   2131             // from audio HAL
   2132             framesToDrop = threadBase->mFrameCount * 2;
   2133         }
   2134         mFramesToDrop = framesToDrop;
   2135     }
   2136 }
   2137 
   2138 void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
   2139 {
   2140     if (mSyncStartEvent != 0) {
   2141         mSyncStartEvent->cancel();
   2142         mSyncStartEvent.clear();
   2143     }
   2144     mFramesToDrop = 0;
   2145 }
   2146 
   2147 
   2148 AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
   2149                                                      uint32_t sampleRate,
   2150                                                      audio_channel_mask_t channelMask,
   2151                                                      audio_format_t format,
   2152                                                      size_t frameCount,
   2153                                                      void *buffer,
   2154                                                      IAudioFlinger::track_flags_t flags)
   2155     :   RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
   2156                 buffer, 0, getuid(), flags, TYPE_PATCH),
   2157                 mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
   2158 {
   2159     uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
   2160                                                                 recordThread->sampleRate();
   2161     mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
   2162     mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
   2163 
   2164     ALOGV("PatchRecord %p sampleRate %d mPeerTimeout %d.%03d sec",
   2165                                       this, sampleRate,
   2166                                       (int)mPeerTimeout.tv_sec,
   2167                                       (int)(mPeerTimeout.tv_nsec / 1000000));
   2168 }
   2169 
   2170 AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
   2171 {
   2172 }
   2173 
   2174 // AudioBufferProvider interface
   2175 status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
   2176                                                   AudioBufferProvider::Buffer* buffer, int64_t pts)
   2177 {
   2178     ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::getNextBuffer() called without peer proxy");
   2179     Proxy::Buffer buf;
   2180     buf.mFrameCount = buffer->frameCount;
   2181     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
   2182     ALOGV_IF(status != NO_ERROR,
   2183              "PatchRecord() %p mPeerProxy->obtainBuffer status %d", this, status);
   2184     buffer->frameCount = buf.mFrameCount;
   2185     if (buf.mFrameCount == 0) {
   2186         return WOULD_BLOCK;
   2187     }
   2188     status = RecordTrack::getNextBuffer(buffer, pts);
   2189     return status;
   2190 }
   2191 
   2192 void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
   2193 {
   2194     ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::releaseBuffer() called without peer proxy");
   2195     Proxy::Buffer buf;
   2196     buf.mFrameCount = buffer->frameCount;
   2197     buf.mRaw = buffer->raw;
   2198     mPeerProxy->releaseBuffer(&buf);
   2199     TrackBase::releaseBuffer(buffer);
   2200 }
   2201 
   2202 status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
   2203                                                                const struct timespec *timeOut)
   2204 {
   2205     return mProxy->obtainBuffer(buffer, timeOut);
   2206 }
   2207 
   2208 void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
   2209 {
   2210     mProxy->releaseBuffer(buffer);
   2211 }
   2212 
    2213 } // namespace android
   2214