      1 /*
      2 **
      3 ** Copyright 2007, The Android Open Source Project
      4 **
      5 ** Licensed under the Apache License, Version 2.0 (the "License");
      6 ** you may not use this file except in compliance with the License.
      7 ** You may obtain a copy of the License at
      8 **
      9 **     http://www.apache.org/licenses/LICENSE-2.0
     10 **
     11 ** Unless required by applicable law or agreed to in writing, software
     12 ** distributed under the License is distributed on an "AS IS" BASIS,
     13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     14 ** See the License for the specific language governing permissions and
     15 ** limitations under the License.
     16 */
     17 
     18 //#define LOG_NDEBUG 0
     19 #define LOG_TAG "AudioTrack"
     20 
     21 #include <inttypes.h>
     22 #include <math.h>
     23 #include <sys/resource.h>
     24 
     25 #include <audio_utils/primitives.h>
     26 #include <binder/IPCThreadState.h>
     27 #include <media/AudioTrack.h>
     28 #include <utils/Log.h>
     29 #include <private/media/AudioTrackShared.h>
     30 #include <media/IAudioFlinger.h>
     31 #include <media/AudioPolicyHelper.h>
     32 #include <media/AudioResamplerPublic.h>
     33 
     34 #define WAIT_PERIOD_MS                  10
     35 #define WAIT_STREAM_END_TIMEOUT_SEC     120
     36 static const int kMaxLoopCountNotifications = 32;
     37 
     38 namespace android {
     39 // ---------------------------------------------------------------------------
     40 
     41 // TODO: Move to a separate .h
     42 
     43 template <typename T>
     44 static inline const T &min(const T &x, const T &y) {
     45     return x < y ? x : y;
     46 }
     47 
     48 template <typename T>
     49 static inline const T &max(const T &x, const T &y) {
     50     return x > y ? x : y;
     51 }
     52 
     53 static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
     54 {
     55     return ((double)frames * 1000000000) / ((double)sampleRate * speed);
     56 }
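         // Illustrative example with arbitrary values: framesToNanoseconds(480, 48000 /*Hz*/, 1.0f /*speed*/)
         // covers 480 / 48000 s = 10 ms and returns 10,000,000 ns; at speed 2.0f the same
         // 480 frames cover only 5 ms.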
     57 
     58 static int64_t convertTimespecToUs(const struct timespec &tv)
     59 {
     60     return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
     61 }
     62 
     63 // current monotonic time in microseconds.
     64 static int64_t getNowUs()
     65 {
     66     struct timespec tv;
     67     (void) clock_gettime(CLOCK_MONOTONIC, &tv);
     68     return convertTimespecToUs(tv);
     69 }
     70 
     71 // FIXME: we don't use the pitch setting in the time stretcher (not working);
     72 // instead we emulate it using our sample rate converter.
     73 static const bool kFixPitch = true; // enable pitch fix
     74 static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
     75 {
     76     return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
     77 }
     78 
     79 static inline float adjustSpeed(float speed, float pitch)
     80 {
     81     return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
     82 }
     83 
     84 static inline float adjustPitch(float pitch)
     85 {
     86     return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
     87 }
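         // Illustrative example of the pitch emulation above, using arbitrary values:
         // a requested pitch of 1.5 at speed 1.0 and 48000 Hz becomes an effective sample rate
         // of 72000 Hz, an effective speed of ~0.667, and AUDIO_TIMESTRETCH_PITCH_NORMAL is
         // what gets passed to the time stretcher.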
     88 
     89 // Must match similar computation in createTrack_l in Threads.cpp.
     90 // TODO: Move to a common library
     91 static size_t calculateMinFrameCount(
     92         uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
     93         uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
     94 {
     95     // Ensure that buffer depth covers at least audio hardware latency
     96     uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
     97     if (minBufCount < 2) {
     98         minBufCount = 2;
     99     }
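             // Illustrative arithmetic with typical (not required) values: afLatencyMs = 80,
             // afFrameCount = 960 and afSampleRate = 48000 give a 20 ms buffer period, so
             // minBufCount = 80 / 20 = 4, which already satisfies the minimum of 2.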
    100 #if 0
    101     // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    102     // but keeping the code here to make it easier to add later.
    103     if (minBufCount < notificationsPerBufferReq) {
    104         minBufCount = notificationsPerBufferReq;
    105     }
    106 #endif
    107     ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
    108             "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
    109             afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
    110             /*, notificationsPerBufferReq*/);
    111     return minBufCount * sourceFramesNeededWithTimestretch(
    112             sampleRate, afFrameCount, afSampleRate, speed);
    113 }
    114 
    115 // static
    116 status_t AudioTrack::getMinFrameCount(
    117         size_t* frameCount,
    118         audio_stream_type_t streamType,
    119         uint32_t sampleRate)
    120 {
    121     if (frameCount == NULL) {
    122         return BAD_VALUE;
    123     }
    124 
    125     // FIXME handle in server, like createTrack_l(), possible missing info:
    126     //          audio_io_handle_t output
    127     //          audio_format_t format
    128     //          audio_channel_mask_t channelMask
    129     //          audio_output_flags_t flags (FAST)
    130     uint32_t afSampleRate;
    131     status_t status;
    132     status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    133     if (status != NO_ERROR) {
    134         ALOGE("Unable to query output sample rate for stream type %d; status %d",
    135                 streamType, status);
    136         return status;
    137     }
    138     size_t afFrameCount;
    139     status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    140     if (status != NO_ERROR) {
    141         ALOGE("Unable to query output frame count for stream type %d; status %d",
    142                 streamType, status);
    143         return status;
    144     }
    145     uint32_t afLatency;
    146     status = AudioSystem::getOutputLatency(&afLatency, streamType);
    147     if (status != NO_ERROR) {
    148         ALOGE("Unable to query output latency for stream type %d; status %d",
    149                 streamType, status);
    150         return status;
    151     }
    152 
    153     // When called from createTrack, speed is 1.0f (normal speed).
     154     // This is checked again when setting the playback rate (TODO: and when setting the sample rate, too).
    155     *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
    156             /*, 0 notificationsPerBufferReq*/);
    157 
    158     // The formula above should always produce a non-zero value under normal circumstances:
    159     // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    160     // Return error in the unlikely event that it does not, as that's part of the API contract.
    161     if (*frameCount == 0) {
    162         ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
    163                 streamType, sampleRate);
    164         return BAD_VALUE;
    165     }
    166     ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
    167             *frameCount, afFrameCount, afSampleRate, afLatency);
    168     return NO_ERROR;
    169 }
    170 
    171 // ---------------------------------------------------------------------------
    172 
    173 AudioTrack::AudioTrack()
    174     : mStatus(NO_INIT),
    175       mState(STATE_STOPPED),
    176       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
    177       mPreviousSchedulingGroup(SP_DEFAULT),
    178       mPausedPosition(0),
    179       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
    180 {
    181     mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    182     mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    183     mAttributes.flags = 0x0;
    184     strcpy(mAttributes.tags, "");
    185 }
    186 
    187 AudioTrack::AudioTrack(
    188         audio_stream_type_t streamType,
    189         uint32_t sampleRate,
    190         audio_format_t format,
    191         audio_channel_mask_t channelMask,
    192         size_t frameCount,
    193         audio_output_flags_t flags,
    194         callback_t cbf,
    195         void* user,
    196         int32_t notificationFrames,
    197         audio_session_t sessionId,
    198         transfer_type transferType,
    199         const audio_offload_info_t *offloadInfo,
    200         int uid,
    201         pid_t pid,
    202         const audio_attributes_t* pAttributes,
    203         bool doNotReconnect,
    204         float maxRequiredSpeed)
    205     : mStatus(NO_INIT),
    206       mState(STATE_STOPPED),
    207       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
    208       mPreviousSchedulingGroup(SP_DEFAULT),
    209       mPausedPosition(0),
    210       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
    211 {
    212     mStatus = set(streamType, sampleRate, format, channelMask,
    213             frameCount, flags, cbf, user, notificationFrames,
    214             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
    215             offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
    216 }
    217 
    218 AudioTrack::AudioTrack(
    219         audio_stream_type_t streamType,
    220         uint32_t sampleRate,
    221         audio_format_t format,
    222         audio_channel_mask_t channelMask,
    223         const sp<IMemory>& sharedBuffer,
    224         audio_output_flags_t flags,
    225         callback_t cbf,
    226         void* user,
    227         int32_t notificationFrames,
    228         audio_session_t sessionId,
    229         transfer_type transferType,
    230         const audio_offload_info_t *offloadInfo,
    231         int uid,
    232         pid_t pid,
    233         const audio_attributes_t* pAttributes,
    234         bool doNotReconnect,
    235         float maxRequiredSpeed)
    236     : mStatus(NO_INIT),
    237       mState(STATE_STOPPED),
    238       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
    239       mPreviousSchedulingGroup(SP_DEFAULT),
    240       mPausedPosition(0),
    241       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
    242 {
    243     mStatus = set(streamType, sampleRate, format, channelMask,
    244             0 /*frameCount*/, flags, cbf, user, notificationFrames,
    245             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
    246             uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
    247 }
    248 
    249 AudioTrack::~AudioTrack()
    250 {
    251     if (mStatus == NO_ERROR) {
    252         // Make sure that callback function exits in the case where
    253         // it is looping on buffer full condition in obtainBuffer().
    254         // Otherwise the callback thread will never exit.
    255         stop();
    256         if (mAudioTrackThread != 0) {
    257             mProxy->interrupt();
    258             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
    259             mAudioTrackThread->requestExitAndWait();
    260             mAudioTrackThread.clear();
    261         }
    262         // No lock here: worst case we remove a NULL callback which will be a nop
    263         if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
    264             AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    265         }
    266         IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
    267         mAudioTrack.clear();
    268         mCblkMemory.clear();
    269         mSharedBuffer.clear();
    270         IPCThreadState::self()->flushCommands();
    271         ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
    272                 mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
    273         AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    274     }
    275 }
    276 
    277 status_t AudioTrack::set(
    278         audio_stream_type_t streamType,
    279         uint32_t sampleRate,
    280         audio_format_t format,
    281         audio_channel_mask_t channelMask,
    282         size_t frameCount,
    283         audio_output_flags_t flags,
    284         callback_t cbf,
    285         void* user,
    286         int32_t notificationFrames,
    287         const sp<IMemory>& sharedBuffer,
    288         bool threadCanCallJava,
    289         audio_session_t sessionId,
    290         transfer_type transferType,
    291         const audio_offload_info_t *offloadInfo,
    292         int uid,
    293         pid_t pid,
    294         const audio_attributes_t* pAttributes,
    295         bool doNotReconnect,
    296         float maxRequiredSpeed)
    297 {
    298     ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
    299           "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
    300           streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
    301           sessionId, transferType, uid, pid);
    302 
    303     mThreadCanCallJava = threadCanCallJava;
    304 
    305     switch (transferType) {
    306     case TRANSFER_DEFAULT:
    307         if (sharedBuffer != 0) {
    308             transferType = TRANSFER_SHARED;
    309         } else if (cbf == NULL || threadCanCallJava) {
    310             transferType = TRANSFER_SYNC;
    311         } else {
    312             transferType = TRANSFER_CALLBACK;
    313         }
    314         break;
    315     case TRANSFER_CALLBACK:
    316         if (cbf == NULL || sharedBuffer != 0) {
    317             ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
    318             return BAD_VALUE;
    319         }
    320         break;
    321     case TRANSFER_OBTAIN:
    322     case TRANSFER_SYNC:
    323         if (sharedBuffer != 0) {
     324             ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
    325             return BAD_VALUE;
    326         }
    327         break;
    328     case TRANSFER_SHARED:
    329         if (sharedBuffer == 0) {
    330             ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
    331             return BAD_VALUE;
    332         }
    333         break;
    334     default:
    335         ALOGE("Invalid transfer type %d", transferType);
    336         return BAD_VALUE;
    337     }
    338     mSharedBuffer = sharedBuffer;
    339     mTransfer = transferType;
    340     mDoNotReconnect = doNotReconnect;
    341 
    342     ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
    343             sharedBuffer->size());
    344 
    345     ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
    346 
    347     // invariant that mAudioTrack != 0 is true only after set() returns successfully
    348     if (mAudioTrack != 0) {
    349         ALOGE("Track already in use");
    350         return INVALID_OPERATION;
    351     }
    352 
    353     // handle default values first.
    354     if (streamType == AUDIO_STREAM_DEFAULT) {
    355         streamType = AUDIO_STREAM_MUSIC;
    356     }
    357     if (pAttributes == NULL) {
    358         if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
    359             ALOGE("Invalid stream type %d", streamType);
    360             return BAD_VALUE;
    361         }
    362         mStreamType = streamType;
    363 
    364     } else {
    365         // stream type shouldn't be looked at, this track has audio attributes
    366         memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
    367         ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
    368                 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
    369         mStreamType = AUDIO_STREAM_DEFAULT;
    370         if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
    371             flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
    372         }
    373         if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
    374             flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
    375         }
    376     }
    377 
     378     // These defaults should probably come from AudioFlinger too...
    379     if (format == AUDIO_FORMAT_DEFAULT) {
    380         format = AUDIO_FORMAT_PCM_16_BIT;
    381     } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
    382         mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    383     }
    384 
    385     // validate parameters
    386     if (!audio_is_valid_format(format)) {
    387         ALOGE("Invalid format %#x", format);
    388         return BAD_VALUE;
    389     }
    390     mFormat = format;
    391 
    392     if (!audio_is_output_channel(channelMask)) {
    393         ALOGE("Invalid channel mask %#x", channelMask);
    394         return BAD_VALUE;
    395     }
    396     mChannelMask = channelMask;
    397     uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    398     mChannelCount = channelCount;
    399 
    400     // force direct flag if format is not linear PCM
    401     // or offload was requested
    402     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
    403             || !audio_is_linear_pcm(format)) {
    404         ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
    405                     ? "Offload request, forcing to Direct Output"
    406                     : "Not linear PCM, forcing to Direct Output");
    407         flags = (audio_output_flags_t)
    408                 // FIXME why can't we allow direct AND fast?
    409                 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    410     }
    411 
    412     // force direct flag if HW A/V sync requested
    413     if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
    414         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    415     }
    416 
    417     if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
    418         if (audio_has_proportional_frames(format)) {
    419             mFrameSize = channelCount * audio_bytes_per_sample(format);
    420         } else {
    421             mFrameSize = sizeof(uint8_t);
    422         }
    423     } else {
    424         ALOG_ASSERT(audio_has_proportional_frames(format));
    425         mFrameSize = channelCount * audio_bytes_per_sample(format);
    426         // createTrack will return an error if PCM format is not supported by server,
    427         // so no need to check for specific PCM formats here
    428     }
    429 
    430     // sampling rate must be specified for direct outputs
    431     if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
    432         return BAD_VALUE;
    433     }
    434     mSampleRate = sampleRate;
    435     mOriginalSampleRate = sampleRate;
    436     mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    437     // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    438     mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
    439 
    440     // Make copy of input parameter offloadInfo so that in the future:
    441     //  (a) createTrack_l doesn't need it as an input parameter
    442     //  (b) we can support re-creation of offloaded tracks
    443     if (offloadInfo != NULL) {
    444         mOffloadInfoCopy = *offloadInfo;
    445         mOffloadInfo = &mOffloadInfoCopy;
    446     } else {
    447         mOffloadInfo = NULL;
    448     }
    449 
    450     mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    451     mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    452     mSendLevel = 0.0f;
    453     // mFrameCount is initialized in createTrack_l
    454     mReqFrameCount = frameCount;
    455     if (notificationFrames >= 0) {
    456         mNotificationFramesReq = notificationFrames;
    457         mNotificationsPerBufferReq = 0;
    458     } else {
    459         if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
    460             ALOGE("notificationFrames=%d not permitted for non-fast track",
    461                     notificationFrames);
    462             return BAD_VALUE;
    463         }
    464         if (frameCount > 0) {
    465             ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
    466                     notificationFrames, frameCount);
    467             return BAD_VALUE;
    468         }
    469         mNotificationFramesReq = 0;
    470         const uint32_t minNotificationsPerBuffer = 1;
    471         const uint32_t maxNotificationsPerBuffer = 8;
    472         mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
    473                 max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
    474         ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
    475                 "notificationFrames=%d clamped to the range -%u to -%u",
    476                 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
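                 // Illustrative example: notificationFrames == -4 requests four notifications per
                 // buffer; values outside the range -1..-8 are clamped, as warned above.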
    477     }
    478     mNotificationFramesAct = 0;
    479     if (sessionId == AUDIO_SESSION_ALLOCATE) {
    480         mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    481     } else {
    482         mSessionId = sessionId;
    483     }
    484     int callingpid = IPCThreadState::self()->getCallingPid();
    485     int mypid = getpid();
    486     if (uid == -1 || (callingpid != mypid)) {
    487         mClientUid = IPCThreadState::self()->getCallingUid();
    488     } else {
    489         mClientUid = uid;
    490     }
    491     if (pid == -1 || (callingpid != mypid)) {
    492         mClientPid = callingpid;
    493     } else {
    494         mClientPid = pid;
    495     }
    496     mAuxEffectId = 0;
    497     mOrigFlags = mFlags = flags;
    498     mCbf = cbf;
    499 
    500     if (cbf != NULL) {
    501         mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
    502         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    503         // thread begins in paused state, and will not reference us until start()
    504     }
    505 
    506     // create the IAudioTrack
    507     status_t status = createTrack_l();
    508 
    509     if (status != NO_ERROR) {
    510         if (mAudioTrackThread != 0) {
    511             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
    512             mAudioTrackThread->requestExitAndWait();
    513             mAudioTrackThread.clear();
    514         }
    515         return status;
    516     }
    517 
    518     mStatus = NO_ERROR;
    519     mUserData = user;
    520     mLoopCount = 0;
    521     mLoopStart = 0;
    522     mLoopEnd = 0;
    523     mLoopCountNotified = 0;
    524     mMarkerPosition = 0;
    525     mMarkerReached = false;
    526     mNewPosition = 0;
    527     mUpdatePeriod = 0;
    528     mPosition = 0;
    529     mReleased = 0;
    530     mStartUs = 0;
    531     AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    532     mSequence = 1;
    533     mObservedSequence = mSequence;
    534     mInUnderrun = false;
    535     mPreviousTimestampValid = false;
    536     mTimestampStartupGlitchReported = false;
    537     mRetrogradeMotionReported = false;
    538     mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    539     mUnderrunCountOffset = 0;
    540     mFramesWritten = 0;
    541     mFramesWrittenServerOffset = 0;
    542 
    543     return NO_ERROR;
    544 }
    545 
    546 // -------------------------------------------------------------------------
    547 
    548 status_t AudioTrack::start()
    549 {
    550     AutoMutex lock(mLock);
    551 
    552     if (mState == STATE_ACTIVE) {
    553         return INVALID_OPERATION;
    554     }
    555 
    556     mInUnderrun = true;
    557 
    558     State previousState = mState;
    559     if (previousState == STATE_PAUSED_STOPPING) {
    560         mState = STATE_STOPPING;
    561     } else {
    562         mState = STATE_ACTIVE;
    563     }
    564     (void) updateAndGetPosition_l();
    565     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
    566         // reset current position as seen by client to 0
    567         mPosition = 0;
    568         mPreviousTimestampValid = false;
    569         mTimestampStartupGlitchReported = false;
    570         mRetrogradeMotionReported = false;
    571         mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    572 
    573         // read last server side position change via timestamp.
    574         ExtendedTimestamp ets;
    575         if (mProxy->getTimestamp(&ets) == OK &&
    576                 ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
    577             // Server side has consumed something, but is it finished consuming?
    578             // It is possible since flush and stop are asynchronous that the server
    579             // is still active at this point.
    580             ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
    581                     (long long)(mFramesWrittenServerOffset
    582                             + ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
    583                     (long long)ets.mFlushed,
    584                     (long long)mFramesWritten);
    585             mFramesWrittenServerOffset = -ets.mPosition[ExtendedTimestamp::LOCATION_SERVER];
    586         }
    587         mFramesWritten = 0;
    588         mProxy->clearTimestamp(); // need new server push for valid timestamp
    589         mMarkerReached = false;
    590 
    591         // For offloaded tracks, we don't know if the hardware counters are really zero here,
    592         // since the flush is asynchronous and stop may not fully drain.
    593         // We save the time when the track is started to later verify whether
    594         // the counters are realistic (i.e. start from zero after this time).
    595         mStartUs = getNowUs();
    596 
    597         // force refresh of remaining frames by processAudioBuffer() as last
    598         // write before stop could be partial.
    599         mRefreshRemaining = true;
    600     }
    601     mNewPosition = mPosition + mUpdatePeriod;
    602     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
    603 
    604     status_t status = NO_ERROR;
    605     if (!(flags & CBLK_INVALID)) {
    606         status = mAudioTrack->start();
    607         if (status == DEAD_OBJECT) {
    608             flags |= CBLK_INVALID;
    609         }
    610     }
    611     if (flags & CBLK_INVALID) {
    612         status = restoreTrack_l("start");
    613     }
    614 
    615     // resume or pause the callback thread as needed.
    616     sp<AudioTrackThread> t = mAudioTrackThread;
    617     if (status == NO_ERROR) {
    618         if (t != 0) {
    619             if (previousState == STATE_STOPPING) {
    620                 mProxy->interrupt();
    621             } else {
    622                 t->resume();
    623             }
    624         } else {
    625             mPreviousPriority = getpriority(PRIO_PROCESS, 0);
    626             get_sched_policy(0, &mPreviousSchedulingGroup);
    627             androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    628         }
    629     } else {
    630         ALOGE("start() status %d", status);
    631         mState = previousState;
    632         if (t != 0) {
    633             if (previousState != STATE_STOPPING) {
    634                 t->pause();
    635             }
    636         } else {
    637             setpriority(PRIO_PROCESS, 0, mPreviousPriority);
    638             set_sched_policy(0, mPreviousSchedulingGroup);
    639         }
    640     }
    641 
    642     return status;
    643 }
    644 
    645 void AudioTrack::stop()
    646 {
    647     AutoMutex lock(mLock);
    648     if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
    649         return;
    650     }
    651 
    652     if (isOffloaded_l()) {
    653         mState = STATE_STOPPING;
    654     } else {
    655         mState = STATE_STOPPED;
    656         mReleased = 0;
    657     }
    658 
    659     mProxy->interrupt();
    660     mAudioTrack->stop();
    661 
    662     // Note: legacy handling - stop does not clear playback marker
    663     // and periodic update counter, but flush does for streaming tracks.
    664 
    665     if (mSharedBuffer != 0) {
    666         // clear buffer position and loop count.
    667         mStaticProxy->setBufferPositionAndLoop(0 /* position */,
    668                 0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    669     }
    670 
    671     sp<AudioTrackThread> t = mAudioTrackThread;
    672     if (t != 0) {
    673         if (!isOffloaded_l()) {
    674             t->pause();
    675         }
    676     } else {
    677         setpriority(PRIO_PROCESS, 0, mPreviousPriority);
    678         set_sched_policy(0, mPreviousSchedulingGroup);
    679     }
    680 }
    681 
    682 bool AudioTrack::stopped() const
    683 {
    684     AutoMutex lock(mLock);
    685     return mState != STATE_ACTIVE;
    686 }
    687 
    688 void AudioTrack::flush()
    689 {
    690     if (mSharedBuffer != 0) {
    691         return;
    692     }
    693     AutoMutex lock(mLock);
    694     if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
    695         return;
    696     }
    697     flush_l();
    698 }
    699 
    700 void AudioTrack::flush_l()
    701 {
    702     ALOG_ASSERT(mState != STATE_ACTIVE);
    703 
    704     // clear playback marker and periodic update counter
    705     mMarkerPosition = 0;
    706     mMarkerReached = false;
    707     mUpdatePeriod = 0;
    708     mRefreshRemaining = true;
    709 
    710     mState = STATE_FLUSHED;
    711     mReleased = 0;
    712     if (isOffloaded_l()) {
    713         mProxy->interrupt();
    714     }
    715     mProxy->flush();
    716     mAudioTrack->flush();
    717 }
    718 
    719 void AudioTrack::pause()
    720 {
    721     AutoMutex lock(mLock);
    722     if (mState == STATE_ACTIVE) {
    723         mState = STATE_PAUSED;
    724     } else if (mState == STATE_STOPPING) {
    725         mState = STATE_PAUSED_STOPPING;
    726     } else {
    727         return;
    728     }
    729     mProxy->interrupt();
    730     mAudioTrack->pause();
    731 
    732     if (isOffloaded_l()) {
    733         if (mOutput != AUDIO_IO_HANDLE_NONE) {
    734             // An offload output can be re-used between two audio tracks having
    735             // the same configuration. A timestamp query for a paused track
    736             // while the other is running would return an incorrect time.
    737             // To fix this, cache the playback position on a pause() and return
    738             // this time when requested until the track is resumed.
    739 
    740             // OffloadThread sends HAL pause in its threadLoop. Time saved
    741             // here can be slightly off.
    742 
    743             // TODO: check return code for getRenderPosition.
    744 
    745             uint32_t halFrames;
    746             AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
    747             ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
    748         }
    749     }
    750 }
    751 
    752 status_t AudioTrack::setVolume(float left, float right)
    753 {
    754     // This duplicates a test by AudioTrack JNI, but that is not the only caller
    755     if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
    756             isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
    757         return BAD_VALUE;
    758     }
    759 
    760     AutoMutex lock(mLock);
    761     mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    762     mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
    763 
    764     mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
    765 
    766     if (isOffloaded_l()) {
    767         mAudioTrack->signal();
    768     }
    769     return NO_ERROR;
    770 }
    771 
    772 status_t AudioTrack::setVolume(float volume)
    773 {
    774     return setVolume(volume, volume);
    775 }
    776 
    777 status_t AudioTrack::setAuxEffectSendLevel(float level)
    778 {
    779     // This duplicates a test by AudioTrack JNI, but that is not the only caller
    780     if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
    781         return BAD_VALUE;
    782     }
    783 
    784     AutoMutex lock(mLock);
    785     mSendLevel = level;
    786     mProxy->setSendLevel(level);
    787 
    788     return NO_ERROR;
    789 }
    790 
    791 void AudioTrack::getAuxEffectSendLevel(float* level) const
    792 {
    793     if (level != NULL) {
    794         *level = mSendLevel;
    795     }
    796 }
    797 
    798 status_t AudioTrack::setSampleRate(uint32_t rate)
    799 {
    800     AutoMutex lock(mLock);
    801     if (rate == mSampleRate) {
    802         return NO_ERROR;
    803     }
    804     if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
    805         return INVALID_OPERATION;
    806     }
    807     if (mOutput == AUDIO_IO_HANDLE_NONE) {
    808         return NO_INIT;
    809     }
    810     // NOTE: it is theoretically possible, but highly unlikely, that a device change
    811     // could mean a previously allowed sampling rate is no longer allowed.
    812     uint32_t afSamplingRate;
    813     if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
    814         return NO_INIT;
    815     }
    816     // pitch is emulated by adjusting speed and sampleRate
    817     const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    818     if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
    819         return BAD_VALUE;
    820     }
    821     // TODO: Should we also check if the buffer size is compatible?
    822 
    823     mSampleRate = rate;
    824     mProxy->setSampleRate(effectiveSampleRate);
    825 
    826     return NO_ERROR;
    827 }
    828 
    829 uint32_t AudioTrack::getSampleRate() const
    830 {
    831     AutoMutex lock(mLock);
    832 
    833     // sample rate can be updated during playback by the offloaded decoder so we need to
    834     // query the HAL and update if needed.
     835     // FIXME use Proxy return channel to update the rate from server and avoid polling here
    836     if (isOffloadedOrDirect_l()) {
    837         if (mOutput != AUDIO_IO_HANDLE_NONE) {
    838             uint32_t sampleRate = 0;
    839             status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
    840             if (status == NO_ERROR) {
    841                 mSampleRate = sampleRate;
    842             }
    843         }
    844     }
    845     return mSampleRate;
    846 }
    847 
    848 uint32_t AudioTrack::getOriginalSampleRate() const
    849 {
    850     return mOriginalSampleRate;
    851 }
    852 
    853 status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
    854 {
    855     AutoMutex lock(mLock);
    856     if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
    857         return NO_ERROR;
    858     }
    859     if (isOffloadedOrDirect_l()) {
    860         return INVALID_OPERATION;
    861     }
    862     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
    863         return INVALID_OPERATION;
    864     }
    865 
    866     ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
    867             mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    868     // pitch is emulated by adjusting speed and sampleRate
    869     const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    870     const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    871     const float effectivePitch = adjustPitch(playbackRate.mPitch);
    872     AudioPlaybackRate playbackRateTemp = playbackRate;
    873     playbackRateTemp.mSpeed = effectiveSpeed;
    874     playbackRateTemp.mPitch = effectivePitch;
    875 
    876     ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
    877             effectiveRate, effectiveSpeed, effectivePitch);
    878 
    879     if (!isAudioPlaybackRateValid(playbackRateTemp)) {
    880         ALOGV("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
    881                 playbackRate.mSpeed, playbackRate.mPitch);
    882         return BAD_VALUE;
    883     }
    884     // Check if the buffer size is compatible.
    885     if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
    886         ALOGV("setPlaybackRate(%f, %f) failed (buffer size)",
    887                 playbackRate.mSpeed, playbackRate.mPitch);
    888         return BAD_VALUE;
    889     }
    890 
    891     // Check resampler ratios are within bounds
    892     if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
    893         ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
    894                 playbackRate.mSpeed, playbackRate.mPitch);
    895         return BAD_VALUE;
    896     }
    897 
    898     if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
    899         ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
    900                         playbackRate.mSpeed, playbackRate.mPitch);
    901         return BAD_VALUE;
    902     }
    903     mPlaybackRate = playbackRate;
    904     //set effective rates
    905     mProxy->setPlaybackRate(playbackRateTemp);
    906     mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    907     return NO_ERROR;
    908 }
    909 
    910 const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
    911 {
    912     AutoMutex lock(mLock);
    913     return mPlaybackRate;
    914 }
    915 
    916 ssize_t AudioTrack::getBufferSizeInFrames()
    917 {
    918     AutoMutex lock(mLock);
    919     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
    920         return NO_INIT;
    921     }
    922     return (ssize_t) mProxy->getBufferSizeInFrames();
    923 }
    924 
    925 status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
    926 {
    927     if (duration == nullptr) {
    928         return BAD_VALUE;
    929     }
    930     AutoMutex lock(mLock);
    931     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
    932         return NO_INIT;
    933     }
    934     ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
    935     if (bufferSizeInFrames < 0) {
    936         return (status_t)bufferSizeInFrames;
    937     }
    938     *duration = (int64_t)((double)bufferSizeInFrames * 1000000
    939             / ((double)mSampleRate * mPlaybackRate.mSpeed));
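             // Illustrative example: 960 frames at 48000 Hz and speed 1.0f yield 20000 us.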
    940     return NO_ERROR;
    941 }
    942 
    943 ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
    944 {
    945     AutoMutex lock(mLock);
    946     if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
    947         return NO_INIT;
    948     }
    949     // Reject if timed track or compressed audio.
    950     if (!audio_is_linear_pcm(mFormat)) {
    951         return INVALID_OPERATION;
    952     }
    953     return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
    954 }
    955 
    956 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
    957 {
    958     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
    959         return INVALID_OPERATION;
    960     }
    961 
    962     if (loopCount == 0) {
    963         ;
    964     } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
    965             loopEnd - loopStart >= MIN_LOOP) {
    966         ;
    967     } else {
    968         return BAD_VALUE;
    969     }
    970 
    971     AutoMutex lock(mLock);
    972     // See setPosition() regarding setting parameters such as loop points or position while active
    973     if (mState == STATE_ACTIVE) {
    974         return INVALID_OPERATION;
    975     }
    976     setLoop_l(loopStart, loopEnd, loopCount);
    977     return NO_ERROR;
    978 }
    979 
    980 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
    981 {
    982     // We do not update the periodic notification point.
    983     // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    984     mLoopCount = loopCount;
    985     mLoopEnd = loopEnd;
    986     mLoopStart = loopStart;
    987     mLoopCountNotified = loopCount;
    988     mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
    989 
    990     // Waking the AudioTrackThread is not needed as this cannot be called when active.
    991 }
    992 
    993 status_t AudioTrack::setMarkerPosition(uint32_t marker)
    994 {
    995     // The only purpose of setting marker position is to get a callback
    996     if (mCbf == NULL || isOffloadedOrDirect()) {
    997         return INVALID_OPERATION;
    998     }
    999 
   1000     AutoMutex lock(mLock);
   1001     mMarkerPosition = marker;
   1002     mMarkerReached = false;
   1003 
   1004     sp<AudioTrackThread> t = mAudioTrackThread;
   1005     if (t != 0) {
   1006         t->wake();
   1007     }
   1008     return NO_ERROR;
   1009 }
   1010 
   1011 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
   1012 {
   1013     if (isOffloadedOrDirect()) {
   1014         return INVALID_OPERATION;
   1015     }
   1016     if (marker == NULL) {
   1017         return BAD_VALUE;
   1018     }
   1019 
   1020     AutoMutex lock(mLock);
   1021     mMarkerPosition.getValue(marker);
   1022 
   1023     return NO_ERROR;
   1024 }
   1025 
   1026 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
   1027 {
   1028     // The only purpose of setting position update period is to get a callback
   1029     if (mCbf == NULL || isOffloadedOrDirect()) {
   1030         return INVALID_OPERATION;
   1031     }
   1032 
   1033     AutoMutex lock(mLock);
   1034     mNewPosition = updateAndGetPosition_l() + updatePeriod;
   1035     mUpdatePeriod = updatePeriod;
   1036 
   1037     sp<AudioTrackThread> t = mAudioTrackThread;
   1038     if (t != 0) {
   1039         t->wake();
   1040     }
   1041     return NO_ERROR;
   1042 }
   1043 
   1044 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
   1045 {
   1046     if (isOffloadedOrDirect()) {
   1047         return INVALID_OPERATION;
   1048     }
   1049     if (updatePeriod == NULL) {
   1050         return BAD_VALUE;
   1051     }
   1052 
   1053     AutoMutex lock(mLock);
   1054     *updatePeriod = mUpdatePeriod;
   1055 
   1056     return NO_ERROR;
   1057 }
   1058 
   1059 status_t AudioTrack::setPosition(uint32_t position)
   1060 {
   1061     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
   1062         return INVALID_OPERATION;
   1063     }
   1064     if (position > mFrameCount) {
   1065         return BAD_VALUE;
   1066     }
   1067 
   1068     AutoMutex lock(mLock);
   1069     // Currently we require that the player is inactive before setting parameters such as position
   1070     // or loop points.  Otherwise, there could be a race condition: the application could read the
   1071     // current position, compute a new position or loop parameters, and then set that position or
   1072     // loop parameters but it would do the "wrong" thing since the position has continued to advance
   1073     // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
   1074     // to specify how it wants to handle such scenarios.
   1075     if (mState == STATE_ACTIVE) {
   1076         return INVALID_OPERATION;
   1077     }
   1078     // After setting the position, use full update period before notification.
   1079     mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
   1080     mStaticProxy->setBufferPosition(position);
   1081 
   1082     // Waking the AudioTrackThread is not needed as this cannot be called when active.
   1083     return NO_ERROR;
   1084 }
   1085 
   1086 status_t AudioTrack::getPosition(uint32_t *position)
   1087 {
   1088     if (position == NULL) {
   1089         return BAD_VALUE;
   1090     }
   1091 
   1092     AutoMutex lock(mLock);
   1093     // FIXME: offloaded and direct tracks call into the HAL for render positions
   1094     // for compressed/synced data; however, we use proxy position for pure linear pcm data
   1095     // as we do not know the capability of the HAL for pcm position support and standby.
   1096     // There may be some latency differences between the HAL position and the proxy position.
   1097     if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
   1098         uint32_t dspFrames = 0;
   1099 
   1100         if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
   1101             ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
   1102             *position = mPausedPosition;
   1103             return NO_ERROR;
   1104         }
   1105 
   1106         if (mOutput != AUDIO_IO_HANDLE_NONE) {
   1107             uint32_t halFrames; // actually unused
   1108             (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
   1109             // FIXME: on getRenderPosition() error, we return OK with frame position 0.
   1110         }
   1111         // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
   1112         // due to hardware latency. We leave this behavior for now.
   1113         *position = dspFrames;
   1114     } else {
   1115         if (mCblk->mFlags & CBLK_INVALID) {
   1116             (void) restoreTrack_l("getPosition");
   1117             // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
   1118             // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
   1119         }
   1120 
   1121         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
   1122         *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
   1123                 0 : updateAndGetPosition_l().value();
   1124     }
   1125     return NO_ERROR;
   1126 }
   1127 
   1128 status_t AudioTrack::getBufferPosition(uint32_t *position)
   1129 {
   1130     if (mSharedBuffer == 0) {
   1131         return INVALID_OPERATION;
   1132     }
   1133     if (position == NULL) {
   1134         return BAD_VALUE;
   1135     }
   1136 
   1137     AutoMutex lock(mLock);
   1138     *position = mStaticProxy->getBufferPosition();
   1139     return NO_ERROR;
   1140 }
   1141 
   1142 status_t AudioTrack::reload()
   1143 {
   1144     if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
   1145         return INVALID_OPERATION;
   1146     }
   1147 
   1148     AutoMutex lock(mLock);
   1149     // See setPosition() regarding setting parameters such as loop points or position while active
   1150     if (mState == STATE_ACTIVE) {
   1151         return INVALID_OPERATION;
   1152     }
   1153     mNewPosition = mUpdatePeriod;
   1154     (void) updateAndGetPosition_l();
   1155     mPosition = 0;
   1156     mPreviousTimestampValid = false;
   1157 #if 0
   1158     // The documentation is not clear on the behavior of reload() and the restoration
   1159     // of loop count. Historically we have not restored loop count, start, end,
   1160     // but it makes sense if one desires to repeat playing a particular sound.
   1161     if (mLoopCount != 0) {
   1162         mLoopCountNotified = mLoopCount;
   1163         mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
   1164     }
   1165 #endif
   1166     mStaticProxy->setBufferPosition(0);
   1167     return NO_ERROR;
   1168 }
   1169 
   1170 audio_io_handle_t AudioTrack::getOutput() const
   1171 {
   1172     AutoMutex lock(mLock);
   1173     return mOutput;
   1174 }
   1175 
   1176 status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
   1177     AutoMutex lock(mLock);
   1178     if (mSelectedDeviceId != deviceId) {
   1179         mSelectedDeviceId = deviceId;
   1180         android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
   1181     }
   1182     return NO_ERROR;
   1183 }
   1184 
   1185 audio_port_handle_t AudioTrack::getOutputDevice() {
   1186     AutoMutex lock(mLock);
   1187     return mSelectedDeviceId;
   1188 }
   1189 
   1190 audio_port_handle_t AudioTrack::getRoutedDeviceId() {
   1191     AutoMutex lock(mLock);
   1192     if (mOutput == AUDIO_IO_HANDLE_NONE) {
   1193         return AUDIO_PORT_HANDLE_NONE;
   1194     }
   1195     return AudioSystem::getDeviceIdForIo(mOutput);
   1196 }
   1197 
   1198 status_t AudioTrack::attachAuxEffect(int effectId)
   1199 {
   1200     AutoMutex lock(mLock);
   1201     status_t status = mAudioTrack->attachAuxEffect(effectId);
   1202     if (status == NO_ERROR) {
   1203         mAuxEffectId = effectId;
   1204     }
   1205     return status;
   1206 }
   1207 
   1208 audio_stream_type_t AudioTrack::streamType() const
   1209 {
   1210     if (mStreamType == AUDIO_STREAM_DEFAULT) {
   1211         return audio_attributes_to_stream_type(&mAttributes);
   1212     }
   1213     return mStreamType;
   1214 }
   1215 
   1216 // -------------------------------------------------------------------------
   1217 
   1218 // must be called with mLock held
   1219 status_t AudioTrack::createTrack_l()
   1220 {
   1221     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
   1222     if (audioFlinger == 0) {
   1223         ALOGE("Could not get audioflinger");
   1224         return NO_INIT;
   1225     }
   1226 
   1227     if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
   1228         AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
   1229     }
   1230     audio_io_handle_t output;
   1231     audio_stream_type_t streamType = mStreamType;
   1232     audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
   1233 
   1234     // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
   1235     // After fast request is denied, we will request again if IAudioTrack is re-created.
   1236 
   1237     status_t status;
   1238     status = AudioSystem::getOutputForAttr(attr, &output,
   1239                                            mSessionId, &streamType, mClientUid,
   1240                                            mSampleRate, mFormat, mChannelMask,
   1241                                            mFlags, mSelectedDeviceId, mOffloadInfo);
   1242 
   1243     if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
   1244         ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
   1245               " channel mask %#x, flags %#x",
   1246               mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
   1247         return BAD_VALUE;
   1248     }
   1249     {
   1250     // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
   1251     // we must release it ourselves if anything goes wrong.
   1252 
   1253     // Not all of these values are needed under all conditions, but it is easier to get them all
   1254     status = AudioSystem::getLatency(output, &mAfLatency);
   1255     if (status != NO_ERROR) {
   1256         ALOGE("getLatency(%d) failed status %d", output, status);
   1257         goto release;
   1258     }
   1259     ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
   1260 
   1261     status = AudioSystem::getFrameCount(output, &mAfFrameCount);
   1262     if (status != NO_ERROR) {
   1263         ALOGE("getFrameCount(output=%d) status %d", output, status);
   1264         goto release;
   1265     }
   1266 
   1267     // TODO consider making this a member variable if there are other uses for it later
   1268     size_t afFrameCountHAL;
   1269     status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
   1270     if (status != NO_ERROR) {
   1271         ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
   1272         goto release;
   1273     }
   1274     ALOG_ASSERT(afFrameCountHAL > 0);
   1275 
   1276     status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
   1277     if (status != NO_ERROR) {
   1278         ALOGE("getSamplingRate(output=%d) status %d", output, status);
   1279         goto release;
   1280     }
   1281     if (mSampleRate == 0) {
   1282         mSampleRate = mAfSampleRate;
   1283         mOriginalSampleRate = mAfSampleRate;
   1284     }
   1285 
   1286     // Client can only express a preference for FAST.  Server will perform additional tests.
   1287     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
   1288         bool useCaseAllowed =
   1289             // either of these use cases:
   1290             // use case 1: shared buffer
   1291             (mSharedBuffer != 0) ||
   1292             // use case 2: callback transfer mode
   1293             (mTransfer == TRANSFER_CALLBACK) ||
   1294             // use case 3: obtain/release mode
   1295             (mTransfer == TRANSFER_OBTAIN) ||
   1296             // use case 4: synchronous write
   1297             ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
   1298         // sample rates must also match
   1299         bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
   1300         if (!fastAllowed) {
   1301             ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
   1302                 "track %u Hz, output %u Hz",
   1303                 mTransfer, mSampleRate, mAfSampleRate);
   1304             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
   1305         }
   1306     }
   1307 
   1308     mNotificationFramesAct = mNotificationFramesReq;
   1309 
   1310     size_t frameCount = mReqFrameCount;
   1311     if (!audio_has_proportional_frames(mFormat)) {
   1312 
   1313         if (mSharedBuffer != 0) {
   1314             // Same comment as below about ignoring frameCount parameter for set()
   1315             frameCount = mSharedBuffer->size();
   1316         } else if (frameCount == 0) {
   1317             frameCount = mAfFrameCount;
   1318         }
   1319         if (mNotificationFramesAct != frameCount) {
   1320             mNotificationFramesAct = frameCount;
   1321         }
   1322     } else if (mSharedBuffer != 0) {
   1323         // FIXME: Ensure client side memory buffers need
   1324         // not have additional alignment beyond sample
   1325         // (e.g. 16 bit stereo accessed as 32 bit frame).
   1326         size_t alignment = audio_bytes_per_sample(mFormat);
   1327         if (alignment & 1) {
   1328             // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
   1329             alignment = 1;
   1330         }
   1331         if (mChannelCount > 1) {
   1332             // More than 2 channels does not require stronger alignment than stereo
   1333             alignment <<= 1;
   1334         }
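                 // Illustrative example: AUDIO_FORMAT_PCM_16_BIT stereo gives alignment
                 // 2 << 1 == 4, so the shared buffer must start on a 4-byte boundary.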
   1335         if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
   1336             ALOGE("Invalid buffer alignment: address %p, channel count %u",
   1337                     mSharedBuffer->pointer(), mChannelCount);
   1338             status = BAD_VALUE;
   1339             goto release;
   1340         }
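                 // Worked example (illustrative): for 16-bit stereo PCM, audio_bytes_per_sample()
                 // returns 2; with mChannelCount > 1 the required alignment doubles to 4, so the
                 // shared buffer address must be 4-byte aligned to pass the check above.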
   1341 
   1342         // When initializing a shared buffer AudioTrack via constructors,
   1343         // there's no frameCount parameter.
   1344         // But when initializing a shared buffer AudioTrack via set(),
   1345         // there _is_ a frameCount parameter.  We silently ignore it.
   1346         frameCount = mSharedBuffer->size() / mFrameSize;
   1347     } else {
   1348         size_t minFrameCount = 0;
   1349         // For fast tracks the frame count calculations and checks are mostly done by server,
   1350         // but we try to respect the application's request for notifications per buffer.
   1351         if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
   1352             if (mNotificationsPerBufferReq > 0) {
   1353                 // Avoid possible arithmetic overflow during multiplication.
    1354                 // mNotificationsPerBuffer is clamped to a small integer earlier, so overflow is unlikely.
   1355                 if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
   1356                     ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
   1357                             mNotificationsPerBufferReq, afFrameCountHAL);
   1358                 } else {
   1359                     minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
   1360                 }
   1361             }
   1362         } else {
   1363             // for normal tracks precompute the frame count based on speed.
   1364             const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
   1365                             max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
   1366             minFrameCount = calculateMinFrameCount(
   1367                     mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
   1368                     speed /*, 0 mNotificationsPerBufferReq*/);
   1369         }
   1370         if (frameCount < minFrameCount) {
   1371             frameCount = minFrameCount;
   1372         }
   1373     }
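             // Summary of the frameCount selection above (editor's annotation):
             //   - non-proportional (e.g. compressed) formats: the shared buffer size, the requested
             //     count, or the output (AudioFlinger) frame count;
             //   - shared-buffer linear PCM: buffer size in bytes divided by mFrameSize;
             //   - streaming: at least the computed minFrameCount (server-driven for FAST tracks).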
   1374 
   1375     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
   1376 
   1377     pid_t tid = -1;
   1378     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
   1379         trackFlags |= IAudioFlinger::TRACK_FAST;
   1380         if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
   1381             tid = mAudioTrackThread->getTid();
   1382         }
   1383     }
   1384 
   1385     if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
   1386         trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
   1387     }
   1388 
   1389     if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
   1390         trackFlags |= IAudioFlinger::TRACK_DIRECT;
   1391     }
   1392 
   1393     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
   1394                                 // but we will still need the original value also
   1395     audio_session_t originalSessionId = mSessionId;
   1396     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
   1397                                                       mSampleRate,
   1398                                                       mFormat,
   1399                                                       mChannelMask,
   1400                                                       &temp,
   1401                                                       &trackFlags,
   1402                                                       mSharedBuffer,
   1403                                                       output,
   1404                                                       mClientPid,
   1405                                                       tid,
   1406                                                       &mSessionId,
   1407                                                       mClientUid,
   1408                                                       &status);
   1409     ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
   1410             "session ID changed from %d to %d", originalSessionId, mSessionId);
   1411 
   1412     if (status != NO_ERROR) {
   1413         ALOGE("AudioFlinger could not create track, status: %d", status);
   1414         goto release;
   1415     }
   1416     ALOG_ASSERT(track != 0);
   1417 
   1418     // AudioFlinger now owns the reference to the I/O handle,
   1419     // so we are no longer responsible for releasing it.
   1420 
   1421     // FIXME compare to AudioRecord
   1422     sp<IMemory> iMem = track->getCblk();
   1423     if (iMem == 0) {
   1424         ALOGE("Could not get control block");
   1425         return NO_INIT;
   1426     }
   1427     void *iMemPointer = iMem->pointer();
   1428     if (iMemPointer == NULL) {
   1429         ALOGE("Could not get control block pointer");
   1430         return NO_INIT;
   1431     }
   1432     // invariant that mAudioTrack != 0 is true only after set() returns successfully
   1433     if (mAudioTrack != 0) {
   1434         IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
   1435         mDeathNotifier.clear();
   1436     }
   1437     mAudioTrack = track;
   1438     mCblkMemory = iMem;
   1439     IPCThreadState::self()->flushCommands();
   1440 
   1441     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
   1442     mCblk = cblk;
   1443     // note that temp is the (possibly revised) value of frameCount
   1444     if (temp < frameCount || (frameCount == 0 && temp == 0)) {
    1445         // In the current design, the AudioTrack client checks and ensures frame count validity
    1446         // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
    1447         // except for fast tracks, which use a special method of assigning the frame count.
   1448         ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
   1449     }
   1450     frameCount = temp;
   1451 
   1452     mAwaitBoost = false;
   1453     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
   1454         if (trackFlags & IAudioFlinger::TRACK_FAST) {
   1455             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
   1456             if (!mThreadCanCallJava) {
   1457                 mAwaitBoost = true;
   1458             }
   1459         } else {
   1460             ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
   1461             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
   1462         }
   1463     }
   1464 
    1465     // Make sure that the application is notified with sufficient margin before underrun.
    1466     // The client can divide the AudioTrack buffer into sub-buffers,
    1467     // and expresses its preference to the server as the notification frame count.
   1468     if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
   1469         size_t maxNotificationFrames;
   1470         if (trackFlags & IAudioFlinger::TRACK_FAST) {
   1471             // notify every HAL buffer, regardless of the size of the track buffer
   1472             maxNotificationFrames = afFrameCountHAL;
   1473         } else {
   1474             // For normal tracks, use at least double-buffering if no sample rate conversion,
   1475             // or at least triple-buffering if there is sample rate conversion
   1476             const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
   1477             maxNotificationFrames = frameCount / nBuffering;
   1478         }
   1479         if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
   1480             if (mNotificationFramesAct == 0) {
   1481                 ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
   1482                     maxNotificationFrames, frameCount);
   1483             } else {
   1484                 ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
   1485                     mNotificationFramesAct, maxNotificationFrames, frameCount);
   1486             }
   1487             mNotificationFramesAct = (uint32_t) maxNotificationFrames;
   1488         }
   1489     }
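             // Worked example (illustrative values): for a normal (non-FAST) track with
             // frameCount == 1920 and no sample rate conversion (nBuffering == 2),
             // maxNotificationFrames == 960; a requested notification count of 0, or anything
             // above 960, is replaced by 960.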
   1490 
   1491     // We retain a copy of the I/O handle, but don't own the reference
   1492     mOutput = output;
   1493     mRefreshRemaining = true;
   1494 
   1495     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
   1496     // is the value of pointer() for the shared buffer, otherwise buffers points
   1497     // immediately after the control block.  This address is for the mapping within client
   1498     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
   1499     void* buffers;
   1500     if (mSharedBuffer == 0) {
   1501         buffers = cblk + 1;
   1502     } else {
   1503         buffers = mSharedBuffer->pointer();
   1504         if (buffers == NULL) {
   1505             ALOGE("Could not get buffer pointer");
   1506             return NO_INIT;
   1507         }
   1508     }
   1509 
   1510     mAudioTrack->attachAuxEffect(mAuxEffectId);
   1511     // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
   1512     // FIXME don't believe this lie
   1513     mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
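             // Illustrative example: with mAfLatency == 50 ms, frameCount == 960 and
             // mSampleRate == 48000, mLatency == 50 + (1000 * 960) / 48000 == 70 ms.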
   1514 
   1515     mFrameCount = frameCount;
   1516     // If IAudioTrack is re-created, don't let the requested frameCount
   1517     // decrease.  This can confuse clients that cache frameCount().
   1518     if (frameCount > mReqFrameCount) {
   1519         mReqFrameCount = frameCount;
   1520     }
   1521 
   1522     // reset server position to 0 as we have new cblk.
   1523     mServer = 0;
   1524 
   1525     // update proxy
   1526     if (mSharedBuffer == 0) {
   1527         mStaticProxy.clear();
   1528         mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
   1529     } else {
   1530         mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
   1531         mProxy = mStaticProxy;
   1532     }
   1533 
   1534     mProxy->setVolumeLR(gain_minifloat_pack(
   1535             gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
   1536             gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
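             // The two float volumes are converted to packed minifloat gains so the left and right
             // channel gains share a single 32-bit word (gain_from_float()/gain_minifloat_pack(),
             // assumed here to come from the audio_utils minifloat helpers).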
   1537 
   1538     mProxy->setSendLevel(mSendLevel);
   1539     const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
   1540     const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
   1541     const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
   1542     mProxy->setSampleRate(effectiveSampleRate);
   1543 
   1544     AudioPlaybackRate playbackRateTemp = mPlaybackRate;
   1545     playbackRateTemp.mSpeed = effectiveSpeed;
   1546     playbackRateTemp.mPitch = effectivePitch;
   1547     mProxy->setPlaybackRate(playbackRateTemp);
   1548     mProxy->setMinimum(mNotificationFramesAct);
   1549 
   1550     mDeathNotifier = new DeathNotifier(this);
   1551     IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
   1552 
   1553     if (mDeviceCallback != 0) {
   1554         AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
   1555     }
   1556 
   1557     return NO_ERROR;
   1558     }
   1559 
   1560 release:
   1561     AudioSystem::releaseOutput(output, streamType, mSessionId);
   1562     if (status == NO_ERROR) {
   1563         status = NO_INIT;
   1564     }
   1565     return status;
   1566 }
   1567 
   1568 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
   1569 {
   1570     if (audioBuffer == NULL) {
   1571         if (nonContig != NULL) {
   1572             *nonContig = 0;
   1573         }
   1574         return BAD_VALUE;
   1575     }
   1576     if (mTransfer != TRANSFER_OBTAIN) {
   1577         audioBuffer->frameCount = 0;
   1578         audioBuffer->size = 0;
   1579         audioBuffer->raw = NULL;
   1580         if (nonContig != NULL) {
   1581             *nonContig = 0;
   1582         }
   1583         return INVALID_OPERATION;
   1584     }
   1585 
   1586     const struct timespec *requested;
   1587     struct timespec timeout;
   1588     if (waitCount == -1) {
   1589         requested = &ClientProxy::kForever;
   1590     } else if (waitCount == 0) {
   1591         requested = &ClientProxy::kNonBlocking;
   1592     } else if (waitCount > 0) {
   1593         long long ms = WAIT_PERIOD_MS * (long long) waitCount;
   1594         timeout.tv_sec = ms / 1000;
   1595         timeout.tv_nsec = (int) (ms % 1000) * 1000000;
   1596         requested = &timeout;
   1597     } else {
   1598         ALOGE("%s invalid waitCount %d", __func__, waitCount);
   1599         requested = NULL;
   1600     }
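             // e.g. waitCount == 3 with WAIT_PERIOD_MS == 10 yields a 30 ms timeout
             // (tv_sec == 0, tv_nsec == 30000000).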
   1601     return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
   1602 }
   1603 
   1604 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
   1605         struct timespec *elapsed, size_t *nonContig)
   1606 {
   1607     // previous and new IAudioTrack sequence numbers are used to detect track re-creation
   1608     uint32_t oldSequence = 0;
   1609     uint32_t newSequence;
   1610 
   1611     Proxy::Buffer buffer;
   1612     status_t status = NO_ERROR;
   1613 
   1614     static const int32_t kMaxTries = 5;
   1615     int32_t tryCounter = kMaxTries;
   1616 
   1617     do {
    1618         // obtainBuffer() is called with the mutex unlocked, so hold extra references to these fields
    1619         // to keep them from going away if another thread re-creates the track during obtainBuffer().
   1620         sp<AudioTrackClientProxy> proxy;
   1621         sp<IMemory> iMem;
   1622 
   1623         {   // start of lock scope
   1624             AutoMutex lock(mLock);
   1625 
   1626             newSequence = mSequence;
   1627             // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
   1628             if (status == DEAD_OBJECT) {
   1629                 // re-create track, unless someone else has already done so
   1630                 if (newSequence == oldSequence) {
   1631                     status = restoreTrack_l("obtainBuffer");
   1632                     if (status != NO_ERROR) {
   1633                         buffer.mFrameCount = 0;
   1634                         buffer.mRaw = NULL;
   1635                         buffer.mNonContig = 0;
   1636                         break;
   1637                     }
   1638                 }
   1639             }
   1640             oldSequence = newSequence;
   1641 
   1642             if (status == NOT_ENOUGH_DATA) {
   1643                 restartIfDisabled();
   1644             }
   1645 
   1646             // Keep the extra references
   1647             proxy = mProxy;
   1648             iMem = mCblkMemory;
   1649 
   1650             if (mState == STATE_STOPPING) {
   1651                 status = -EINTR;
   1652                 buffer.mFrameCount = 0;
   1653                 buffer.mRaw = NULL;
   1654                 buffer.mNonContig = 0;
   1655                 break;
   1656             }
   1657 
   1658             // Non-blocking if track is stopped or paused
   1659             if (mState != STATE_ACTIVE) {
   1660                 requested = &ClientProxy::kNonBlocking;
   1661             }
   1662 
   1663         }   // end of lock scope
   1664 
   1665         buffer.mFrameCount = audioBuffer->frameCount;
   1666         // FIXME starts the requested timeout and elapsed over from scratch
   1667         status = proxy->obtainBuffer(&buffer, requested, elapsed);
   1668     } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
   1669 
   1670     audioBuffer->frameCount = buffer.mFrameCount;
   1671     audioBuffer->size = buffer.mFrameCount * mFrameSize;
   1672     audioBuffer->raw = buffer.mRaw;
   1673     if (nonContig != NULL) {
   1674         *nonContig = buffer.mNonContig;
   1675     }
   1676     return status;
   1677 }
   1678 
   1679 void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
   1680 {
   1681     // FIXME add error checking on mode, by adding an internal version
   1682     if (mTransfer == TRANSFER_SHARED) {
   1683         return;
   1684     }
   1685 
   1686     size_t stepCount = audioBuffer->size / mFrameSize;
   1687     if (stepCount == 0) {
   1688         return;
   1689     }
   1690 
   1691     Proxy::Buffer buffer;
   1692     buffer.mFrameCount = stepCount;
   1693     buffer.mRaw = audioBuffer->raw;
   1694 
   1695     AutoMutex lock(mLock);
   1696     mReleased += stepCount;
   1697     mInUnderrun = false;
   1698     mProxy->releaseBuffer(&buffer);
   1699 
   1700     // restart track if it was disabled by audioflinger due to previous underrun
   1701     restartIfDisabled();
   1702 }
   1703 
   1704 void AudioTrack::restartIfDisabled()
   1705 {
   1706     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
   1707     if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
   1708         ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
   1709         // FIXME ignoring status
   1710         mAudioTrack->start();
   1711     }
   1712 }
   1713 
   1714 // -------------------------------------------------------------------------
   1715 
   1716 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
   1717 {
   1718     if (mTransfer != TRANSFER_SYNC) {
   1719         return INVALID_OPERATION;
   1720     }
   1721 
   1722     if (isDirect()) {
   1723         AutoMutex lock(mLock);
   1724         int32_t flags = android_atomic_and(
   1725                             ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
   1726                             &mCblk->mFlags);
   1727         if (flags & CBLK_INVALID) {
   1728             return DEAD_OBJECT;
   1729         }
   1730     }
   1731 
   1732     if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
    1733         // Sanity-check: the user is most likely passing an error code, and it would
    1734         // make the return value ambiguous (actualSize vs error).
   1735         ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
   1736         return BAD_VALUE;
   1737     }
   1738 
   1739     size_t written = 0;
   1740     Buffer audioBuffer;
   1741 
   1742     while (userSize >= mFrameSize) {
   1743         audioBuffer.frameCount = userSize / mFrameSize;
   1744 
   1745         status_t err = obtainBuffer(&audioBuffer,
   1746                 blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
   1747         if (err < 0) {
   1748             if (written > 0) {
   1749                 break;
   1750             }
   1751             return ssize_t(err);
   1752         }
   1753 
   1754         size_t toWrite = audioBuffer.size;
   1755         memcpy(audioBuffer.i8, buffer, toWrite);
   1756         buffer = ((const char *) buffer) + toWrite;
   1757         userSize -= toWrite;
   1758         written += toWrite;
   1759 
   1760         releaseBuffer(&audioBuffer);
   1761     }
   1762 
   1763     if (written > 0) {
   1764         mFramesWritten += written / mFrameSize;
   1765     }
   1766     return written;
   1767 }
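         // Illustrative client-side usage of the blocking write path above (editor's sketch with
         // hypothetical data; requires a track configured for TRANSFER_SYNC):
         //     ssize_t n = track->write(pcmData, numBytes, true /*blocking*/);
         //     // n is the number of bytes actually written, or a negative status_t on error
         //     // (e.g. INVALID_OPERATION when the transfer mode is not TRANSFER_SYNC).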
   1768 
   1769 // -------------------------------------------------------------------------
   1770 
   1771 nsecs_t AudioTrack::processAudioBuffer()
   1772 {
   1773     // Currently the AudioTrack thread is not created if there are no callbacks.
   1774     // Would it ever make sense to run the thread, even without callbacks?
   1775     // If so, then replace this by checks at each use for mCbf != NULL.
   1776     LOG_ALWAYS_FATAL_IF(mCblk == NULL);
   1777 
   1778     mLock.lock();
   1779     if (mAwaitBoost) {
   1780         mAwaitBoost = false;
   1781         mLock.unlock();
   1782         static const int32_t kMaxTries = 5;
   1783         int32_t tryCounter = kMaxTries;
   1784         uint32_t pollUs = 10000;
   1785         do {
   1786             int policy = sched_getscheduler(0);
   1787             if (policy == SCHED_FIFO || policy == SCHED_RR) {
   1788                 break;
   1789             }
   1790             usleep(pollUs);
   1791             pollUs <<= 1;
   1792         } while (tryCounter-- > 0);
   1793         if (tryCounter < 0) {
   1794             ALOGE("did not receive expected priority boost on time");
   1795         }
   1796         // Run again immediately
   1797         return 0;
   1798     }
   1799 
   1800     // Can only reference mCblk while locked
   1801     int32_t flags = android_atomic_and(
   1802         ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
   1803 
   1804     // Check for track invalidation
   1805     if (flags & CBLK_INVALID) {
   1806         // for offloaded tracks restoreTrack_l() will just update the sequence and clear
   1807         // AudioSystem cache. We should not exit here but after calling the callback so
   1808         // that the upper layers can recreate the track
   1809         if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
   1810             status_t status __unused = restoreTrack_l("processAudioBuffer");
   1811             // FIXME unused status
   1812             // after restoration, continue below to make sure that the loop and buffer events
   1813             // are notified because they have been cleared from mCblk->mFlags above.
   1814         }
   1815     }
   1816 
   1817     bool waitStreamEnd = mState == STATE_STOPPING;
   1818     bool active = mState == STATE_ACTIVE;
   1819 
   1820     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
   1821     bool newUnderrun = false;
   1822     if (flags & CBLK_UNDERRUN) {
   1823 #if 0
    1824         // Currently in shared buffer mode, when the server reaches the end of the buffer,
    1825         // the track stays active in a continuous underrun state.  It's up to the application
    1826         // to pause or stop the track, or set the position to a new offset within the buffer.
    1827         // This was some experimental code to auto-pause on underrun.  Keeping it here
    1828         // in "if 0" so we can revisit this if we add a real sequencer for shared memory content.
   1829         if (mTransfer == TRANSFER_SHARED) {
   1830             mState = STATE_PAUSED;
   1831             active = false;
   1832         }
   1833 #endif
   1834         if (!mInUnderrun) {
   1835             mInUnderrun = true;
   1836             newUnderrun = true;
   1837         }
   1838     }
   1839 
   1840     // Get current position of server
   1841     Modulo<uint32_t> position(updateAndGetPosition_l());
   1842 
   1843     // Manage marker callback
   1844     bool markerReached = false;
   1845     Modulo<uint32_t> markerPosition(mMarkerPosition);
   1846     // uses 32 bit wraparound for comparison with position.
   1847     if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
   1848         mMarkerReached = markerReached = true;
   1849     }
   1850 
   1851     // Determine number of new position callback(s) that will be needed, while locked
   1852     size_t newPosCount = 0;
   1853     Modulo<uint32_t> newPosition(mNewPosition);
   1854     uint32_t updatePeriod = mUpdatePeriod;
   1855     // FIXME fails for wraparound, need 64 bits
   1856     if (updatePeriod > 0 && position >= newPosition) {
   1857         newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
   1858         mNewPosition += updatePeriod * newPosCount;
   1859     }
   1860 
   1861     // Cache other fields that will be needed soon
   1862     uint32_t sampleRate = mSampleRate;
   1863     float speed = mPlaybackRate.mSpeed;
   1864     const uint32_t notificationFrames = mNotificationFramesAct;
   1865     if (mRefreshRemaining) {
   1866         mRefreshRemaining = false;
   1867         mRemainingFrames = notificationFrames;
   1868         mRetryOnPartialBuffer = false;
   1869     }
   1870     size_t misalignment = mProxy->getMisalignment();
   1871     uint32_t sequence = mSequence;
   1872     sp<AudioTrackClientProxy> proxy = mProxy;
   1873 
   1874     // Determine the number of new loop callback(s) that will be needed, while locked.
   1875     int loopCountNotifications = 0;
   1876     uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
   1877 
   1878     if (mLoopCount > 0) {
   1879         int loopCount;
   1880         size_t bufferPosition;
   1881         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
   1882         loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
   1883         loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
   1884         mLoopCountNotified = loopCount; // discard any excess notifications
   1885     } else if (mLoopCount < 0) {
   1886         // FIXME: We're not accurate with notification count and position with infinite looping
   1887         // since loopCount from server side will always return -1 (we could decrement it).
   1888         size_t bufferPosition = mStaticProxy->getBufferPosition();
   1889         loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
   1890         loopPeriod = mLoopEnd - bufferPosition;
   1891     } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
   1892         size_t bufferPosition = mStaticProxy->getBufferPosition();
   1893         loopPeriod = mFrameCount - bufferPosition;
   1894     }
   1895 
   1896     // These fields don't need to be cached, because they are assigned only by set():
   1897     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
   1898     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
   1899 
   1900     mLock.unlock();
   1901 
   1902     // get anchor time to account for callbacks.
   1903     const nsecs_t timeBeforeCallbacks = systemTime();
   1904 
   1905     if (waitStreamEnd) {
   1906         // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
   1907         // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
   1908         // (and make sure we don't callback for more data while we're stopping).
   1909         // This helps with position, marker notifications, and track invalidation.
   1910         struct timespec timeout;
   1911         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
   1912         timeout.tv_nsec = 0;
   1913 
   1914         status_t status = proxy->waitStreamEndDone(&timeout);
   1915         switch (status) {
   1916         case NO_ERROR:
   1917         case DEAD_OBJECT:
   1918         case TIMED_OUT:
   1919             if (status != DEAD_OBJECT) {
    1920                 // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
   1921                 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
   1922                 mCbf(EVENT_STREAM_END, mUserData, NULL);
   1923             }
   1924             {
   1925                 AutoMutex lock(mLock);
   1926                 // The previously assigned value of waitStreamEnd is no longer valid,
   1927                 // since the mutex has been unlocked and either the callback handler
   1928                 // or another thread could have re-started the AudioTrack during that time.
   1929                 waitStreamEnd = mState == STATE_STOPPING;
   1930                 if (waitStreamEnd) {
   1931                     mState = STATE_STOPPED;
   1932                     mReleased = 0;
   1933                 }
   1934             }
   1935             if (waitStreamEnd && status != DEAD_OBJECT) {
   1936                return NS_INACTIVE;
   1937             }
   1938             break;
   1939         }
   1940         return 0;
   1941     }
   1942 
   1943     // perform callbacks while unlocked
   1944     if (newUnderrun) {
   1945         mCbf(EVENT_UNDERRUN, mUserData, NULL);
   1946     }
   1947     while (loopCountNotifications > 0) {
   1948         mCbf(EVENT_LOOP_END, mUserData, NULL);
   1949         --loopCountNotifications;
   1950     }
   1951     if (flags & CBLK_BUFFER_END) {
   1952         mCbf(EVENT_BUFFER_END, mUserData, NULL);
   1953     }
   1954     if (markerReached) {
   1955         mCbf(EVENT_MARKER, mUserData, &markerPosition);
   1956     }
   1957     while (newPosCount > 0) {
   1958         size_t temp = newPosition.value(); // FIXME size_t != uint32_t
   1959         mCbf(EVENT_NEW_POS, mUserData, &temp);
   1960         newPosition += updatePeriod;
   1961         newPosCount--;
   1962     }
   1963 
   1964     if (mObservedSequence != sequence) {
   1965         mObservedSequence = sequence;
   1966         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
   1967         // for offloaded tracks, just wait for the upper layers to recreate the track
   1968         if (isOffloadedOrDirect()) {
   1969             return NS_INACTIVE;
   1970         }
   1971     }
   1972 
   1973     // if inactive, then don't run me again until re-started
   1974     if (!active) {
   1975         return NS_INACTIVE;
   1976     }
   1977 
   1978     // Compute the estimated time until the next timed event (position, markers, loops)
   1979     // FIXME only for non-compressed audio
   1980     uint32_t minFrames = ~0;
   1981     if (!markerReached && position < markerPosition) {
   1982         minFrames = (markerPosition - position).value();
   1983     }
   1984     if (loopPeriod > 0 && loopPeriod < minFrames) {
   1985         // loopPeriod is already adjusted for actual position.
   1986         minFrames = loopPeriod;
   1987     }
   1988     if (updatePeriod > 0) {
   1989         minFrames = min(minFrames, (newPosition - position).value());
   1990     }
   1991 
   1992     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
   1993     static const uint32_t kPoll = 0;
   1994     if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
   1995         minFrames = kPoll * notificationFrames;
   1996     }
   1997 
   1998     // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
   1999     static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
   2000     const nsecs_t timeAfterCallbacks = systemTime();
   2001 
   2002     // Convert frame units to time units
   2003     nsecs_t ns = NS_WHENEVER;
   2004     if (minFrames != (uint32_t) ~0) {
   2005         ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
   2006         ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
   2007         // TODO: Should we warn if the callback time is too long?
   2008         if (ns < 0) ns = 0;
   2009     }
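             // Illustrative example: minFrames == 480 at sampleRate == 48000 and speed == 1.0
             // gives 10 ms, plus kWaitPeriodNs (10 ms) == 20 ms, before subtracting the time
             // already spent in the callbacks above.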
   2010 
   2011     // If not supplying data by EVENT_MORE_DATA, then we're done
   2012     if (mTransfer != TRANSFER_CALLBACK) {
   2013         return ns;
   2014     }
   2015 
   2016     // EVENT_MORE_DATA callback handling.
   2017     // Timing for linear pcm audio data formats can be derived directly from the
   2018     // buffer fill level.
   2019     // Timing for compressed data is not directly available from the buffer fill level,
   2020     // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
   2021     // to return a certain fill level.
   2022 
   2023     struct timespec timeout;
   2024     const struct timespec *requested = &ClientProxy::kForever;
   2025     if (ns != NS_WHENEVER) {
   2026         timeout.tv_sec = ns / 1000000000LL;
   2027         timeout.tv_nsec = ns % 1000000000LL;
   2028         ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
   2029         requested = &timeout;
   2030     }
   2031 
   2032     size_t writtenFrames = 0;
   2033     while (mRemainingFrames > 0) {
   2034 
   2035         Buffer audioBuffer;
   2036         audioBuffer.frameCount = mRemainingFrames;
   2037         size_t nonContig;
   2038         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
   2039         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
   2040                 "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
   2041         requested = &ClientProxy::kNonBlocking;
   2042         size_t avail = audioBuffer.frameCount + nonContig;
   2043         ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
   2044                 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
   2045         if (err != NO_ERROR) {
   2046             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
   2047                     (isOffloaded() && (err == DEAD_OBJECT))) {
   2048                 // FIXME bug 25195759
   2049                 return 1000000;
   2050             }
   2051             ALOGE("Error %d obtaining an audio buffer, giving up.", err);
   2052             return NS_NEVER;
   2053         }
   2054 
   2055         if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
   2056             mRetryOnPartialBuffer = false;
   2057             if (avail < mRemainingFrames) {
   2058                 if (ns > 0) { // account for obtain time
   2059                     const nsecs_t timeNow = systemTime();
   2060                     ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
   2061                 }
   2062                 nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
   2063                 if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
   2064                     ns = myns;
   2065                 }
   2066                 return ns;
   2067             }
   2068         }
   2069 
   2070         size_t reqSize = audioBuffer.size;
   2071         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
   2072         size_t writtenSize = audioBuffer.size;
   2073 
   2074         // Sanity check on returned size
   2075         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
   2076             ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
   2077                     reqSize, ssize_t(writtenSize));
   2078             return NS_NEVER;
   2079         }
   2080 
   2081         if (writtenSize == 0) {
   2082             // The callback is done filling buffers
   2083             // Keep this thread going to handle timed events and
   2084             // still try to get more data in intervals of WAIT_PERIOD_MS
   2085             // but don't just loop and block the CPU, so wait
   2086 
   2087             // mCbf(EVENT_MORE_DATA, ...) might either
   2088             // (1) Block until it can fill the buffer, returning 0 size on EOS.
   2089             // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
   2090             // (3) Return 0 size when no data is available, does not wait for more data.
   2091             //
    2092             // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
   2093             // We try to compute the wait time to avoid a tight sleep-wait cycle,
   2094             // especially for case (3).
   2095             //
    2096             // The decision to support (1) and (2) affects the sizing of mRemainingFrames
   2097             // and this loop; whereas for case (3) we could simply check once with the full
   2098             // buffer size and skip the loop entirely.
   2099 
   2100             nsecs_t myns;
   2101             if (audio_has_proportional_frames(mFormat)) {
   2102                 // time to wait based on buffer occupancy
   2103                 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
   2104                         framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
   2105                 // audio flinger thread buffer size (TODO: adjust for fast tracks)
   2106                 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
   2107                 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
    2108                 // add half the AudioFlinger buffer time to avoid soaking the CPU if datans is 0.
   2109                 myns = datans + (afns / 2);
   2110             } else {
   2111                 // FIXME: This could ping quite a bit if the buffer isn't full.
   2112                 // Note that when mState is stopping we waitStreamEnd, so it never gets here.
   2113                 myns = kWaitPeriodNs;
   2114             }
   2115             if (ns > 0) { // account for obtain and callback time
   2116                 const nsecs_t timeNow = systemTime();
   2117                 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
   2118             }
   2119             if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
   2120                 ns = myns;
   2121             }
   2122             return ns;
   2123         }
   2124 
   2125         size_t releasedFrames = writtenSize / mFrameSize;
   2126         audioBuffer.frameCount = releasedFrames;
   2127         mRemainingFrames -= releasedFrames;
   2128         if (misalignment >= releasedFrames) {
   2129             misalignment -= releasedFrames;
   2130         } else {
   2131             misalignment = 0;
   2132         }
   2133 
   2134         releaseBuffer(&audioBuffer);
   2135         writtenFrames += releasedFrames;
   2136 
   2137         // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
   2138         // if callback doesn't like to accept the full chunk
   2139         if (writtenSize < reqSize) {
   2140             continue;
   2141         }
   2142 
   2143         // There could be enough non-contiguous frames available to satisfy the remaining request
   2144         if (mRemainingFrames <= nonContig) {
   2145             continue;
   2146         }
   2147 
   2148 #if 0
   2149         // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
   2150         // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
   2151         // that total to a sum == notificationFrames.
   2152         if (0 < misalignment && misalignment <= mRemainingFrames) {
   2153             mRemainingFrames = misalignment;
   2154             return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
   2155         }
   2156 #endif
   2157 
   2158     }
   2159     if (writtenFrames > 0) {
   2160         AutoMutex lock(mLock);
   2161         mFramesWritten += writtenFrames;
   2162     }
   2163     mRemainingFrames = notificationFrames;
   2164     mRetryOnPartialBuffer = true;
   2165 
   2166     // A lot has transpired since ns was calculated, so run again immediately and re-calculate
   2167     return 0;
   2168 }
   2169 
   2170 status_t AudioTrack::restoreTrack_l(const char *from)
   2171 {
   2172     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
   2173           isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
   2174     ++mSequence;
   2175 
   2176     // refresh the audio configuration cache in this process to make sure we get new
   2177     // output parameters and new IAudioFlinger in createTrack_l()
   2178     AudioSystem::clearAudioConfigCache();
   2179 
   2180     if (isOffloadedOrDirect_l() || mDoNotReconnect) {
   2181         // FIXME re-creation of offloaded and direct tracks is not yet implemented;
   2182         // reconsider enabling for linear PCM encodings when position can be preserved.
   2183         return DEAD_OBJECT;
   2184     }
   2185 
   2186     // Save so we can return count since creation.
   2187     mUnderrunCountOffset = getUnderrunCount_l();
   2188 
   2189     // save the old static buffer position
   2190     size_t bufferPosition = 0;
   2191     int loopCount = 0;
   2192     if (mStaticProxy != 0) {
   2193         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
   2194     }
   2195 
   2196     mFlags = mOrigFlags;
   2197 
   2198     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
   2199     // following member variables: mAudioTrack, mCblkMemory and mCblk.
   2200     // It will also delete the strong references on previous IAudioTrack and IMemory.
   2201     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
   2202     status_t result = createTrack_l();
   2203 
   2204     if (result == NO_ERROR) {
    2205         // Take the frames that will be lost by track recreation into account in the saved position.
   2206         // For streaming tracks, this is the amount we obtained from the user/client
   2207         // (not the number actually consumed at the server - those are already lost).
   2208         if (mStaticProxy == 0) {
   2209             mPosition = mReleased;
   2210         }
   2211         // Continue playback from last known position and restore loop.
   2212         if (mStaticProxy != 0) {
   2213             if (loopCount != 0) {
   2214                 mStaticProxy->setBufferPositionAndLoop(bufferPosition,
   2215                         mLoopStart, mLoopEnd, loopCount);
   2216             } else {
   2217                 mStaticProxy->setBufferPosition(bufferPosition);
   2218                 if (bufferPosition == mFrameCount) {
   2219                     ALOGD("restoring track at end of static buffer");
   2220                 }
   2221             }
   2222         }
   2223         if (mState == STATE_ACTIVE) {
   2224             result = mAudioTrack->start();
   2225             mFramesWrittenServerOffset = mFramesWritten; // server resets to zero so we offset
   2226         }
   2227     }
   2228     if (result != NO_ERROR) {
   2229         ALOGW("restoreTrack_l() failed status %d", result);
   2230         mState = STATE_STOPPED;
   2231         mReleased = 0;
   2232     }
   2233 
   2234     return result;
   2235 }
   2236 
   2237 Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
   2238 {
   2239     // This is the sole place to read server consumed frames
   2240     Modulo<uint32_t> newServer(mProxy->getPosition());
   2241     const int32_t delta = (newServer - mServer).signedValue();
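             // The Modulo difference handles 32-bit wraparound: e.g. mServer == 0xFFFFFF00 and
             // newServer == 0x00000100 yields delta == +512 frames, not a large negative value.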
   2242     // TODO There is controversy about whether there can be "negative jitter" in server position.
   2243     //      This should be investigated further, and if possible, it should be addressed.
   2244     //      A more definite failure mode is infrequent polling by client.
   2245     //      One could call (void)getPosition_l() in releaseBuffer(),
   2246     //      so mReleased and mPosition are always lock-step as best possible.
   2247     //      That should ensure delta never goes negative for infrequent polling
   2248     //      unless the server has more than 2^31 frames in its buffer,
   2249     //      in which case the use of uint32_t for these counters has bigger issues.
   2250     ALOGE_IF(delta < 0,
   2251             "detected illegal retrograde motion by the server: mServer advanced by %d",
   2252             delta);
   2253     mServer = newServer;
   2254     if (delta > 0) { // avoid retrograde
   2255         mPosition += delta;
   2256     }
   2257     return mPosition;
   2258 }
   2259 
   2260 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
   2261 {
   2262     // applicable for mixing tracks only (not offloaded or direct)
   2263     if (mStaticProxy != 0) {
   2264         return true; // static tracks do not have issues with buffer sizing.
   2265     }
   2266     const size_t minFrameCount =
   2267             calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
   2268                 /*, 0 mNotificationsPerBufferReq*/);
   2269     ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
   2270             mFrameCount, minFrameCount);
   2271     return mFrameCount >= minFrameCount;
   2272 }
   2273 
   2274 status_t AudioTrack::setParameters(const String8& keyValuePairs)
   2275 {
   2276     AutoMutex lock(mLock);
   2277     return mAudioTrack->setParameters(keyValuePairs);
   2278 }
   2279 
   2280 status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
   2281 {
   2282     if (timestamp == nullptr) {
   2283         return BAD_VALUE;
   2284     }
   2285     AutoMutex lock(mLock);
   2286     return getTimestamp_l(timestamp);
   2287 }
   2288 
   2289 status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
   2290 {
   2291     if (mCblk->mFlags & CBLK_INVALID) {
   2292         const status_t status = restoreTrack_l("getTimestampExtended");
   2293         if (status != OK) {
   2294             // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
   2295             // recommending that the track be recreated.
   2296             return DEAD_OBJECT;
   2297         }
   2298     }
   2299     // check for offloaded/direct here in case restoring somehow changed those flags.
   2300     if (isOffloadedOrDirect_l()) {
   2301         return INVALID_OPERATION; // not supported
   2302     }
   2303     status_t status = mProxy->getTimestamp(timestamp);
   2304     LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
   2305     bool found = false;
   2306     timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
   2307     timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
   2308     // server side frame offset in case AudioTrack has been restored.
   2309     for (int i = ExtendedTimestamp::LOCATION_SERVER;
   2310             i < ExtendedTimestamp::LOCATION_MAX; ++i) {
   2311         if (timestamp->mTimeNs[i] >= 0) {
    2312             // apply the server offset (the flushed frame count is ignored
    2313             // so we don't report the jump when the flush occurs).
   2314             timestamp->mPosition[i] += mFramesWrittenServerOffset;
   2315             found = true;
   2316         }
   2317     }
   2318     return found ? OK : WOULD_BLOCK;
   2319 }
   2320 
   2321 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
   2322 {
   2323     AutoMutex lock(mLock);
   2324 
   2325     bool previousTimestampValid = mPreviousTimestampValid;
   2326     // Set false here to cover all the error return cases.
   2327     mPreviousTimestampValid = false;
   2328 
   2329     switch (mState) {
   2330     case STATE_ACTIVE:
   2331     case STATE_PAUSED:
   2332         break; // handle below
   2333     case STATE_FLUSHED:
   2334     case STATE_STOPPED:
   2335         return WOULD_BLOCK;
   2336     case STATE_STOPPING:
   2337     case STATE_PAUSED_STOPPING:
   2338         if (!isOffloaded_l()) {
   2339             return INVALID_OPERATION;
   2340         }
   2341         break; // offloaded tracks handled below
   2342     default:
   2343         LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
   2344         break;
   2345     }
   2346 
   2347     if (mCblk->mFlags & CBLK_INVALID) {
   2348         const status_t status = restoreTrack_l("getTimestamp");
   2349         if (status != OK) {
   2350             // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
   2351             // recommending that the track be recreated.
   2352             return DEAD_OBJECT;
   2353         }
   2354     }
   2355 
   2356     // The presented frame count must always lag behind the consumed frame count.
   2357     // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
   2358 
   2359     status_t status;
   2360     if (isOffloadedOrDirect_l()) {
   2361         // use Binder to get timestamp
   2362         status = mAudioTrack->getTimestamp(timestamp);
   2363     } else {
   2364         // read timestamp from shared memory
   2365         ExtendedTimestamp ets;
   2366         status = mProxy->getTimestamp(&ets);
   2367         if (status == OK) {
   2368             ExtendedTimestamp::Location location;
   2369             status = ets.getBestTimestamp(&timestamp, &location);
   2370 
   2371             if (status == OK) {
   2372                 // It is possible that the best location has moved from the kernel to the server.
   2373                 // In this case we adjust the position from the previous computed latency.
   2374                 if (location == ExtendedTimestamp::LOCATION_SERVER) {
   2375                     ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
   2376                             "getTimestamp() location moved from kernel to server");
   2377                     // check that the last kernel OK time info exists and the positions
   2378                     // are valid (if they predate the current track, the positions may
   2379                     // be zero or negative).
   2380                     const int64_t frames =
   2381                             (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
   2382                             ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
   2383                             ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
   2384                             ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
   2385                             ?
   2386                             int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
   2387                                     / 1000)
   2388                             :
   2389                             (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
   2390                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
   2391                     ALOGV("frame adjustment:%lld  timestamp:%s",
   2392                             (long long)frames, ets.toString().c_str());
   2393                     if (frames >= ets.mPosition[location]) {
   2394                         timestamp.mPosition = 0;
   2395                     } else {
   2396                         timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
   2397                     }
   2398                 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
   2399                     ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
   2400                             "getTimestamp() location moved from server to kernel");
   2401                 }
   2402                 mPreviousLocation = location;
   2403             } else {
   2404                 // right after AudioTrack is started, one may not find a timestamp
   2405                 ALOGV("getBestTimestamp did not find timestamp");
   2406             }
   2407         }
   2408         if (status == INVALID_OPERATION) {
   2409             status = WOULD_BLOCK;
   2410         }
   2411     }
   2412     if (status != NO_ERROR) {
   2413         ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
   2414         return status;
   2415     }
   2416     if (isOffloadedOrDirect_l()) {
   2417         if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
   2418             // use cached paused position in case another offloaded track is running.
   2419             timestamp.mPosition = mPausedPosition;
   2420             clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
   2421             return NO_ERROR;
   2422         }
   2423 
   2424         // Check whether a pending flush or stop has completed, as those commands may
    2425         // be asynchronous, may return near completion, or may exhibit glitchy behavior.
   2426         //
   2427         // Originally this showed up as the first timestamp being a continuation of
   2428         // the previous song under gapless playback.
   2429         // However, we sometimes see zero timestamps, then a glitch of
   2430         // the previous song's position, and then correct timestamps afterwards.
   2431         if (mStartUs != 0 && mSampleRate != 0) {
   2432             static const int kTimeJitterUs = 100000; // 100 ms
   2433             static const int k1SecUs = 1000000;
   2434 
   2435             const int64_t timeNow = getNowUs();
   2436 
   2437             if (timeNow < mStartUs + k1SecUs) { // within first second of starting
   2438                 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
   2439                 if (timestampTimeUs < mStartUs) {
   2440                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
   2441                 }
   2442                 const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
   2443                 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
   2444                         / ((double)mSampleRate * mPlaybackRate.mSpeed);
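                         // Illustrative example: timestamp.mPosition == 4800 frames at
                         // mSampleRate == 48000 and speed == 1.0 corresponds to
                         // deltaPositionByUs == 100000 (100 ms).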
   2445 
   2446                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
   2447                     // Verify that the counter can't count faster than the sample rate
   2448                     // since the start time.  If greater, then that means we may have failed
   2449                     // to completely flush or stop the previous playing track.
   2450                     ALOGW_IF(!mTimestampStartupGlitchReported,
   2451                             "getTimestamp startup glitch detected"
   2452                             " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
   2453                             (long long)deltaTimeUs, (long long)deltaPositionByUs,
   2454                             timestamp.mPosition);
   2455                     mTimestampStartupGlitchReported = true;
   2456                     if (previousTimestampValid
   2457                             && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
   2458                         timestamp = mPreviousTimestamp;
   2459                         mPreviousTimestampValid = true;
   2460                         return NO_ERROR;
   2461                     }
   2462                     return WOULD_BLOCK;
   2463                 }
   2464                 if (deltaPositionByUs != 0) {
   2465                     mStartUs = 0; // don't check again, we got valid nonzero position.
   2466                 }
   2467             } else {
   2468                 mStartUs = 0; // don't check again, start time expired.
   2469             }
   2470             mTimestampStartupGlitchReported = false;
   2471         }
   2472     } else {
   2473         // Update the mapping between local consumed (mPosition) and server consumed (mServer)
   2474         (void) updateAndGetPosition_l();
   2475         // Server consumed (mServer) and presented both use the same server time base,
   2476         // and server consumed is always >= presented.
   2477         // The delta between these represents the number of frames in the buffer pipeline.
    2478         // If this delta is greater than the client position, it means that the presented
    2479         // position is still stuck at the starting line (figuratively speaking),
   2480         // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
   2481         // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
   2482         // mPosition exceeds 32 bits.
   2483         // TODO Remove when timestamp is updated to contain pipeline status info.
   2484         const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
   2485         if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
   2486                 && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
   2487             return INVALID_OPERATION;
   2488         }
   2489         // Convert timestamp position from server time base to client time base.
   2490         // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
   2491         // But if we change it to 64-bit then this could fail.
   2492         // Use Modulo computation here.
   2493         timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
   2494         // Immediately after a call to getPosition_l(), mPosition and
   2495         // mServer both represent the same frame position.  mPosition is
   2496         // in client's point of view, and mServer is in server's point of
   2497         // view.  So the difference between them is the "fudge factor"
   2498         // between client and server views due to stop() and/or new
   2499         // IAudioTrack.  And timestamp.mPosition is initially in server's
   2500         // point of view, so we need to apply the same fudge factor to it.
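                 // Worked example with hypothetical values: if mPosition = 1000 (client view)
                 // and mServer = 400 (server view), the fudge factor is 600 frames.  A
                 // server-side timestamp.mPosition of 300 then maps to 1000 - 400 + 300 = 900
                 // in the client view, with 400 - 300 = 100 frames still in the pipeline.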
   2501     }
   2502 
   2503     // Prevent retrograde motion in timestamp.
   2504     // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
   2505     if (status == NO_ERROR) {
   2506         if (previousTimestampValid) {
    2507 #define TIME_TO_NANOS(time) ((int64_t)(time).tv_sec * 1000000000 + (time).tv_nsec)
   2508             const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
   2509             const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
   2510 #undef TIME_TO_NANOS
   2511             if (currentTimeNanos < previousTimeNanos) {
   2512                 ALOGW("retrograde timestamp time");
   2513                 // FIXME Consider blocking this from propagating upwards.
   2514             }
   2515 
   2516             // Looking at signed delta will work even when the timestamps
   2517             // are wrapping around.
   2518             int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
   2519                     - mPreviousTimestamp.mPosition).signedValue();
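                     // Worked example with hypothetical values: if timestamp.mPosition = 5 and
                     // mPreviousTimestamp.mPosition = 0xFFFFFFF0, the unsigned subtraction wraps
                     // to 21 and signedValue() yields +21, i.e. forward motion across the wrap.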
    2520             // The reported position can bobble slightly as an artifact; this threshold hides it.
   2521             static const int32_t MINIMUM_POSITION_DELTA = 8;
   2522             if (deltaPosition < 0) {
   2523                 // Only report once per position instead of spamming the log.
   2524                 if (!mRetrogradeMotionReported) {
   2525                     ALOGW("retrograde timestamp position corrected, %d = %u - %u",
   2526                             deltaPosition,
   2527                             timestamp.mPosition,
   2528                             mPreviousTimestamp.mPosition);
   2529                     mRetrogradeMotionReported = true;
   2530                 }
   2531             } else {
   2532                 mRetrogradeMotionReported = false;
   2533             }
   2534             if (deltaPosition < MINIMUM_POSITION_DELTA) {
   2535                 timestamp = mPreviousTimestamp;  // Use last valid timestamp.
   2536             }
   2537         }
   2538         mPreviousTimestamp = timestamp;
   2539         mPreviousTimestampValid = true;
   2540     }
   2541 
   2542     return status;
   2543 }
   2544 
   2545 String8 AudioTrack::getParameters(const String8& keys)
   2546 {
   2547     audio_io_handle_t output = getOutput();
   2548     if (output != AUDIO_IO_HANDLE_NONE) {
   2549         return AudioSystem::getParameters(output, keys);
   2550     } else {
   2551         return String8::empty();
   2552     }
   2553 }
   2554 
   2555 bool AudioTrack::isOffloaded() const
   2556 {
   2557     AutoMutex lock(mLock);
   2558     return isOffloaded_l();
   2559 }
   2560 
   2561 bool AudioTrack::isDirect() const
   2562 {
   2563     AutoMutex lock(mLock);
   2564     return isDirect_l();
   2565 }
   2566 
   2567 bool AudioTrack::isOffloadedOrDirect() const
   2568 {
   2569     AutoMutex lock(mLock);
   2570     return isOffloadedOrDirect_l();
   2571 }
   2572 
   2573 
   2574 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
   2575 {
   2576 
   2577     const size_t SIZE = 256;
   2578     char buffer[SIZE];
   2579     String8 result;
   2580 
   2581     result.append(" AudioTrack::dump\n");
    2582     snprintf(buffer, SIZE, "  stream type(%d), left-right volume(%f, %f)\n", mStreamType,
    2583             mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
    2584     result.append(buffer);
    2585     snprintf(buffer, SIZE, "  format(%d), channel count(%u), frame count(%zu)\n", mFormat,
    2586             mChannelCount, mFrameCount);
    2587     result.append(buffer);
    2588     snprintf(buffer, SIZE, "  sample rate(%u), speed(%f), status(%d)\n",
    2589             mSampleRate, mPlaybackRate.mSpeed, mStatus);
    2590     result.append(buffer);
    2591     snprintf(buffer, SIZE, "  state(%d), latency(%u)\n", mState, mLatency);
    2592     result.append(buffer);
   2592     result.append(buffer);
   2593     ::write(fd, result.string(), result.size());
   2594     return NO_ERROR;
   2595 }
   2596 
   2597 uint32_t AudioTrack::getUnderrunCount() const
   2598 {
   2599     AutoMutex lock(mLock);
   2600     return getUnderrunCount_l();
   2601 }
   2602 
   2603 uint32_t AudioTrack::getUnderrunCount_l() const
   2604 {
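             // The proxy only counts underruns for the current IAudioTrack; mUnderrunCountOffset
             // is presumed to hold the count accumulated before the track was last re-created,
             // so the sum is intended to cover the whole lifetime of this AudioTrack.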
   2605     return mProxy->getUnderrunCount() + mUnderrunCountOffset;
   2606 }
   2607 
   2608 uint32_t AudioTrack::getUnderrunFrames() const
   2609 {
   2610     AutoMutex lock(mLock);
   2611     return mProxy->getUnderrunFrames();
   2612 }
   2613 
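         // Usage sketch (hypothetical caller code): register a routing callback so the caller is
         // notified when the track is moved to a different output device, and unregister it when
         // no longer needed:
         //
         //     sp<AudioSystem::AudioDeviceCallback> cb = ...;  // caller-provided implementation
         //     track->addAudioDeviceCallback(cb);
         //     ...
         //     track->removeAudioDeviceCallback(cb);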
   2614 status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
   2615 {
   2616     if (callback == 0) {
   2617         ALOGW("%s adding NULL callback!", __FUNCTION__);
   2618         return BAD_VALUE;
   2619     }
   2620     AutoMutex lock(mLock);
   2621     if (mDeviceCallback == callback) {
   2622         ALOGW("%s adding same callback!", __FUNCTION__);
   2623         return INVALID_OPERATION;
   2624     }
   2625     status_t status = NO_ERROR;
   2626     if (mOutput != AUDIO_IO_HANDLE_NONE) {
   2627         if (mDeviceCallback != 0) {
   2628             ALOGW("%s callback already present!", __FUNCTION__);
   2629             AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
   2630         }
   2631         status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
   2632     }
   2633     mDeviceCallback = callback;
   2634     return status;
   2635 }
   2636 
   2637 status_t AudioTrack::removeAudioDeviceCallback(
   2638         const sp<AudioSystem::AudioDeviceCallback>& callback)
   2639 {
   2640     if (callback == 0) {
   2641         ALOGW("%s removing NULL callback!", __FUNCTION__);
   2642         return BAD_VALUE;
   2643     }
   2644     AutoMutex lock(mLock);
   2645     if (mDeviceCallback != callback) {
   2646         ALOGW("%s removing different callback!", __FUNCTION__);
   2647         return INVALID_OPERATION;
   2648     }
   2649     if (mOutput != AUDIO_IO_HANDLE_NONE) {
   2650         AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
   2651     }
   2652     mDeviceCallback = 0;
   2653     return NO_ERROR;
   2654 }
   2655 
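         // Usage sketch (hypothetical caller code): estimate how much already-written audio has
         // not yet reached the given pipeline location, e.g. to delay teardown until drained:
         //
         //     int32_t msecPending = 0;
         //     if (track->pendingDuration(&msecPending, ExtendedTimestamp::LOCATION_SERVER)
         //             == NO_ERROR) {
         //         // wait roughly msecPending ms before releasing the track
         //     }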
   2656 status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
   2657 {
   2658     if (msec == nullptr ||
   2659             (location != ExtendedTimestamp::LOCATION_SERVER
   2660                     && location != ExtendedTimestamp::LOCATION_KERNEL)) {
   2661         return BAD_VALUE;
   2662     }
   2663     AutoMutex lock(mLock);
   2664     // inclusive of offloaded and direct tracks.
   2665     //
    2666     // It would be possible, though it is not enabled here, to compute the duration
    2667     // for non-pcm audio_has_proportional_frames() formats, because they currently
    2668     // drain at a rate equivalent to the pcm sample rate * frame size.
   2669     if (!isPurePcmData_l()) {
   2670         return INVALID_OPERATION;
   2671     }
   2672     ExtendedTimestamp ets;
   2673     if (getTimestamp_l(&ets) == OK
   2674             && ets.mTimeNs[location] > 0) {
   2675         int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
   2676                 - ets.mPosition[location];
   2677         if (diff < 0) {
   2678             *msec = 0;
   2679         } else {
    2680             // ms is the playback time of the remaining frames, in milliseconds
   2681             int64_t ms = (int64_t)((double)diff * 1000 /
   2682                     ((double)mSampleRate * mPlaybackRate.mSpeed));
    2683             // clockdiff is the age of the timestamp (typically negative), in nanoseconds
   2684             int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
   2685                     ets.mTimeNs[location]
   2686                     + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
   2687                     - systemTime(SYSTEM_TIME_MONOTONIC);
   2688 
   2689             //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
   2690             static const int NANOS_PER_MILLIS = 1000000;
   2691             *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
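                     // Worked example with hypothetical values: diff = 9600 frames at
                     // mSampleRate = 48000 and speed 1.0 gives ms = 200; if the timestamp is
                     // 5 ms old, clockdiff is about -5000000 ns, so *msec = 200 - 5 = 195.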
   2692         }
   2693         return NO_ERROR;
   2694     }
   2695     if (location != ExtendedTimestamp::LOCATION_SERVER) {
   2696         return INVALID_OPERATION; // LOCATION_KERNEL is not available
   2697     }
   2698     // use server position directly (offloaded and direct arrive here)
   2699     updateAndGetPosition_l();
   2700     int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
   2701     *msec = (diff <= 0) ? 0
   2702             : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
   2703     return NO_ERROR;
   2704 }
   2705 
   2706 // =========================================================================
   2707 
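         // Called if the server-side binder object backing this track dies (for example when the
         // audio server process crashes).  Marking the proxy as dead is expected to unblock any
         // waiters so they return an error rather than waiting indefinitely.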
   2708 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
   2709 {
   2710     sp<AudioTrack> audioTrack = mAudioTrack.promote();
   2711     if (audioTrack != 0) {
   2712         AutoMutex lock(audioTrack->mLock);
   2713         audioTrack->mProxy->binderDied();
   2714     }
   2715 }
   2716 
   2717 // =========================================================================
   2718 
   2719 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
   2720     : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
   2721       mIgnoreNextPausedInt(false)
   2722 {
   2723 }
   2724 
   2725 AudioTrack::AudioTrackThread::~AudioTrackThread()
   2726 {
   2727 }
   2728 
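         // Rough overview of the control flags used below: mPaused is the externally requested
         // pause (pause()/resume()); mPausedInt/mPausedNs describe an internal pause, optionally
         // with a timeout, entered via pauseInternal() after processAudioBuffer() returns.
         // wake() cuts a timed internal pause short, and mIgnoreNextPausedInt causes one pending
         // internal pause to be skipped.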
   2729 bool AudioTrack::AudioTrackThread::threadLoop()
   2730 {
   2731     {
   2732         AutoMutex _l(mMyLock);
   2733         if (mPaused) {
   2734             mMyCond.wait(mMyLock);
   2735             // caller will check for exitPending()
   2736             return true;
   2737         }
   2738         if (mIgnoreNextPausedInt) {
   2739             mIgnoreNextPausedInt = false;
   2740             mPausedInt = false;
   2741         }
   2742         if (mPausedInt) {
   2743             if (mPausedNs > 0) {
   2744                 (void) mMyCond.waitRelative(mMyLock, mPausedNs);
   2745             } else {
   2746                 mMyCond.wait(mMyLock);
   2747             }
   2748             mPausedInt = false;
   2749             return true;
   2750         }
   2751     }
   2752     if (exitPending()) {
   2753         return false;
   2754     }
   2755     nsecs_t ns = mReceiver.processAudioBuffer();
   2756     switch (ns) {
   2757     case 0:
   2758         return true;
   2759     case NS_INACTIVE:
   2760         pauseInternal();
   2761         return true;
   2762     case NS_NEVER:
   2763         return false;
   2764     case NS_WHENEVER:
    2765         // Event driven: call wake() when callback notification conditions change.
   2766         ns = INT64_MAX;
   2767         // fall through
   2768     default:
   2769         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
   2770         pauseInternal(ns);
   2771         return true;
   2772     }
   2773 }
   2774 
   2775 void AudioTrack::AudioTrackThread::requestExit()
   2776 {
   2777     // must be in this order to avoid a race condition
   2778     Thread::requestExit();
   2779     resume();
   2780 }
   2781 
   2782 void AudioTrack::AudioTrackThread::pause()
   2783 {
   2784     AutoMutex _l(mMyLock);
   2785     mPaused = true;
   2786 }
   2787 
   2788 void AudioTrack::AudioTrackThread::resume()
   2789 {
   2790     AutoMutex _l(mMyLock);
   2791     mIgnoreNextPausedInt = true;
   2792     if (mPaused || mPausedInt) {
   2793         mPaused = false;
   2794         mPausedInt = false;
   2795         mMyCond.signal();
   2796     }
   2797 }
   2798 
   2799 void AudioTrack::AudioTrackThread::wake()
   2800 {
   2801     AutoMutex _l(mMyLock);
   2802     if (!mPaused) {
   2803         // wake() might be called while servicing a callback - ignore the next
   2804         // pause time and call processAudioBuffer.
   2805         mIgnoreNextPausedInt = true;
   2806         if (mPausedInt && mPausedNs > 0) {
   2807             // audio track is active and internally paused with timeout.
   2808             mPausedInt = false;
   2809             mMyCond.signal();
   2810         }
   2811     }
   2812 }
   2813 
   2814 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
   2815 {
   2816     AutoMutex _l(mMyLock);
   2817     mPausedInt = true;
   2818     mPausedNs = ns;
   2819 }
   2820 
   2821 } // namespace android
   2822