      1 /*
      2  * Copyright (C) 2010 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 //#define LOG_NDEBUG 0
     18 #define LOG_TAG "NuPlayerRenderer"
     19 #include <utils/Log.h>
     20 
     21 #include "AWakeLock.h"
     22 #include "NuPlayerRenderer.h"
     23 #include <algorithm>
     24 #include <cutils/properties.h>
     25 #include <media/stagefright/foundation/ADebug.h>
     26 #include <media/stagefright/foundation/AMessage.h>
     27 #include <media/stagefright/foundation/AUtils.h>
     28 #include <media/stagefright/MediaClock.h>
     29 #include <media/stagefright/MediaCodecConstants.h>
     30 #include <media/stagefright/MediaDefs.h>
     31 #include <media/stagefright/MediaErrors.h>
     32 #include <media/stagefright/MetaData.h>
     33 #include <media/stagefright/Utils.h>
     34 #include <media/stagefright/VideoFrameScheduler.h>
     35 #include <media/MediaCodecBuffer.h>
     36 
     37 #include <inttypes.h>
     38 
     39 namespace android {
     40 
     41 /*
     42  * Example of common configuration settings in shell script form
     43 
     44    #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
     45    adb shell setprop audio.offload.disable 1
     46 
     47    #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
     48    adb shell setprop audio.offload.video 1
     49 
     50    #Use audio callbacks for PCM data
     51    adb shell setprop media.stagefright.audio.cbk 1
     52 
     53    #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
     54    adb shell setprop media.stagefright.audio.deep 1
     55 
     56    #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
     57    adb shell setprop media.stagefright.audio.sink 1000
     58 
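            #Read back the current value of any of these properties to confirm a setting
            adb shell getprop media.stagefright.audio.sink
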
     59  * These configurations take effect for the next track played (not the current track).
     60  */
     61 
     62 static inline bool getUseAudioCallbackSetting() {
     63     return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
     64 }
     65 
     66 static inline int32_t getAudioSinkPcmMsSetting() {
     67     return property_get_int32(
     68             "media.stagefright.audio.sink", 500 /* default_value */);
     69 }
     70 
     71 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
     72 // is closed to allow the audio DSP to power down.
     73 static const int64_t kOffloadPauseMaxUs = 10000000LL;
     74 
     75 // Maximum allowed delay from AudioSink, 1.5 seconds.
     76 static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;
     77 
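         // Minimum spacing between audio-driven MediaClock anchor updates (see onNewAudioMediaTime).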
     78 static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
     79 
     80 // Default video frame display duration when only video exists.
     81 // Used to set max media time in MediaClock.
     82 static const int64_t kDefaultVideoFrameIntervalUs = 100000LL;
     83 
     84 // static
     85 const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
     86         AUDIO_CHANNEL_NONE,
     87         AUDIO_OUTPUT_FLAG_NONE,
     88         AUDIO_FORMAT_INVALID,
     89         0, // mNumChannels
     90         0 // mSampleRate
     91 };
     92 
     93 // static
     94 const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
     95 
     96 static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
     97     switch (pcmEncoding) {
     98     case kAudioEncodingPcmFloat:
     99         return AUDIO_FORMAT_PCM_FLOAT;
    100     case kAudioEncodingPcm16bit:
    101         return AUDIO_FORMAT_PCM_16_BIT;
    102     case kAudioEncodingPcm8bit:
    103         return AUDIO_FORMAT_PCM_8_BIT; // TODO: do we want to support this?
    104     default:
    105         ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
    106         return AUDIO_FORMAT_INVALID;
    107     }
    108 }
    109 
    110 NuPlayer::Renderer::Renderer(
    111         const sp<MediaPlayerBase::AudioSink> &sink,
    112         const sp<MediaClock> &mediaClock,
    113         const sp<AMessage> &notify,
    114         uint32_t flags)
    115     : mAudioSink(sink),
    116       mUseVirtualAudioSink(false),
    117       mNotify(notify),
    118       mFlags(flags),
    119       mNumFramesWritten(0),
    120       mDrainAudioQueuePending(false),
    121       mDrainVideoQueuePending(false),
    122       mAudioQueueGeneration(0),
    123       mVideoQueueGeneration(0),
    124       mAudioDrainGeneration(0),
    125       mVideoDrainGeneration(0),
    126       mAudioEOSGeneration(0),
    127       mMediaClock(mediaClock),
    128       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
    129       mAudioFirstAnchorTimeMediaUs(-1),
    130       mAnchorTimeMediaUs(-1),
    131       mAnchorNumFramesWritten(-1),
    132       mVideoLateByUs(0LL),
    133       mNextVideoTimeMediaUs(-1),
    134       mHasAudio(false),
    135       mHasVideo(false),
    136       mNotifyCompleteAudio(false),
    137       mNotifyCompleteVideo(false),
    138       mSyncQueues(false),
    139       mPaused(false),
    140       mPauseDrainAudioAllowedUs(0),
    141       mVideoSampleReceived(false),
    142       mVideoRenderingStarted(false),
    143       mVideoRenderingStartGeneration(0),
    144       mAudioRenderingStartGeneration(0),
    145       mRenderingDataDelivered(false),
    146       mNextAudioClockUpdateTimeUs(-1),
    147       mLastAudioMediaTimeUs(-1),
    148       mAudioOffloadPauseTimeoutGeneration(0),
    149       mAudioTornDown(false),
    150       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
    151       mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
    152       mTotalBuffersQueued(0),
    153       mLastAudioBufferDrained(0),
    154       mUseAudioCallback(false),
    155       mWakeLock(new AWakeLock()) {
    156     CHECK(mediaClock != NULL);
    157     mPlaybackRate = mPlaybackSettings.mSpeed;
    158     mMediaClock->setPlaybackRate(mPlaybackRate);
    159 }
    160 
    161 NuPlayer::Renderer::~Renderer() {
    162     if (offloadingAudio()) {
    163         mAudioSink->stop();
    164         mAudioSink->flush();
    165         mAudioSink->close();
    166     }
    167 
     168     // Try to avoid a race condition in case the callback is still active.
    169     Mutex::Autolock autoLock(mLock);
    170     if (mUseAudioCallback) {
    171         flushQueue(&mAudioQueue);
    172         flushQueue(&mVideoQueue);
    173     }
    174     mWakeLock.clear();
    175     mVideoScheduler.clear();
    176     mNotify.clear();
    177     mAudioSink.clear();
    178 }
    179 
    180 void NuPlayer::Renderer::queueBuffer(
    181         bool audio,
    182         const sp<MediaCodecBuffer> &buffer,
    183         const sp<AMessage> &notifyConsumed) {
    184     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
    185     msg->setInt32("queueGeneration", getQueueGeneration(audio));
    186     msg->setInt32("audio", static_cast<int32_t>(audio));
    187     msg->setObject("buffer", buffer);
    188     msg->setMessage("notifyConsumed", notifyConsumed);
    189     msg->post();
    190 }
    191 
    192 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
    193     CHECK_NE(finalResult, (status_t)OK);
    194 
    195     sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
    196     msg->setInt32("queueGeneration", getQueueGeneration(audio));
    197     msg->setInt32("audio", static_cast<int32_t>(audio));
    198     msg->setInt32("finalResult", finalResult);
    199     msg->post();
    200 }
    201 
    202 status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
    203     sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
    204     writeToAMessage(msg, rate);
    205     sp<AMessage> response;
    206     status_t err = msg->postAndAwaitResponse(&response);
    207     if (err == OK && response != NULL) {
    208         CHECK(response->findInt32("err", &err));
    209     }
    210     return err;
    211 }
    212 
    213 status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    214     if (rate.mSpeed == 0.f) {
    215         onPause();
     216         // Don't call the audiosink's setPlaybackRate when pausing, as the pitch does not
     217         // have to correspond to any non-zero speed (e.g. the old speed). Keep the
     218         // settings nonetheless, using the old speed, in case the audiosink changes.
    219         AudioPlaybackRate newRate = rate;
    220         newRate.mSpeed = mPlaybackSettings.mSpeed;
    221         mPlaybackSettings = newRate;
    222         return OK;
    223     }
    224 
    225     if (mAudioSink != NULL && mAudioSink->ready()) {
    226         status_t err = mAudioSink->setPlaybackRate(rate);
    227         if (err != OK) {
    228             return err;
    229         }
    230     }
    231     mPlaybackSettings = rate;
    232     mPlaybackRate = rate.mSpeed;
    233     mMediaClock->setPlaybackRate(mPlaybackRate);
    234     return OK;
    235 }
    236 
    237 status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    238     sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
    239     sp<AMessage> response;
    240     status_t err = msg->postAndAwaitResponse(&response);
    241     if (err == OK && response != NULL) {
    242         CHECK(response->findInt32("err", &err));
    243         if (err == OK) {
    244             readFromAMessage(response, rate);
    245         }
    246     }
    247     return err;
    248 }
    249 
    250 status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    251     if (mAudioSink != NULL && mAudioSink->ready()) {
    252         status_t err = mAudioSink->getPlaybackRate(rate);
    253         if (err == OK) {
    254             if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
    255                 ALOGW("correcting mismatch in internal/external playback rate");
    256             }
     257             // Adopt the playback settings used by the audiosink, as they may be
     258             // slightly off because the audiosink does not apply very small changes.
    259             mPlaybackSettings = *rate;
    260             if (mPaused) {
    261                 rate->mSpeed = 0.f;
    262             }
    263         }
    264         return err;
    265     }
    266     *rate = mPlaybackSettings;
    267     return OK;
    268 }
    269 
    270 status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
    271     sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
    272     writeToAMessage(msg, sync, videoFpsHint);
    273     sp<AMessage> response;
    274     status_t err = msg->postAndAwaitResponse(&response);
    275     if (err == OK && response != NULL) {
    276         CHECK(response->findInt32("err", &err));
    277     }
    278     return err;
    279 }
    280 
    281 status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
    282     if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
    283         return BAD_VALUE;
    284     }
    285     // TODO: support sync sources
    286     return INVALID_OPERATION;
    287 }
    288 
    289 status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
    290     sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
    291     sp<AMessage> response;
    292     status_t err = msg->postAndAwaitResponse(&response);
    293     if (err == OK && response != NULL) {
    294         CHECK(response->findInt32("err", &err));
    295         if (err == OK) {
    296             readFromAMessage(response, sync, videoFps);
    297         }
    298     }
    299     return err;
    300 }
    301 
    302 status_t NuPlayer::Renderer::onGetSyncSettings(
    303         AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
    304     *sync = mSyncSettings;
    305     *videoFps = -1.f;
    306     return OK;
    307 }
    308 
    309 void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
    310     {
    311         Mutex::Autolock autoLock(mLock);
    312         if (audio) {
    313             mNotifyCompleteAudio |= notifyComplete;
    314             clearAudioFirstAnchorTime_l();
    315             ++mAudioQueueGeneration;
    316             ++mAudioDrainGeneration;
    317         } else {
    318             mNotifyCompleteVideo |= notifyComplete;
    319             ++mVideoQueueGeneration;
    320             ++mVideoDrainGeneration;
    321             mNextVideoTimeMediaUs = -1;
    322         }
    323 
    324         mMediaClock->clearAnchor();
    325         mVideoLateByUs = 0;
    326         mSyncQueues = false;
    327     }
    328 
    329     sp<AMessage> msg = new AMessage(kWhatFlush, this);
    330     msg->setInt32("audio", static_cast<int32_t>(audio));
    331     msg->post();
    332 }
    333 
    334 void NuPlayer::Renderer::signalTimeDiscontinuity() {
    335 }
    336 
    337 void NuPlayer::Renderer::signalDisableOffloadAudio() {
    338     (new AMessage(kWhatDisableOffloadAudio, this))->post();
    339 }
    340 
    341 void NuPlayer::Renderer::signalEnableOffloadAudio() {
    342     (new AMessage(kWhatEnableOffloadAudio, this))->post();
    343 }
    344 
    345 void NuPlayer::Renderer::pause() {
    346     (new AMessage(kWhatPause, this))->post();
    347 }
    348 
    349 void NuPlayer::Renderer::resume() {
    350     (new AMessage(kWhatResume, this))->post();
    351 }
    352 
    353 void NuPlayer::Renderer::setVideoFrameRate(float fps) {
    354     sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
    355     msg->setFloat("frame-rate", fps);
    356     msg->post();
    357 }
    358 
     359 // Called on any thread, without mLock acquired.
    360 status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
    361     status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    362     if (result == OK) {
    363         return result;
    364     }
    365 
    366     // MediaClock has not started yet. Try to start it if possible.
    367     {
    368         Mutex::Autolock autoLock(mLock);
    369         if (mAudioFirstAnchorTimeMediaUs == -1) {
    370             return result;
    371         }
    372 
    373         AudioTimestamp ts;
    374         status_t res = mAudioSink->getTimestamp(ts);
    375         if (res != OK) {
    376             return result;
    377         }
    378 
    379         // AudioSink has rendered some frames.
    380         int64_t nowUs = ALooper::GetNowUs();
    381         int64_t playedOutDurationUs = mAudioSink->getPlayedOutDurationUs(nowUs);
    382         if (playedOutDurationUs == 0) {
    383             *mediaUs = mAudioFirstAnchorTimeMediaUs;
    384             return OK;
    385         }
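                 // Anchor the MediaClock at (first audio media time + played-out duration) so
                 // that this and subsequent position queries can be served from the clock.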
    386         int64_t nowMediaUs = playedOutDurationUs + mAudioFirstAnchorTimeMediaUs;
    387         mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    388     }
    389 
    390     return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    391 }
    392 
    393 void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
    394     mAudioFirstAnchorTimeMediaUs = -1;
    395     mMediaClock->setStartingTimeMedia(-1);
    396 }
    397 
    398 void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    399     if (mAudioFirstAnchorTimeMediaUs == -1) {
    400         mAudioFirstAnchorTimeMediaUs = mediaUs;
    401         mMediaClock->setStartingTimeMedia(mediaUs);
    402     }
    403 }
    404 
    405 // Called on renderer looper.
    406 void NuPlayer::Renderer::clearAnchorTime() {
    407     mMediaClock->clearAnchor();
    408     mAnchorTimeMediaUs = -1;
    409     mAnchorNumFramesWritten = -1;
    410 }
    411 
    412 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
    413     Mutex::Autolock autoLock(mLock);
    414     mVideoLateByUs = lateUs;
    415 }
    416 
    417 int64_t NuPlayer::Renderer::getVideoLateByUs() {
    418     Mutex::Autolock autoLock(mLock);
    419     return mVideoLateByUs;
    420 }
    421 
    422 status_t NuPlayer::Renderer::openAudioSink(
    423         const sp<AMessage> &format,
    424         bool offloadOnly,
    425         bool hasVideo,
    426         uint32_t flags,
    427         bool *isOffloaded,
    428         bool isStreaming) {
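             // Runs synchronously: the request is posted to the renderer looper and this call
             // blocks until the kWhatOpenAudioSink handler replies with the result.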
    429     sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    430     msg->setMessage("format", format);
    431     msg->setInt32("offload-only", offloadOnly);
    432     msg->setInt32("has-video", hasVideo);
    433     msg->setInt32("flags", flags);
    434     msg->setInt32("isStreaming", isStreaming);
    435 
    436     sp<AMessage> response;
    437     status_t postStatus = msg->postAndAwaitResponse(&response);
    438 
    439     int32_t err;
    440     if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
    441         err = INVALID_OPERATION;
    442     } else if (err == OK && isOffloaded != NULL) {
    443         int32_t offload;
    444         CHECK(response->findInt32("offload", &offload));
    445         *isOffloaded = (offload != 0);
    446     }
    447     return err;
    448 }
    449 
    450 void NuPlayer::Renderer::closeAudioSink() {
    451     sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
    452 
    453     sp<AMessage> response;
    454     msg->postAndAwaitResponse(&response);
    455 }
    456 
    457 void NuPlayer::Renderer::changeAudioFormat(
    458         const sp<AMessage> &format,
    459         bool offloadOnly,
    460         bool hasVideo,
    461         uint32_t flags,
    462         bool isStreaming,
    463         const sp<AMessage> &notify) {
    464     sp<AMessage> meta = new AMessage;
    465     meta->setMessage("format", format);
    466     meta->setInt32("offload-only", offloadOnly);
    467     meta->setInt32("has-video", hasVideo);
    468     meta->setInt32("flags", flags);
    469     meta->setInt32("isStreaming", isStreaming);
    470 
    471     sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
    472     msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
    473     msg->setMessage("notify", notify);
    474     msg->setMessage("meta", meta);
    475     msg->post();
    476 }
    477 
    478 void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    479     switch (msg->what()) {
    480         case kWhatOpenAudioSink:
    481         {
    482             sp<AMessage> format;
    483             CHECK(msg->findMessage("format", &format));
    484 
    485             int32_t offloadOnly;
    486             CHECK(msg->findInt32("offload-only", &offloadOnly));
    487 
    488             int32_t hasVideo;
    489             CHECK(msg->findInt32("has-video", &hasVideo));
    490 
    491             uint32_t flags;
    492             CHECK(msg->findInt32("flags", (int32_t *)&flags));
    493 
    494             uint32_t isStreaming;
    495             CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
    496 
    497             status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
    498 
    499             sp<AMessage> response = new AMessage;
    500             response->setInt32("err", err);
    501             response->setInt32("offload", offloadingAudio());
    502 
    503             sp<AReplyToken> replyID;
    504             CHECK(msg->senderAwaitsResponse(&replyID));
    505             response->postReply(replyID);
    506 
    507             break;
    508         }
    509 
    510         case kWhatCloseAudioSink:
    511         {
    512             sp<AReplyToken> replyID;
    513             CHECK(msg->senderAwaitsResponse(&replyID));
    514 
    515             onCloseAudioSink();
    516 
    517             sp<AMessage> response = new AMessage;
    518             response->postReply(replyID);
    519             break;
    520         }
    521 
    522         case kWhatStopAudioSink:
    523         {
    524             mAudioSink->stop();
    525             break;
    526         }
    527 
    528         case kWhatChangeAudioFormat:
    529         {
    530             int32_t queueGeneration;
    531             CHECK(msg->findInt32("queueGeneration", &queueGeneration));
    532 
    533             sp<AMessage> notify;
    534             CHECK(msg->findMessage("notify", &notify));
    535 
    536             if (offloadingAudio()) {
    537                 ALOGW("changeAudioFormat should NOT be called in offload mode");
    538                 notify->setInt32("err", INVALID_OPERATION);
    539                 notify->post();
    540                 break;
    541             }
    542 
    543             sp<AMessage> meta;
    544             CHECK(msg->findMessage("meta", &meta));
    545 
    546             if (queueGeneration != getQueueGeneration(true /* audio */)
    547                     || mAudioQueue.empty()) {
    548                 onChangeAudioFormat(meta, notify);
    549                 break;
    550             }
    551 
    552             QueueEntry entry;
    553             entry.mNotifyConsumed = notify;
    554             entry.mMeta = meta;
    555 
    556             Mutex::Autolock autoLock(mLock);
    557             mAudioQueue.push_back(entry);
    558             postDrainAudioQueue_l();
    559 
    560             break;
    561         }
    562 
    563         case kWhatDrainAudioQueue:
    564         {
    565             mDrainAudioQueuePending = false;
    566 
    567             int32_t generation;
    568             CHECK(msg->findInt32("drainGeneration", &generation));
    569             if (generation != getDrainGeneration(true /* audio */)) {
    570                 break;
    571             }
    572 
    573             if (onDrainAudioQueue()) {
    574                 uint32_t numFramesPlayed;
    575                 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
    576                          (status_t)OK);
    577 
    578                 // Handle AudioTrack race when start is immediately called after flush.
    579                 uint32_t numFramesPendingPlayout =
    580                     (mNumFramesWritten > numFramesPlayed ?
    581                         mNumFramesWritten - numFramesPlayed : 0);
    582 
    583                 // This is how long the audio sink will have data to
    584                 // play back.
    585                 int64_t delayUs =
    586                     mAudioSink->msecsPerFrame()
    587                         * numFramesPendingPlayout * 1000LL;
    588                 if (mPlaybackRate > 1.0f) {
    589                     delayUs /= mPlaybackRate;
    590                 }
    591 
    592                 // Let's give it more data after about half that time
    593                 // has elapsed.
    594                 delayUs /= 2;
    595                 // check the buffer size to estimate maximum delay permitted.
    596                 const int64_t maxDrainDelayUs = std::max(
    597                         mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
    598                 ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
    599                         (long long)delayUs, (long long)maxDrainDelayUs);
    600                 Mutex::Autolock autoLock(mLock);
    601                 postDrainAudioQueue_l(delayUs);
    602             }
    603             break;
    604         }
    605 
    606         case kWhatDrainVideoQueue:
    607         {
    608             int32_t generation;
    609             CHECK(msg->findInt32("drainGeneration", &generation));
    610             if (generation != getDrainGeneration(false /* audio */)) {
    611                 break;
    612             }
    613 
    614             mDrainVideoQueuePending = false;
    615 
    616             onDrainVideoQueue();
    617 
    618             postDrainVideoQueue();
    619             break;
    620         }
    621 
    622         case kWhatPostDrainVideoQueue:
    623         {
    624             int32_t generation;
    625             CHECK(msg->findInt32("drainGeneration", &generation));
    626             if (generation != getDrainGeneration(false /* audio */)) {
    627                 break;
    628             }
    629 
    630             mDrainVideoQueuePending = false;
    631             postDrainVideoQueue();
    632             break;
    633         }
    634 
    635         case kWhatQueueBuffer:
    636         {
    637             onQueueBuffer(msg);
    638             break;
    639         }
    640 
    641         case kWhatQueueEOS:
    642         {
    643             onQueueEOS(msg);
    644             break;
    645         }
    646 
    647         case kWhatEOS:
    648         {
    649             int32_t generation;
    650             CHECK(msg->findInt32("audioEOSGeneration", &generation));
    651             if (generation != mAudioEOSGeneration) {
    652                 break;
    653             }
    654             status_t finalResult;
    655             CHECK(msg->findInt32("finalResult", &finalResult));
    656             notifyEOS(true /* audio */, finalResult);
    657             break;
    658         }
    659 
    660         case kWhatConfigPlayback:
    661         {
    662             sp<AReplyToken> replyID;
    663             CHECK(msg->senderAwaitsResponse(&replyID));
    664             AudioPlaybackRate rate;
    665             readFromAMessage(msg, &rate);
    666             status_t err = onConfigPlayback(rate);
    667             sp<AMessage> response = new AMessage;
    668             response->setInt32("err", err);
    669             response->postReply(replyID);
    670             break;
    671         }
    672 
    673         case kWhatGetPlaybackSettings:
    674         {
    675             sp<AReplyToken> replyID;
    676             CHECK(msg->senderAwaitsResponse(&replyID));
    677             AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
    678             status_t err = onGetPlaybackSettings(&rate);
    679             sp<AMessage> response = new AMessage;
    680             if (err == OK) {
    681                 writeToAMessage(response, rate);
    682             }
    683             response->setInt32("err", err);
    684             response->postReply(replyID);
    685             break;
    686         }
    687 
    688         case kWhatConfigSync:
    689         {
    690             sp<AReplyToken> replyID;
    691             CHECK(msg->senderAwaitsResponse(&replyID));
    692             AVSyncSettings sync;
    693             float videoFpsHint;
    694             readFromAMessage(msg, &sync, &videoFpsHint);
    695             status_t err = onConfigSync(sync, videoFpsHint);
    696             sp<AMessage> response = new AMessage;
    697             response->setInt32("err", err);
    698             response->postReply(replyID);
    699             break;
    700         }
    701 
    702         case kWhatGetSyncSettings:
    703         {
    704             sp<AReplyToken> replyID;
    705             CHECK(msg->senderAwaitsResponse(&replyID));
    706 
    707             ALOGV("kWhatGetSyncSettings");
    708             AVSyncSettings sync;
    709             float videoFps = -1.f;
    710             status_t err = onGetSyncSettings(&sync, &videoFps);
    711             sp<AMessage> response = new AMessage;
    712             if (err == OK) {
    713                 writeToAMessage(response, sync, videoFps);
    714             }
    715             response->setInt32("err", err);
    716             response->postReply(replyID);
    717             break;
    718         }
    719 
    720         case kWhatFlush:
    721         {
    722             onFlush(msg);
    723             break;
    724         }
    725 
    726         case kWhatDisableOffloadAudio:
    727         {
    728             onDisableOffloadAudio();
    729             break;
    730         }
    731 
    732         case kWhatEnableOffloadAudio:
    733         {
    734             onEnableOffloadAudio();
    735             break;
    736         }
    737 
    738         case kWhatPause:
    739         {
    740             onPause();
    741             break;
    742         }
    743 
    744         case kWhatResume:
    745         {
    746             onResume();
    747             break;
    748         }
    749 
    750         case kWhatSetVideoFrameRate:
    751         {
    752             float fps;
    753             CHECK(msg->findFloat("frame-rate", &fps));
    754             onSetVideoFrameRate(fps);
    755             break;
    756         }
    757 
    758         case kWhatAudioTearDown:
    759         {
    760             int32_t reason;
    761             CHECK(msg->findInt32("reason", &reason));
    762 
    763             onAudioTearDown((AudioTearDownReason)reason);
    764             break;
    765         }
    766 
    767         case kWhatAudioOffloadPauseTimeout:
    768         {
    769             int32_t generation;
    770             CHECK(msg->findInt32("drainGeneration", &generation));
    771             if (generation != mAudioOffloadPauseTimeoutGeneration) {
    772                 break;
    773             }
    774             ALOGV("Audio Offload tear down due to pause timeout.");
    775             onAudioTearDown(kDueToTimeout);
    776             mWakeLock->release();
    777             break;
    778         }
    779 
    780         default:
    781             TRESPASS();
    782             break;
    783     }
    784 }
    785 
    786 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    787     if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
    788         return;
    789     }
    790 
    791     if (mAudioQueue.empty()) {
    792         return;
    793     }
    794 
    795     // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    796     if (mPaused) {
    797         const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
    798         if (diffUs > delayUs) {
    799             delayUs = diffUs;
    800         }
    801     }
    802 
    803     mDrainAudioQueuePending = true;
    804     sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    805     msg->setInt32("drainGeneration", mAudioDrainGeneration);
    806     msg->post(delayUs);
    807 }
    808 
    809 void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
    810     mAudioRenderingStartGeneration = mAudioDrainGeneration;
    811     mVideoRenderingStartGeneration = mVideoDrainGeneration;
    812     mRenderingDataDelivered = false;
    813 }
    814 
    815 void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
    816     if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
    817         mAudioRenderingStartGeneration == mAudioDrainGeneration) {
    818         mRenderingDataDelivered = true;
    819         if (mPaused) {
    820             return;
    821         }
    822         mVideoRenderingStartGeneration = -1;
    823         mAudioRenderingStartGeneration = -1;
    824 
    825         sp<AMessage> notify = mNotify->dup();
    826         notify->setInt32("what", kWhatMediaRenderingStart);
    827         notify->post();
    828     }
    829 }
    830 
    831 // static
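         // Called by the AudioSink to request more data or report an event; 'cookie' carries
         // the Renderer instance that registered this callback.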
    832 size_t NuPlayer::Renderer::AudioSinkCallback(
    833         MediaPlayerBase::AudioSink * /* audioSink */,
    834         void *buffer,
    835         size_t size,
    836         void *cookie,
    837         MediaPlayerBase::AudioSink::cb_event_t event) {
    838     NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
    839 
    840     switch (event) {
    841         case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
    842         {
    843             return me->fillAudioBuffer(buffer, size);
    844             break;
    845         }
    846 
    847         case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
    848         {
    849             ALOGV("AudioSink::CB_EVENT_STREAM_END");
    850             me->notifyEOSCallback();
    851             break;
    852         }
    853 
    854         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
    855         {
    856             ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
    857             me->notifyAudioTearDown(kDueToError);
    858             break;
    859         }
    860     }
    861 
    862     return 0;
    863 }
    864 
    865 void NuPlayer::Renderer::notifyEOSCallback() {
    866     Mutex::Autolock autoLock(mLock);
    867 
    868     if (!mUseAudioCallback) {
    869         return;
    870     }
    871 
    872     notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
    873 }
    874 
    875 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    876     Mutex::Autolock autoLock(mLock);
    877 
    878     if (!mUseAudioCallback) {
    879         return 0;
    880     }
    881 
    882     bool hasEOS = false;
    883 
    884     size_t sizeCopied = 0;
    885     bool firstEntry = true;
     886     QueueEntry *entry;
             status_t eosFinalResult = OK;  // set from the EOS entry before it is erased (see below)
    887     while (sizeCopied < size && !mAudioQueue.empty()) {
    888         entry = &*mAudioQueue.begin();
    889 
     890         if (entry->mBuffer == NULL) { // EOS
     891             hasEOS = true;
                     // Capture the final result now; erasing the entry below frees its node,
                     // so 'entry' must not be dereferenced afterwards.
                     eosFinalResult = entry->mFinalResult;
     892             mAudioQueue.erase(mAudioQueue.begin());
     893             break;
    894         }
    895 
    896         if (firstEntry && entry->mOffset == 0) {
    897             firstEntry = false;
    898             int64_t mediaTimeUs;
    899             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
    900             ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
    901             setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
    902         }
    903 
    904         size_t copy = entry->mBuffer->size() - entry->mOffset;
    905         size_t sizeRemaining = size - sizeCopied;
    906         if (copy > sizeRemaining) {
    907             copy = sizeRemaining;
    908         }
    909 
    910         memcpy((char *)buffer + sizeCopied,
    911                entry->mBuffer->data() + entry->mOffset,
    912                copy);
    913 
    914         entry->mOffset += copy;
    915         if (entry->mOffset == entry->mBuffer->size()) {
    916             entry->mNotifyConsumed->post();
    917             mAudioQueue.erase(mAudioQueue.begin());
    918             entry = NULL;
    919         }
    920         sizeCopied += copy;
    921 
    922         notifyIfMediaRenderingStarted_l();
    923     }
    924 
    925     if (mAudioFirstAnchorTimeMediaUs >= 0) {
    926         int64_t nowUs = ALooper::GetNowUs();
    927         int64_t nowMediaUs =
    928             mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
    929         // we don't know how much data we are queueing for offloaded tracks.
    930         mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    931     }
    932 
     933     // for non-offloaded audio, we need to compute the frames written because
     934     // there is no EVENT_STREAM_END notification. The frames-written count gives
     935     // an estimate of the pending playout duration.
    936     if (!offloadingAudio()) {
    937         mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    938     }
    939 
    940     if (hasEOS) {
    941         (new AMessage(kWhatStopAudioSink, this))->post();
    942         // As there is currently no EVENT_STREAM_END callback notification for
    943         // non-offloaded audio tracks, we need to post the EOS ourselves.
    944         if (!offloadingAudio()) {
    945             int64_t postEOSDelayUs = 0;
    946             if (mAudioSink->needsTrailingPadding()) {
    947                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
    948             }
    949             ALOGV("fillAudioBuffer: notifyEOS_l "
    950                     "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
     951                     mNumFramesWritten, eosFinalResult, (long long)postEOSDelayUs);
     952             notifyEOS_l(true /* audio */, eosFinalResult, postEOSDelayUs);
    953         }
    954     }
    955     return sizeCopied;
    956 }
    957 
    958 void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
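             // Find the position one past the last EOS (or format-change) entry; everything
             // before it is replied to and dropped below.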
    959     List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    960     bool foundEOS = false;
    961     while (it != mAudioQueue.end()) {
    962         int32_t eos;
    963         QueueEntry *entry = &*it++;
    964         if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
    965                 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
    966             itEOS = it;
    967             foundEOS = true;
    968         }
    969     }
    970 
    971     if (foundEOS) {
    972         // post all replies before EOS and drop the samples
    973         for (it = mAudioQueue.begin(); it != itEOS; it++) {
    974             if (it->mBuffer == nullptr) {
    975                 if (it->mNotifyConsumed == nullptr) {
    976                     // delay doesn't matter as we don't even have an AudioTrack
    977                     notifyEOS(true /* audio */, it->mFinalResult);
    978                 } else {
    979                     // TAG for re-opening audio sink.
    980                     onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
    981                 }
    982             } else {
    983                 it->mNotifyConsumed->post();
    984             }
    985         }
    986         mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    987     }
    988 }
    989 
    990 bool NuPlayer::Renderer::onDrainAudioQueue() {
    991     // do not drain audio during teardown as queued buffers may be invalid.
    992     if (mAudioTornDown) {
    993         return false;
    994     }
     995     // TODO: This call to getPosition checks whether the AudioTrack has been created
     996     // in the AudioSink before draining audio. If the AudioTrack doesn't exist, the
     997     // CHECKs on getPosition will fail.
     998     // We still need to figure out why the AudioTrack is not created by the time
     999     // this function is called. One possible cause is leftover
    1000     // audio. Another area to check is whether the decoder
    1001     // has received INFO_FORMAT_CHANGED as its first buffer, since the
    1002     // AudioSink is opened there, and possible interactions with a flush
    1003     // immediately after start. Investigate the error message
    1004     // "vorbis_dsp_synthesis returned -135", along with RTSP.
   1005     uint32_t numFramesPlayed;
   1006     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
   1007         // When getPosition fails, renderer will not reschedule the draining
   1008         // unless new samples are queued.
   1009         // If we have pending EOS (or "eos" marker for discontinuities), we need
   1010         // to post these now as NuPlayerDecoder might be waiting for it.
   1011         drainAudioQueueUntilLastEOS();
   1012 
   1013         ALOGW("onDrainAudioQueue(): audio sink is not ready");
   1014         return false;
   1015     }
   1016 
   1017 #if 0
   1018     ssize_t numFramesAvailableToWrite =
   1019         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
   1020 
   1021     if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
   1022         ALOGI("audio sink underrun");
   1023     } else {
   1024         ALOGV("audio queue has %d frames left to play",
   1025              mAudioSink->frameCount() - numFramesAvailableToWrite);
   1026     }
   1027 #endif
   1028 
   1029     uint32_t prevFramesWritten = mNumFramesWritten;
   1030     while (!mAudioQueue.empty()) {
   1031         QueueEntry *entry = &*mAudioQueue.begin();
   1032 
   1033         if (entry->mBuffer == NULL) {
   1034             if (entry->mNotifyConsumed != nullptr) {
   1035                 // TAG for re-open audio sink.
   1036                 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
   1037                 mAudioQueue.erase(mAudioQueue.begin());
   1038                 continue;
   1039             }
   1040 
   1041             // EOS
   1042             if (mPaused) {
   1043                 // Do not notify EOS when paused.
   1044                 // This is needed to avoid switch to next clip while in pause.
   1045                 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
   1046                 return false;
   1047             }
   1048 
   1049             int64_t postEOSDelayUs = 0;
   1050             if (mAudioSink->needsTrailingPadding()) {
   1051                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
   1052             }
   1053             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
   1054             mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
   1055 
   1056             mAudioQueue.erase(mAudioQueue.begin());
   1057             entry = NULL;
   1058             if (mAudioSink->needsTrailingPadding()) {
   1059                 // If we're not in gapless playback (i.e. through setNextPlayer), we
   1060                 // need to stop the track here, because that will play out the last
   1061                 // little bit at the end of the file. Otherwise short files won't play.
   1062                 mAudioSink->stop();
   1063                 mNumFramesWritten = 0;
   1064             }
   1065             return false;
   1066         }
   1067 
   1068         mLastAudioBufferDrained = entry->mBufferOrdinal;
   1069 
    1070         // Ignore a 0-sized buffer, which could be an EOS marker with no data.
   1071         if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
   1072             int64_t mediaTimeUs;
   1073             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
   1074             ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
   1075                     mediaTimeUs / 1E6);
   1076             onNewAudioMediaTime(mediaTimeUs);
   1077         }
   1078 
   1079         size_t copy = entry->mBuffer->size() - entry->mOffset;
   1080 
   1081         ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
   1082                                             copy, false /* blocking */);
   1083         if (written < 0) {
   1084             // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
   1085             if (written == WOULD_BLOCK) {
   1086                 ALOGV("AudioSink write would block when writing %zu bytes", copy);
   1087             } else {
   1088                 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
   1089                 // This can only happen when AudioSink was opened with doNotReconnect flag set to
   1090                 // true, in which case the NuPlayer will handle the reconnect.
   1091                 notifyAudioTearDown(kDueToError);
   1092             }
   1093             break;
   1094         }
   1095 
   1096         entry->mOffset += written;
   1097         size_t remainder = entry->mBuffer->size() - entry->mOffset;
   1098         if ((ssize_t)remainder < mAudioSink->frameSize()) {
   1099             if (remainder > 0) {
   1100                 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
   1101                         remainder);
   1102                 entry->mOffset += remainder;
   1103                 copy -= remainder;
   1104             }
   1105 
   1106             entry->mNotifyConsumed->post();
   1107             mAudioQueue.erase(mAudioQueue.begin());
   1108 
   1109             entry = NULL;
   1110         }
   1111 
   1112         size_t copiedFrames = written / mAudioSink->frameSize();
   1113         mNumFramesWritten += copiedFrames;
   1114 
   1115         {
   1116             Mutex::Autolock autoLock(mLock);
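                     // Estimate the media time of the last sample written to the sink by extending
                     // the anchor media time with the duration of frames written since the anchor.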
   1117             int64_t maxTimeMedia;
   1118             maxTimeMedia =
   1119                 mAnchorTimeMediaUs +
   1120                         (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
   1121                                 * 1000LL * mAudioSink->msecsPerFrame());
   1122             mMediaClock->updateMaxTimeMedia(maxTimeMedia);
   1123 
   1124             notifyIfMediaRenderingStarted_l();
   1125         }
   1126 
   1127         if (written != (ssize_t)copy) {
   1128             // A short count was received from AudioSink::write()
   1129             //
   1130             // AudioSink write is called in non-blocking mode.
   1131             // It may return with a short count when:
   1132             //
   1133             // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
   1134             //    discarded.
   1135             // 2) The data to be copied exceeds the available buffer in AudioSink.
   1136             // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
   1137             // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
   1138 
   1139             // (Case 1)
   1140             // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
   1141             // needs to fail, as we should not carry over fractional frames between calls.
   1142             CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
   1143 
   1144             // (Case 2, 3, 4)
   1145             // Return early to the caller.
   1146             // Beware of calling immediately again as this may busy-loop if you are not careful.
   1147             ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
   1148             break;
   1149         }
   1150     }
   1151 
   1152     // calculate whether we need to reschedule another write.
   1153     bool reschedule = !mAudioQueue.empty()
   1154             && (!mPaused
   1155                 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
   1156     //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
   1157     //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
   1158     return reschedule;
   1159 }
   1160 
   1161 int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
   1162     int32_t sampleRate = offloadingAudio() ?
   1163             mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
   1164     if (sampleRate == 0) {
   1165         ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
   1166         return 0;
   1167     }
   1168 
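             // e.g. 48000 frames at a 48 kHz sample rate correspond to 1,000,000 us (one second).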
   1169     return (int64_t)(numFrames * 1000000LL / sampleRate);
   1170 }
   1171 
   1172 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
   1173 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
   1174     int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
   1175     if (mUseVirtualAudioSink) {
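                 // With the virtual (system-clock-paced) sink, the pending duration is the written
                 // duration minus how far the media clock has advanced past the first audio anchor.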
   1176         int64_t nowUs = ALooper::GetNowUs();
   1177         int64_t mediaUs;
   1178         if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
   1179             return 0LL;
   1180         } else {
   1181             return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
   1182         }
   1183     }
   1184 
   1185     const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
   1186     int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
   1187     if (pendingUs < 0) {
   1188         // This shouldn't happen unless the timestamp is stale.
   1189         ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
   1190                 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
   1191                 __func__, (long long)pendingUs,
   1192                 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
   1193         pendingUs = 0;
   1194     }
   1195     return pendingUs;
   1196 }
   1197 
   1198 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
   1199     int64_t realUs;
   1200     if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
    1201         // If we failed to get the current position, e.g. because the audio clock is
    1202         // not ready, just play out the video immediately without delay.
   1203         return nowUs;
   1204     }
   1205     return realUs;
   1206 }
   1207 
   1208 void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
   1209     Mutex::Autolock autoLock(mLock);
    1210     // TRICKY: the vorbis decoder generates multiple frames with the same
    1211     // timestamp, so only update on the first frame with a given timestamp.
   1212     if (mediaTimeUs == mAnchorTimeMediaUs) {
   1213         return;
   1214     }
   1215     setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
   1216 
   1217     // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
   1218     if (mNextAudioClockUpdateTimeUs == -1) {
   1219         AudioTimestamp ts;
   1220         if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
   1221             mNextAudioClockUpdateTimeUs = 0; // start our clock updates
   1222         }
   1223     }
   1224     int64_t nowUs = ALooper::GetNowUs();
   1225     if (mNextAudioClockUpdateTimeUs >= 0) {
   1226         if (nowUs >= mNextAudioClockUpdateTimeUs) {
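                     // The media time playing out right now is the new buffer's timestamp minus
                     // the duration of audio still pending in the sink.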
   1227             int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
   1228             mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
   1229             mUseVirtualAudioSink = false;
   1230             mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
   1231         }
   1232     } else {
   1233         int64_t unused;
   1234         if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
   1235                 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
   1236                         > kMaxAllowedAudioSinkDelayUs)) {
   1237             // Enough data has been sent to AudioSink, but AudioSink has not rendered
   1238             // any data yet. Something is wrong with AudioSink, e.g., the device is not
   1239             // connected to audio out.
   1240             // Switch to system clock. This essentially creates a virtual AudioSink with
    1241             // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
   1242             // This virtual AudioSink renders audio data starting from the very first sample
   1243             // and it's paced by system clock.
   1244             ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
   1245             mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
   1246             mUseVirtualAudioSink = true;
   1247         }
   1248     }
   1249     mAnchorNumFramesWritten = mNumFramesWritten;
   1250     mAnchorTimeMediaUs = mediaTimeUs;
   1251 }
   1252 
   1253 // Called without mLock acquired.
   1254 void NuPlayer::Renderer::postDrainVideoQueue() {
   1255     if (mDrainVideoQueuePending
   1256             || getSyncQueues()
   1257             || (mPaused && mVideoSampleReceived)) {
   1258         return;
   1259     }
   1260 
   1261     if (mVideoQueue.empty()) {
   1262         return;
   1263     }
   1264 
   1265     QueueEntry &entry = *mVideoQueue.begin();
   1266 
   1267     sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
   1268     msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
   1269 
   1270     if (entry.mBuffer == NULL) {
   1271         // EOS doesn't carry a timestamp.
   1272         msg->post();
   1273         mDrainVideoQueuePending = true;
   1274         return;
   1275     }
   1276 
   1277     int64_t nowUs = ALooper::GetNowUs();
   1278     if (mFlags & FLAG_REAL_TIME) {
   1279         int64_t realTimeUs;
   1280         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));
   1281 
   1282         realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
   1283 
   1284         int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
   1285 
   1286         int64_t delayUs = realTimeUs - nowUs;
   1287 
   1288         ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
   1289         // post 2 display refreshes before rendering is due
   1290         msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
   1291 
   1292         mDrainVideoQueuePending = true;
   1293         return;
   1294     }
   1295 
   1296     int64_t mediaTimeUs;
   1297     CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
   1298 
   1299     {
   1300         Mutex::Autolock autoLock(mLock);
   1301         if (mAnchorTimeMediaUs < 0) {
   1302             mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
   1303             mAnchorTimeMediaUs = mediaTimeUs;
   1304         }
   1305     }
   1306     mNextVideoTimeMediaUs = mediaTimeUs;
   1307     if (!mHasAudio) {
   1308         // smooth out videos >= 10fps
   1309         mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
   1310     }
   1311 
   1312     if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
   1313         msg->post();
   1314     } else {
   1315         int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
   1316 
   1317         // post 2 display refreshes before rendering is due
   1318         mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
   1319     }
   1320 
   1321     mDrainVideoQueuePending = true;
   1322 }
   1323 
   1324 void NuPlayer::Renderer::onDrainVideoQueue() {
   1325     if (mVideoQueue.empty()) {
   1326         return;
   1327     }
   1328 
   1329     QueueEntry *entry = &*mVideoQueue.begin();
   1330 
   1331     if (entry->mBuffer == NULL) {
   1332         // EOS
   1333 
   1334         notifyEOS(false /* audio */, entry->mFinalResult);
   1335 
   1336         mVideoQueue.erase(mVideoQueue.begin());
   1337         entry = NULL;
   1338 
   1339         setVideoLateByUs(0);
   1340         return;
   1341     }
   1342 
   1343     int64_t nowUs = ALooper::GetNowUs();
   1344     int64_t realTimeUs;
   1345     int64_t mediaTimeUs = -1;
   1346     if (mFlags & FLAG_REAL_TIME) {
   1347         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
   1348     } else {
   1349         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
   1350 
   1351         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
   1352     }
   1353     realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
   1354 
   1355     bool tooLate = false;
   1356 
   1357     if (!mPaused) {
   1358         setVideoLateByUs(nowUs - realTimeUs);
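                 // Treat a frame as too late to render if it trails the clock by more than 40 ms.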
   1359         tooLate = (mVideoLateByUs > 40000);
   1360 
   1361         if (tooLate) {
   1362             ALOGV("video late by %lld us (%.2f secs)",
   1363                  (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
   1364         } else {
   1365             int64_t mediaUs = 0;
   1366             mMediaClock->getMediaTime(realTimeUs, &mediaUs);
   1367             ALOGV("rendering video at media time %.2f secs",
   1368                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
   1369                     mediaUs) / 1E6);
   1370 
   1371             if (!(mFlags & FLAG_REAL_TIME)
   1372                     && mLastAudioMediaTimeUs != -1
   1373                     && mediaTimeUs > mLastAudioMediaTimeUs) {
   1374                 // If audio ends before video, video continues to drive media clock.
   1375                 // Also smooth out videos >= 10fps.
   1376                 mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
   1377             }
   1378         }
   1379     } else {
   1380         setVideoLateByUs(0);
   1381         if (!mVideoSampleReceived && !mHasAudio) {
    1382             // Ensure the first frame after a flush is not used as an anchor while the renderer is
    1383             // paused, because resume can happen at any time after a seek.
   1384             clearAnchorTime();
   1385         }
   1386     }
   1387 
   1388     // Always render the first video frame while keeping stats on A/V sync.
   1389     if (!mVideoSampleReceived) {
   1390         realTimeUs = nowUs;
   1391         tooLate = false;
   1392     }
   1393 
   1394     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
   1395     entry->mNotifyConsumed->setInt32("render", !tooLate);
   1396     entry->mNotifyConsumed->post();
   1397     mVideoQueue.erase(mVideoQueue.begin());
   1398     entry = NULL;
   1399 
   1400     mVideoSampleReceived = true;
   1401 
   1402     if (!mPaused) {
   1403         if (!mVideoRenderingStarted) {
   1404             mVideoRenderingStarted = true;
   1405             notifyVideoRenderingStart();
   1406         }
   1407         Mutex::Autolock autoLock(mLock);
   1408         notifyIfMediaRenderingStarted_l();
   1409     }
   1410 }
   1411 
   1412 void NuPlayer::Renderer::notifyVideoRenderingStart() {
   1413     sp<AMessage> notify = mNotify->dup();
   1414     notify->setInt32("what", kWhatVideoRenderingStart);
   1415     notify->post();
   1416 }
   1417 
   1418 void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
   1419     Mutex::Autolock autoLock(mLock);
   1420     notifyEOS_l(audio, finalResult, delayUs);
   1421 }
   1422 
   1423 void NuPlayer::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
   1424     if (audio && delayUs > 0) {
   1425         sp<AMessage> msg = new AMessage(kWhatEOS, this);
   1426         msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
   1427         msg->setInt32("finalResult", finalResult);
   1428         msg->post(delayUs);
   1429         return;
   1430     }
   1431     sp<AMessage> notify = mNotify->dup();
   1432     notify->setInt32("what", kWhatEOS);
   1433     notify->setInt32("audio", static_cast<int32_t>(audio));
   1434     notify->setInt32("finalResult", finalResult);
   1435     notify->post(delayUs);
   1436 
   1437     if (audio) {
    1438         // Video might outlive audio. Clear the anchor so playback can continue as video-only.
   1439         mAnchorTimeMediaUs = -1;
   1440         mHasAudio = false;
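                 // Keep the media clock usable for the remaining video: extend its max time to the next
                 // video frame, or re-anchor the clock there if it has no valid anchor.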
   1441         if (mNextVideoTimeMediaUs >= 0) {
   1442             int64_t mediaUs = 0;
   1443             int64_t nowUs = ALooper::GetNowUs();
   1444             status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
   1445             if (result == OK) {
   1446                 if (mNextVideoTimeMediaUs > mediaUs) {
   1447                     mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
   1448                 }
   1449             } else {
   1450                 mMediaClock->updateAnchor(
   1451                         mNextVideoTimeMediaUs, nowUs,
   1452                         mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
   1453             }
   1454         }
   1455     }
   1456 }
   1457 
   1458 void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
   1459     sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
   1460     msg->setInt32("reason", reason);
   1461     msg->post();
   1462 }
   1463 
   1464 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
   1465     int32_t audio;
   1466     CHECK(msg->findInt32("audio", &audio));
   1467 
   1468     if (dropBufferIfStale(audio, msg)) {
   1469         return;
   1470     }
   1471 
   1472     if (audio) {
   1473         mHasAudio = true;
   1474     } else {
   1475         mHasVideo = true;
   1476     }
   1477 
   1478     if (mHasVideo) {
   1479         if (mVideoScheduler == NULL) {
   1480             mVideoScheduler = new VideoFrameScheduler();
   1481             mVideoScheduler->init();
   1482         }
   1483     }
   1484 
   1485     sp<RefBase> obj;
   1486     CHECK(msg->findObject("buffer", &obj));
   1487     sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
   1488 
   1489     sp<AMessage> notifyConsumed;
   1490     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
   1491 
   1492     QueueEntry entry;
   1493     entry.mBuffer = buffer;
   1494     entry.mNotifyConsumed = notifyConsumed;
   1495     entry.mOffset = 0;
   1496     entry.mFinalResult = OK;
   1497     entry.mBufferOrdinal = ++mTotalBuffersQueued;
   1498 
   1499     if (audio) {
   1500         Mutex::Autolock autoLock(mLock);
   1501         mAudioQueue.push_back(entry);
   1502         postDrainAudioQueue_l();
   1503     } else {
   1504         mVideoQueue.push_back(entry);
   1505         postDrainVideoQueue();
   1506     }
   1507 
   1508     Mutex::Autolock autoLock(mLock);
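             // During start-up queue syncing, wait until both queues have data before deciding
             // whether any leading audio needs to be dropped.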
   1509     if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
   1510         return;
   1511     }
   1512 
   1513     sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
   1514     sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
   1515 
   1516     if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
   1517         // EOS signalled on either queue.
   1518         syncQueuesDone_l();
   1519         return;
   1520     }
   1521 
   1522     int64_t firstAudioTimeUs;
   1523     int64_t firstVideoTimeUs;
   1524     CHECK(firstAudioBuffer->meta()
   1525             ->findInt64("timeUs", &firstAudioTimeUs));
   1526     CHECK(firstVideoBuffer->meta()
   1527             ->findInt64("timeUs", &firstVideoTimeUs));
   1528 
   1529     int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
   1530 
   1531     ALOGV("queueDiff = %.2f secs", diff / 1E6);
   1532 
   1533     if (diff > 100000LL) {
    1534         // Audio data starts more than 0.1 secs before video.
   1535         // Drop some audio.
   1536 
   1537         (*mAudioQueue.begin()).mNotifyConsumed->post();
   1538         mAudioQueue.erase(mAudioQueue.begin());
   1539         return;
   1540     }
   1541 
   1542     syncQueuesDone_l();
   1543 }
   1544 
   1545 void NuPlayer::Renderer::syncQueuesDone_l() {
   1546     if (!mSyncQueues) {
   1547         return;
   1548     }
   1549 
   1550     mSyncQueues = false;
   1551 
   1552     if (!mAudioQueue.empty()) {
   1553         postDrainAudioQueue_l();
   1554     }
   1555 
   1556     if (!mVideoQueue.empty()) {
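                 // postDrainVideoQueue() acquires mLock internally (e.g. via getDrainGeneration()),
                 // so release the lock around the call to avoid self-deadlock.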
   1557         mLock.unlock();
   1558         postDrainVideoQueue();
   1559         mLock.lock();
   1560     }
   1561 }
   1562 
   1563 void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
   1564     int32_t audio;
   1565     CHECK(msg->findInt32("audio", &audio));
   1566 
   1567     if (dropBufferIfStale(audio, msg)) {
   1568         return;
   1569     }
   1570 
   1571     int32_t finalResult;
   1572     CHECK(msg->findInt32("finalResult", &finalResult));
   1573 
   1574     QueueEntry entry;
   1575     entry.mOffset = 0;
   1576     entry.mFinalResult = finalResult;
   1577 
   1578     if (audio) {
   1579         Mutex::Autolock autoLock(mLock);
   1580         if (mAudioQueue.empty() && mSyncQueues) {
   1581             syncQueuesDone_l();
   1582         }
   1583         mAudioQueue.push_back(entry);
   1584         postDrainAudioQueue_l();
   1585     } else {
   1586         if (mVideoQueue.empty() && getSyncQueues()) {
   1587             Mutex::Autolock autoLock(mLock);
   1588             syncQueuesDone_l();
   1589         }
   1590         mVideoQueue.push_back(entry);
   1591         postDrainVideoQueue();
   1592     }
   1593 }
   1594 
   1595 void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
   1596     int32_t audio, notifyComplete;
   1597     CHECK(msg->findInt32("audio", &audio));
   1598 
   1599     {
   1600         Mutex::Autolock autoLock(mLock);
   1601         if (audio) {
   1602             notifyComplete = mNotifyCompleteAudio;
   1603             mNotifyCompleteAudio = false;
   1604             mLastAudioMediaTimeUs = -1;
   1605 
   1606             mHasAudio = false;
   1607             if (mNextVideoTimeMediaUs >= 0) {
   1608                 int64_t nowUs = ALooper::GetNowUs();
   1609                 mMediaClock->updateAnchor(
   1610                         mNextVideoTimeMediaUs, nowUs,
   1611                         mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
   1612             }
   1613         } else {
   1614             notifyComplete = mNotifyCompleteVideo;
   1615             mNotifyCompleteVideo = false;
   1616         }
   1617 
   1618         // If we're currently syncing the queues, i.e. dropping audio while
   1619         // aligning the first audio/video buffer times and only one of the
   1620         // two queues has data, we may starve that queue by not requesting
   1621         // more buffers from the decoder. If the other source then encounters
   1622         // a discontinuity that leads to flushing, we'll never find the
   1623         // corresponding discontinuity on the other queue.
   1624         // Therefore we'll stop syncing the queues if at least one of them
   1625         // is flushed.
   1626         syncQueuesDone_l();
   1627     }
   1628     clearAnchorTime();
   1629 
   1630     ALOGV("flushing %s", audio ? "audio" : "video");
   1631     if (audio) {
   1632         {
   1633             Mutex::Autolock autoLock(mLock);
   1634             flushQueue(&mAudioQueue);
   1635 
   1636             ++mAudioDrainGeneration;
   1637             ++mAudioEOSGeneration;
   1638             prepareForMediaRenderingStart_l();
   1639 
   1640             // the frame count will be reset after flush.
   1641             clearAudioFirstAnchorTime_l();
   1642         }
   1643 
   1644         mDrainAudioQueuePending = false;
   1645 
   1646         if (offloadingAudio()) {
   1647             mAudioSink->pause();
   1648             mAudioSink->flush();
   1649             if (!mPaused) {
   1650                 mAudioSink->start();
   1651             }
   1652         } else {
   1653             mAudioSink->pause();
   1654             mAudioSink->flush();
   1655             // Call stop() to signal to the AudioSink to completely fill the
   1656             // internal buffer before resuming playback.
   1657             // FIXME: this is ignored after flush().
   1658             mAudioSink->stop();
   1659             if (!mPaused) {
   1660                 mAudioSink->start();
   1661             }
   1662             mNumFramesWritten = 0;
   1663         }
   1664         mNextAudioClockUpdateTimeUs = -1;
   1665     } else {
   1666         flushQueue(&mVideoQueue);
   1667 
   1668         mDrainVideoQueuePending = false;
   1669 
   1670         if (mVideoScheduler != NULL) {
   1671             mVideoScheduler->restart();
   1672         }
   1673 
   1674         Mutex::Autolock autoLock(mLock);
   1675         ++mVideoDrainGeneration;
   1676         prepareForMediaRenderingStart_l();
   1677     }
   1678 
   1679     mVideoSampleReceived = false;
   1680 
   1681     if (notifyComplete) {
   1682         notifyFlushComplete(audio);
   1683     }
   1684 }
   1685 
   1686 void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
   1687     while (!queue->empty()) {
   1688         QueueEntry *entry = &*queue->begin();
   1689 
   1690         if (entry->mBuffer != NULL) {
   1691             entry->mNotifyConsumed->post();
   1692         } else if (entry->mNotifyConsumed != nullptr) {
    1693             // Pending audio format change: is it needed to open the audio sink now?
   1694             onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
   1695         }
   1696 
   1697         queue->erase(queue->begin());
   1698         entry = NULL;
   1699     }
   1700 }
   1701 
   1702 void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
   1703     sp<AMessage> notify = mNotify->dup();
   1704     notify->setInt32("what", kWhatFlushComplete);
   1705     notify->setInt32("audio", static_cast<int32_t>(audio));
   1706     notify->post();
   1707 }
   1708 
   1709 bool NuPlayer::Renderer::dropBufferIfStale(
   1710         bool audio, const sp<AMessage> &msg) {
   1711     int32_t queueGeneration;
   1712     CHECK(msg->findInt32("queueGeneration", &queueGeneration));
   1713 
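             // A stale queue generation means this buffer was queued before the most recent flush;
             // return it via notifyConsumed instead of queueing it.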
   1714     if (queueGeneration == getQueueGeneration(audio)) {
   1715         return false;
   1716     }
   1717 
   1718     sp<AMessage> notifyConsumed;
   1719     if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
   1720         notifyConsumed->post();
   1721     }
   1722 
   1723     return true;
   1724 }
   1725 
   1726 void NuPlayer::Renderer::onAudioSinkChanged() {
   1727     if (offloadingAudio()) {
   1728         return;
   1729     }
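             // Non-offload (PCM) sink changed: re-sync the written-frame bookkeeping with the new sink.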
   1730     CHECK(!mDrainAudioQueuePending);
   1731     mNumFramesWritten = 0;
   1732     mAnchorNumFramesWritten = -1;
   1733     uint32_t written;
   1734     if (mAudioSink->getFramesWritten(&written) == OK) {
   1735         mNumFramesWritten = written;
   1736     }
   1737 }
   1738 
   1739 void NuPlayer::Renderer::onDisableOffloadAudio() {
   1740     Mutex::Autolock autoLock(mLock);
   1741     mFlags &= ~FLAG_OFFLOAD_AUDIO;
   1742     ++mAudioDrainGeneration;
   1743     if (mAudioRenderingStartGeneration != -1) {
   1744         prepareForMediaRenderingStart_l();
   1745         // PauseTimeout is applied to offload mode only. Cancel pending timer.
   1746         cancelAudioOffloadPauseTimeout();
   1747     }
   1748 }
   1749 
   1750 void NuPlayer::Renderer::onEnableOffloadAudio() {
   1751     Mutex::Autolock autoLock(mLock);
   1752     mFlags |= FLAG_OFFLOAD_AUDIO;
   1753     ++mAudioDrainGeneration;
   1754     if (mAudioRenderingStartGeneration != -1) {
   1755         prepareForMediaRenderingStart_l();
   1756     }
   1757 }
   1758 
   1759 void NuPlayer::Renderer::onPause() {
   1760     if (mPaused) {
   1761         return;
   1762     }
   1763 
   1764     {
   1765         Mutex::Autolock autoLock(mLock);
    1766         // We do not increment the audio drain generation, so the audio buffer keeps filling during pause.
   1767         ++mVideoDrainGeneration;
   1768         prepareForMediaRenderingStart_l();
   1769         mPaused = true;
   1770         mMediaClock->setPlaybackRate(0.0);
   1771     }
   1772 
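             // Clear the pending-drain flags so new drains can be posted when playback resumes.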
   1773     mDrainAudioQueuePending = false;
   1774     mDrainVideoQueuePending = false;
   1775 
   1776     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
   1777     mAudioSink->pause();
   1778     startAudioOffloadPauseTimeout();
   1779 
   1780     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
   1781           mAudioQueue.size(), mVideoQueue.size());
   1782 }
   1783 
   1784 void NuPlayer::Renderer::onResume() {
   1785     if (!mPaused) {
   1786         return;
   1787     }
   1788 
   1789     // Note: audio data may not have been decoded, and the AudioSink may not be opened.
   1790     cancelAudioOffloadPauseTimeout();
   1791     if (mAudioSink->ready()) {
   1792         status_t err = mAudioSink->start();
   1793         if (err != OK) {
   1794             ALOGE("cannot start AudioSink err %d", err);
   1795             notifyAudioTearDown(kDueToError);
   1796         }
   1797     }
   1798 
   1799     {
   1800         Mutex::Autolock autoLock(mLock);
   1801         mPaused = false;
    1802         // The rendering-started notification may have been deferred while we were paused.
   1803         if (mRenderingDataDelivered) {
   1804             notifyIfMediaRenderingStarted_l();
   1805         }
    1806         // Configure the AudioSink, since we did not do it when pausing.
   1807         if (mAudioSink != NULL && mAudioSink->ready()) {
   1808             mAudioSink->setPlaybackRate(mPlaybackSettings);
   1809         }
   1810 
   1811         mMediaClock->setPlaybackRate(mPlaybackRate);
   1812 
   1813         if (!mAudioQueue.empty()) {
   1814             postDrainAudioQueue_l();
   1815         }
   1816     }
   1817 
   1818     if (!mVideoQueue.empty()) {
   1819         postDrainVideoQueue();
   1820     }
   1821 }
   1822 
   1823 void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
   1824     if (mVideoScheduler == NULL) {
   1825         mVideoScheduler = new VideoFrameScheduler();
   1826     }
   1827     mVideoScheduler->init(fps);
   1828 }
   1829 
   1830 int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
   1831     Mutex::Autolock autoLock(mLock);
   1832     return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
   1833 }
   1834 
   1835 int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
   1836     Mutex::Autolock autoLock(mLock);
   1837     return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
   1838 }
   1839 
   1840 bool NuPlayer::Renderer::getSyncQueues() {
   1841     Mutex::Autolock autoLock(mLock);
   1842     return mSyncQueues;
   1843 }
   1844 
   1845 void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
   1846     if (mAudioTornDown) {
   1847         return;
   1848     }
   1849 
   1850     // TimeoutWhenPaused is only for offload mode.
   1851     if (reason == kDueToTimeout && !offloadingAudio()) {
   1852         return;
   1853     }
   1854 
   1855     mAudioTornDown = true;
   1856 
   1857     int64_t currentPositionUs;
   1858     sp<AMessage> notify = mNotify->dup();
   1859     if (getCurrentPosition(&currentPositionUs) == OK) {
   1860         notify->setInt64("positionUs", currentPositionUs);
   1861     }
   1862 
   1863     mAudioSink->stop();
   1864     mAudioSink->flush();
   1865 
   1866     notify->setInt32("what", kWhatAudioTearDown);
   1867     notify->setInt32("reason", reason);
   1868     notify->post();
   1869 }
   1870 
   1871 void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
   1872     if (offloadingAudio()) {
   1873         mWakeLock->acquire();
   1874         sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
   1875         msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
   1876         msg->post(kOffloadPauseMaxUs);
   1877     }
   1878 }
   1879 
   1880 void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
   1881     // We may have called startAudioOffloadPauseTimeout() without
   1882     // the AudioSink open and with offloadingAudio enabled.
   1883     //
   1884     // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
   1885     // we always release the wakelock and increment the pause timeout generation.
   1886     //
   1887     // Note: The acquired wakelock prevents the device from suspending
   1888     // immediately after offload pause (in case a resume happens shortly thereafter).
   1889     mWakeLock->release(true);
   1890     ++mAudioOffloadPauseTimeoutGeneration;
   1891 }
   1892 
   1893 status_t NuPlayer::Renderer::onOpenAudioSink(
   1894         const sp<AMessage> &format,
   1895         bool offloadOnly,
   1896         bool hasVideo,
   1897         uint32_t flags,
   1898         bool isStreaming) {
   1899     ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
   1900             offloadOnly, offloadingAudio());
   1901     bool audioSinkChanged = false;
   1902 
   1903     int32_t numChannels;
   1904     CHECK(format->findInt32("channel-count", &numChannels));
   1905 
   1906     int32_t channelMask;
   1907     if (!format->findInt32("channel-mask", &channelMask)) {
   1908         // signal to the AudioSink to derive the mask from count.
   1909         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
   1910     }
   1911 
   1912     int32_t sampleRate;
   1913     CHECK(format->findInt32("sample-rate", &sampleRate));
   1914 
   1915     // read pcm encoding from MediaCodec output format, if available
   1916     int32_t pcmEncoding;
   1917     audio_format_t audioFormat =
   1918             format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ?
   1919                     audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT;
   1920 
   1921     if (offloadingAudio()) {
   1922         AString mime;
   1923         CHECK(format->findString("mime", &mime));
   1924         status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
   1925 
   1926         if (err != OK) {
   1927             ALOGE("Couldn't map mime \"%s\" to a valid "
   1928                     "audio_format", mime.c_str());
   1929             onDisableOffloadAudio();
   1930         } else {
   1931             ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
   1932                     mime.c_str(), audioFormat);
   1933 
   1934             int avgBitRate = -1;
   1935             format->findInt32("bitrate", &avgBitRate);
   1936 
   1937             int32_t aacProfile = -1;
   1938             if (audioFormat == AUDIO_FORMAT_AAC
   1939                     && format->findInt32("aac-profile", &aacProfile)) {
    1940                 // Redefine the AAC audio format according to the AAC profile.
   1941                 mapAACProfileToAudioFormat(
   1942                         audioFormat,
   1943                         aacProfile);
   1944             }
   1945 
   1946             audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
   1947             offloadInfo.duration_us = -1;
   1948             format->findInt64(
   1949                     "durationUs", &offloadInfo.duration_us);
   1950             offloadInfo.sample_rate = sampleRate;
   1951             offloadInfo.channel_mask = channelMask;
   1952             offloadInfo.format = audioFormat;
   1953             offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
   1954             offloadInfo.bit_rate = avgBitRate;
   1955             offloadInfo.has_video = hasVideo;
   1956             offloadInfo.is_streaming = isStreaming;
   1957 
   1958             if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
   1959                 ALOGV("openAudioSink: no change in offload mode");
   1960                 // no change from previous configuration, everything ok.
   1961                 return OK;
   1962             }
   1963             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
   1964 
   1965             ALOGV("openAudioSink: try to open AudioSink in offload mode");
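                     // For offload, add the compress-offload output flag and drop deep-buffer,
                     // which does not apply in offload mode.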
   1966             uint32_t offloadFlags = flags;
   1967             offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
   1968             offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
   1969             audioSinkChanged = true;
   1970             mAudioSink->close();
   1971 
   1972             err = mAudioSink->open(
   1973                     sampleRate,
   1974                     numChannels,
   1975                     (audio_channel_mask_t)channelMask,
   1976                     audioFormat,
   1977                     0 /* bufferCount - unused */,
   1978                     &NuPlayer::Renderer::AudioSinkCallback,
   1979                     this,
   1980                     (audio_output_flags_t)offloadFlags,
   1981                     &offloadInfo);
   1982 
   1983             if (err == OK) {
   1984                 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
   1985             }
   1986 
   1987             if (err == OK) {
   1988                 // If the playback is offloaded to h/w, we pass
   1989                 // the HAL some metadata information.
   1990                 // We don't want to do this for PCM because it
   1991                 // will be going through the AudioFlinger mixer
   1992                 // before reaching the hardware.
   1993                 // TODO
   1994                 mCurrentOffloadInfo = offloadInfo;
   1995                 if (!mPaused) { // for preview mode, don't start if paused
   1996                     err = mAudioSink->start();
   1997                 }
   1998                 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
   1999             }
   2000             if (err != OK) {
   2001                 // Clean up, fall back to non offload mode.
   2002                 mAudioSink->close();
   2003                 onDisableOffloadAudio();
   2004                 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
   2005                 ALOGV("openAudioSink: offload failed");
   2006                 if (offloadOnly) {
   2007                     notifyAudioTearDown(kForceNonOffload);
   2008                 }
   2009             } else {
   2010                 mUseAudioCallback = true;  // offload mode transfers data through callback
   2011                 ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
   2012             }
   2013         }
   2014     }
   2015     if (!offloadOnly && !offloadingAudio()) {
   2016         ALOGV("openAudioSink: open AudioSink in NON-offload mode");
   2017         uint32_t pcmFlags = flags;
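                 // PCM output must not carry the compress-offload flag.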
   2018         pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
   2019 
   2020         const PcmInfo info = {
   2021                 (audio_channel_mask_t)channelMask,
   2022                 (audio_output_flags_t)pcmFlags,
   2023                 audioFormat,
   2024                 numChannels,
   2025                 sampleRate
   2026         };
   2027         if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
   2028             ALOGV("openAudioSink: no change in pcm mode");
   2029             // no change from previous configuration, everything ok.
   2030             return OK;
   2031         }
   2032 
   2033         audioSinkChanged = true;
   2034         mAudioSink->close();
   2035         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
   2036         // Note: It is possible to set up the callback, but not use it to send audio data.
   2037         // This requires a fix in AudioSink to explicitly specify the transfer mode.
   2038         mUseAudioCallback = getUseAudioCallbackSetting();
   2039         if (mUseAudioCallback) {
   2040             ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
   2041         }
   2042 
   2043         // Compute the desired buffer size.
   2044         // For callback mode, the amount of time before wakeup is about half the buffer size.
   2045         const uint32_t frameCount =
   2046                 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
   2047 
    2048         // With doNotReconnect set, the AudioSink signals back and lets NuPlayer re-construct the
    2049         // AudioSink instead of reconnecting on its own. We don't want this when there's video,
    2050         // because it would cause a video seek to the previous I-frame. But we do want it when
    2051         // there's only audio, because it gives NuPlayer a chance to switch from non-offload mode
    2052         // to offload mode. So we only set doNotReconnect when there's no video.
   2053         const bool doNotReconnect = !hasVideo;
   2054 
   2055         // We should always be able to set our playback settings if the sink is closed.
   2056         LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
   2057                 "onOpenAudioSink: can't set playback rate on closed sink");
   2058         status_t err = mAudioSink->open(
   2059                     sampleRate,
   2060                     numChannels,
   2061                     (audio_channel_mask_t)channelMask,
   2062                     audioFormat,
   2063                     0 /* bufferCount - unused */,
   2064                     mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
   2065                     mUseAudioCallback ? this : NULL,
   2066                     (audio_output_flags_t)pcmFlags,
   2067                     NULL,
   2068                     doNotReconnect,
   2069                     frameCount);
   2070         if (err != OK) {
   2071             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
   2072             mAudioSink->close();
   2073             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
   2074             return err;
   2075         }
   2076         mCurrentPcmInfo = info;
   2077         if (!mPaused) { // for preview mode, don't start if paused
   2078             mAudioSink->start();
   2079         }
   2080     }
   2081     if (audioSinkChanged) {
   2082         onAudioSinkChanged();
   2083     }
   2084     mAudioTornDown = false;
   2085     return OK;
   2086 }
   2087 
   2088 void NuPlayer::Renderer::onCloseAudioSink() {
   2089     mAudioSink->close();
   2090     mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
   2091     mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
   2092 }
   2093 
   2094 void NuPlayer::Renderer::onChangeAudioFormat(
   2095         const sp<AMessage> &meta, const sp<AMessage> &notify) {
   2096     sp<AMessage> format;
   2097     CHECK(meta->findMessage("format", &format));
   2098 
   2099     int32_t offloadOnly;
   2100     CHECK(meta->findInt32("offload-only", &offloadOnly));
   2101 
   2102     int32_t hasVideo;
   2103     CHECK(meta->findInt32("has-video", &hasVideo));
   2104 
   2105     uint32_t flags;
   2106     CHECK(meta->findInt32("flags", (int32_t *)&flags));
   2107 
   2108     uint32_t isStreaming;
   2109     CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
   2110 
   2111     status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
   2112 
   2113     if (err != OK) {
   2114         notify->setInt32("err", err);
   2115     }
   2116     notify->post();
   2117 }
   2118 
   2119 }  // namespace android
   2120