/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS

#include <math.h>

#include <algorithm>

#include <log/log.h>
#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Trace.h>
#include <utils/Vector.h>

#include <ui/FenceTime.h>

#include "DispSync.h"
#include "EventLog/EventLog.h"
#include "SurfaceFlinger.h"

using std::max;
using std::min;

namespace android {

// Setting this to true enables verbose tracing that can be used to debug
// the vsync event model or phase issues.
static const bool kTraceDetailedInfo = false;

// Setting this to true adds a zero-phase tracer for correlating with hardware
// vsync events.
static const bool kEnableZeroPhaseTracer = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware.  The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
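// For illustration: a root-mean-squared error of 400 us corresponds to
// (400,000 ns)^2 = 1.6e11 ns^2, which is the value below.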
static const nsecs_t kErrorThreshold = 160000000000; // 400 usec squared

#undef LOG_TAG
#define LOG_TAG "DispSyncThread"
class DispSyncThread : public Thread {
public:
    explicit DispSyncThread(const char* name)
          : mName(name),
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mReferenceTime(0),
            mWakeupLatency(0),
            mFrameNumber(0) {}

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mReferenceTime = referenceTime;
        ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64
              " mReferenceTime = %" PRId64,
              mName, ns2us(mPeriod), ns2us(mPhase), ns2us(mReferenceTime));
        mCond.signal();
    }

    void stop() {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (kTraceDetailedInfo) {
                    ATRACE_INT64("DispSync:Frame", mFrameNumber);
                }
                ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber);
                ++mFrameNumber;

                if (mStop) {
                    return false;
                }

                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)", strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                targetTime = computeNextEventTimeLocked(now);

                bool isWakeup = false;

                if (now < targetTime) {
                    if (kTraceDetailedInfo) ATRACE_NAME("DispSync waiting");

                    if (targetTime == INT64_MAX) {
                        ALOGV("[%s] Waiting forever", mName);
                        err = mCond.wait(mMutex);
                    } else {
                        ALOGV("[%s] Waiting until %" PRId64, mName, ns2us(targetTime));
                        err = mCond.waitRelative(mMutex, targetTime - now);
                    }

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)", strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                // Don't correct by more than 1.5 ms
                static const nsecs_t kMaxWakeupLatency = us2ns(1500);

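                // The running wakeup latency below is an exponential moving
                // average with weight 1/64: each observed (now - targetTime)
                // sample nudges the estimate by roughly 1.6%.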
                if (isWakeup) {
                    mWakeupLatency = ((mWakeupLatency * 63) + (now - targetTime)) / 64;
                    mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency);
                    if (kTraceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - targetTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

    status_t addEventListener(const char* name, nsecs_t phase, DispSync::Callback* callback) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mName = name;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the first future event to fire without
        // allowing any past events to fire.
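        // (Illustrative: with a 60 Hz period of ~16.7 ms, backdating the
        // listener by ~8.3 ms means the earliest event it can be scheduled
        // for is roughly "now", so no stale events fire but the next one
        // does.)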
        listener.mLastEventTime = systemTime() - mPeriod / 2 + mPhase - mWakeupLatency;

        mEventListeners.push(listener);

        mCond.signal();

        return NO_ERROR;
    }

    status_t removeEventListener(DispSync::Callback* callback) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    status_t changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                EventListener& listener = mEventListeners.editItemAt(i);
                const nsecs_t oldPhase = listener.mPhase;
                listener.mPhase = phase;

                // Pretend that this event was last handled in the same frame
                // but with the new offset, to allow for a seamless offset
                // change without double-firing or skipping.
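                // For example (illustrative): moving the phase from 4 ms to
                // 2 ms shifts mLastEventTime back by 2 ms, so the next event
                // still fires exactly one period after the previous one.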
                listener.mLastEventTime -= (oldPhase - phase);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the !SurfaceFlinger::hasSyncFramework
    // case.
    bool hasAnyEventListeners() {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:
    struct EventListener {
        const char* mName;
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        DispSync::Callback* mCallback;
    };

    struct CallbackInvocation {
        DispSync::Callback* mCallback;
        nsecs_t mEventTime;
    };

    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] computeNextEventTimeLocked", mName);
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i], now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime));
        return nextEventTime;
    }

    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName, ns2us(now));

        Vector<CallbackInvocation> callbackInvocations;
        nsecs_t onePeriodAgo = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i], onePeriodAgo);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                ALOGV("[%s] [%s] Preparing to fire", mName, mEventListeners[i].mName);
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }

    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener, nsecs_t baseTime) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")", mName, listener.mName,
              ns2us(baseTime));

        nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency;
        ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime));
        if (baseTime < lastEventTime) {
            baseTime = lastEventTime;
            ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName, ns2us(baseTime));
        }

        baseTime -= mReferenceTime;
        ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime));
        nsecs_t phase = mPhase + listener.mPhase;
        ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase));
        baseTime -= phase;
        ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime));

        // If our previous time is before the reference (because the reference
        // has since been updated), the division by mPeriod will truncate
        // towards zero instead of computing the floor. Since in all cases
        // before the reference we want the next time to be effectively now, we
        // set baseTime to -mPeriod so that numPeriods will be -1.
        // When we add 1 and the phase, we will be at the correct event time for
        // this period.
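        // (E.g. a relative baseTime of -5 ms would otherwise truncate to
        // numPeriods = 0 and push the event a full period into the future.)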
        if (baseTime < 0) {
            ALOGV("[%s] Correcting negative baseTime", mName);
            baseTime = -mPeriod;
        }

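        // Worked example (illustrative, 60 Hz): with mPeriod ~= 16.7 ms and a
        // relative baseTime of 25 ms, numPeriods = 1, so the next event lands
        // at 2 * mPeriod + phase, i.e. the start of the following period.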
        nsecs_t numPeriods = baseTime / mPeriod;
        ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods);
        nsecs_t t = (numPeriods + 1) * mPeriod + phase;
        ALOGV("[%s] t = %" PRId64, mName, ns2us(t));
        t += mReferenceTime;
        ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t));

        // Check that it's been slightly more than half a period since the last
        // event so that we don't accidentally fall into double-rate vsyncs
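        // (3/5 of a period, e.g. ~10 ms at 60 Hz, is the cutoff, so small
        // amounts of jitter cannot schedule two events in the same period.)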
        if (t - listener.mLastEventTime < (3 * mPeriod / 5)) {
            t += mPeriod;
            ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t));
        }

        t -= mWakeupLatency;
        ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t));

        return t;
    }

    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    const char* const mName;

    bool mStop;

    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mReferenceTime;
    nsecs_t mWakeupLatency;

    int64_t mFrameNumber;

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};

#undef LOG_TAG
#define LOG_TAG "DispSync"

class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}

    virtual void onDispSyncEvent(nsecs_t /*when*/) {
        mParity = !mParity;
        ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    }

private:
    bool mParity;
};

DispSync::DispSync(const char* name)
      : mName(name), mRefreshSkipCount(0), mThread(new DispSyncThread(name)) {}

DispSync::~DispSync() {}

void DispSync::init(bool hasSyncFramework, int64_t dispSyncPresentTimeOffset) {
    mIgnorePresentFences = !hasSyncFramework;
    mPresentTimeOffset = dispSyncPresentTimeOffset;
    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);

    // set DispSync to SCHED_FIFO to minimize jitter
    struct sched_param param = {0};
    param.sched_priority = 2;
    if (sched_setscheduler(mThread->getTid(), SCHED_FIFO, &param) != 0) {
        ALOGE("Couldn't set SCHED_FIFO for DispSyncThread");
    }

    reset();
    beginResync();

    if (kTraceDetailedInfo) {
        // If we're not getting present fences then the ZeroPhaseTracer
        // would prevent the HW vsync events from ever being turned off.
        // Even if we're just ignoring the fences, the zero-phase tracing is
        // not needed because any time there is an event registered we will
        // turn on the HW vsync events.
        if (!mIgnorePresentFences && kEnableZeroPhaseTracer) {
            mZeroPhaseTracer = std::make_unique<ZeroPhaseTracer>();
            addEventListener("ZeroPhaseTracer", 0, mZeroPhaseTracer.get());
        }
    }
}

void DispSync::reset() {
    Mutex::Autolock lock(mMutex);

    mPhase = 0;
    mReferenceTime = 0;
    mModelUpdated = false;
    mNumResyncSamples = 0;
    mFirstResyncSample = 0;
    mNumResyncSamplesSincePresent = 0;
    resetErrorLocked();
}

bool DispSync::addPresentFence(const std::shared_ptr<FenceTime>& fenceTime) {
    Mutex::Autolock lock(mMutex);

    mPresentFences[mPresentSampleOffset] = fenceTime;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

    updateErrorLocked();

    return !mModelUpdated || mError > kErrorThreshold;
}

void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);
    ALOGV("[%s] beginResync", mName);
    mModelUpdated = false;
    mNumResyncSamples = 0;
}

bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp));

    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;
    if (mNumResyncSamples == 0) {
        mPhase = 0;
        mReferenceTime = timestamp;
        ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, "
              "mReferenceTime = %" PRId64,
              mName, ns2us(mPeriod), ns2us(mReferenceTime));
        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
    }

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (mIgnorePresentFences) {
        // If we don't have the sync framework, addPresentFence() will never
        // be called.  This means we have no way to know whether or not we're
        // synchronized with the HW vsyncs, so we just request that the HW
        // vsync events be turned on whenever we need to generate SW vsync
        // events.
        return mThread->hasAnyEventListeners();
    }

    // Check against kErrorThreshold / 2 to add some hysteresis before having to
    // resync again
    bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2);
    ALOGV("[%s] addResyncSample returning %s", mName, modelLocked ? "locked" : "unlocked");
    return !modelLocked;
}

void DispSync::endResync() {}

status_t DispSync::addEventListener(const char* name, nsecs_t phase, Callback* callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(name, phase, callback);
}

void DispSync::setRefreshSkipCount(int count) {
    Mutex::Autolock lock(mMutex);
    ALOGD("setRefreshSkipCount(%d)", count);
    mRefreshSkipCount = count;
    updateModelLocked();
}

status_t DispSync::removeEventListener(Callback* callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback);
}

status_t DispSync::changePhaseOffset(Callback* callback, nsecs_t phase) {
    Mutex::Autolock lock(mMutex);
    return mThread->changePhaseOffset(callback, phase);
}

void DispSync::setPeriod(nsecs_t period) {
    Mutex::Autolock lock(mMutex);
    mPeriod = period;
    mPhase = 0;
    mReferenceTime = 0;
    mThread->updateModel(mPeriod, mPhase, mReferenceTime);
}

nsecs_t DispSync::getPeriod() {
    // lock mutex as mPeriod changes multiple times in updateModelLocked
    Mutex::Autolock lock(mMutex);
    return mPeriod;
}

void DispSync::updateModelLocked() {
    ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples);
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        ALOGV("[%s] Computing...", mName);
        nsecs_t durationSum = 0;
        nsecs_t minDuration = INT64_MAX;
        nsecs_t maxDuration = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev];
            durationSum += duration;
            minDuration = min(minDuration, duration);
            maxDuration = max(maxDuration, duration);
        }

        // Exclude the min and max from the average
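        // (N samples yield N - 1 inter-sample durations; dropping the two
        // extremes leaves N - 3, hence the divisor below.)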
        durationSum -= minDuration + maxDuration;
        mPeriod = durationSum / (mNumResyncSamples - 3);

        ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod));

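        // The phase is estimated as a circular mean: each sample's offset
        // within the period is mapped to an angle on the unit circle, the
        // cos/sin components are averaged, and atan2 recovers the mean angle.
        // This avoids the wraparound bias a plain arithmetic mean would have
        // for samples straddling a period boundary.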
        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        // Intentionally skip the first sample
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples - 1);
        sampleAvgY /= double(mNumResyncSamples - 1);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase));

        if (mPhase < -(mPeriod / 2)) {
            mPhase += mPeriod;
            ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase));
        }

        if (kTraceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2);
        }

        // Artificially inflate the period if requested.
        mPeriod += mPeriod * mRefreshSkipCount;

        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
        mModelUpdated = true;
    }
}

void DispSync::updateErrorLocked() {
    if (!mModelUpdated) {
        return;
    }

    // Need to compare present fences against the un-adjusted refresh period,
    // since they might arrive between two events.
    nsecs_t period = mPeriod / (1 + mRefreshSkipCount);

    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        // Only check for the cached value of signal time to avoid unnecessary
        // syscalls. It is the responsibility of the DispSync owner to
        // call getSignalTime() periodically so the cache is updated when the
        // fence signals.
        nsecs_t time = mPresentFences[i]->getCachedSignalTime();
        if (time == Fence::SIGNAL_TIME_PENDING || time == Fence::SIGNAL_TIME_INVALID) {
            continue;
        }

        nsecs_t sample = time - mReferenceTime;
        if (sample <= mPhase) {
            continue;
        }

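        // The modulo below wraps each sample's distance from the nearest
        // predicted vsync into (-period/2, period/2], so an error of, say,
        // period - 1 ns counts as -1 ns rather than nearly a full period.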
        nsecs_t sampleErr = (sample - mPhase) % period;
        if (sampleErr > period / 2) {
            sampleErr -= period;
        }
        sqErrSum += sampleErr * sampleErr;
        numErrSamples++;
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
        mZeroErrSamplesCount = 0;
    } else {
        mError = 0;
        // Use mod ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT to avoid log spam.
        mZeroErrSamplesCount++;
        ALOGE_IF((mZeroErrSamplesCount % ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT) == 0,
                 "No present times for model error.");
    }

    if (kTraceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}

void DispSync::resetErrorLocked() {
    mPresentSampleOffset = 0;
    mError = 0;
    mZeroErrSamplesCount = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        mPresentFences[i] = FenceTime::NO_FENCE;
    }
}

nsecs_t DispSync::computeNextRefresh(int periodOffset) const {
    Mutex::Autolock lock(mMutex);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    nsecs_t phase = mReferenceTime + mPhase;
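    // Rounds now down to the most recent predicted vsync, then steps forward
    // (periodOffset + 1) periods; e.g. periodOffset = 0 yields the next
    // upcoming vsync.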
    return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
}

void DispSync::dump(String8& result) const {
    Mutex::Autolock lock(mMutex);
    result.appendFormat("present fences are %s\n", mIgnorePresentFences ? "ignored" : "used");
    result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n", mPeriod,
                        1000000000.0 / mPeriod, mRefreshSkipCount);
    result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase);
    result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n", mError, sqrt(mError));
    result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n",
                        mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
    result.appendFormat("mNumResyncSamples: %zd (max %d)\n", mNumResyncSamples, MAX_RESYNC_SAMPLES);

    result.appendFormat("mResyncSamples:\n");
    nsecs_t previous = -1;
    for (size_t i = 0; i < mNumResyncSamples; i++) {
        size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
        nsecs_t sampleTime = mResyncSamples[idx];
        if (i == 0) {
            result.appendFormat("  %" PRId64 "\n", sampleTime);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 ")\n", sampleTime,
                                sampleTime - previous);
        }
        previous = sampleTime;
    }

    result.appendFormat("mPresentFences [%d]:\n", NUM_PRESENT_SAMPLES);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    previous = Fence::SIGNAL_TIME_INVALID;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
        nsecs_t presentTime = mPresentFences[idx]->getSignalTime();
        if (presentTime == Fence::SIGNAL_TIME_PENDING) {
            result.appendFormat("  [unsignaled fence]\n");
        } else if (presentTime == Fence::SIGNAL_TIME_INVALID) {
            result.appendFormat("  [invalid fence]\n");
        } else if (previous == Fence::SIGNAL_TIME_PENDING ||
                   previous == Fence::SIGNAL_TIME_INVALID) {
            result.appendFormat("  %" PRId64 "  (%.3f ms ago)\n", presentTime,
                                (now - presentTime) / 1000000.0);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 " / %.3f)  (%.3f ms ago)\n", presentTime,
                                presentTime - previous, (presentTime - previous) / (double)mPeriod,
                                (now - presentTime) / 1000000.0);
        }
        previous = presentTime;
    }

    result.appendFormat("current monotonic time: %" PRId64 "\n", now);
}

} // namespace android