Home | History | Annotate | Download | only in surfaceflinger
      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
     18 //#define LOG_NDEBUG 0
     19 
     20 // This is needed for stdint.h to define INT64_MAX in C++
     21 #define __STDC_LIMIT_MACROS
     22 
#include <errno.h>
#include <math.h>
#include <string.h>

#include <algorithm>

#include <log/log.h>
#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Trace.h>
#include <utils/Vector.h>

#include <ui/FenceTime.h>

#include "DispSync.h"
#include "SurfaceFlinger.h"
#include "EventLog/EventLog.h"
     38 
     39 using std::max;
     40 using std::min;
     41 
     42 namespace android {
     43 
     44 // Setting this to true enables verbose tracing that can be used to debug
     45 // vsync event model or phase issues.
     46 static const bool kTraceDetailedInfo = false;
     47 
     48 // Setting this to true adds a zero-phase tracer for correlating with hardware
     49 // vsync events
     50 static const bool kEnableZeroPhaseTracer = false;
     51 
     52 // This is the threshold used to determine when hardware vsync events are
     53 // needed to re-synchronize the software vsync model with the hardware.  The
     54 // error metric used is the mean of the squared difference between each
     55 // present time and the nearest software-predicted vsync.
     56 static const nsecs_t kErrorThreshold = 160000000000;    // 400 usec squared
     57 
     58 #undef LOG_TAG
     59 #define LOG_TAG "DispSyncThread"
// DispSyncThread generates software vsync callbacks from a periodic model
// (period, phase, reference time) supplied by DispSync.  Each iteration it
// computes the earliest upcoming event time across all registered
// listeners, sleeps until then, and fires the callbacks whose event times
// have arrived, compensating for an exponentially-averaged wakeup latency.
class DispSyncThread: public Thread {
public:

    explicit DispSyncThread(const char* name):
            mName(name),
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mReferenceTime(0),
            mWakeupLatency(0),
            mFrameNumber(0) {}

    virtual ~DispSyncThread() {}

    // Replaces the vsync model parameters and signals the thread so it
    // recomputes its next wakeup against the new model.
    void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mReferenceTime = referenceTime;
        ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64
                " mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
                ns2us(mPhase), ns2us(mReferenceTime));
        mCond.signal();
    }

    // Requests thread termination; threadLoop() returns false on its next
    // iteration after observing mStop.
    void stop() {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

    // Main loop: wait until the next listener event time (or until signaled
    // with a model change), then dispatch due callbacks outside the lock.
    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (kTraceDetailedInfo) {
                    ATRACE_INT64("DispSync:Frame", mFrameNumber);
                }
                ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber);
                ++mFrameNumber;

                if (mStop) {
                    return false;
                }

                // No model yet: block until updateModel() provides one.
                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                targetTime = computeNextEventTimeLocked(now);

                bool isWakeup = false;

                if (now < targetTime) {
                    if (kTraceDetailedInfo) ATRACE_NAME("DispSync waiting");

                    // INT64_MAX means no listener has a pending event, so
                    // wait indefinitely until signaled.
                    if (targetTime == INT64_MAX) {
                        ALOGV("[%s] Waiting forever", mName);
                        err = mCond.wait(mMutex);
                    } else {
                        ALOGV("[%s] Waiting until %" PRId64, mName,
                                ns2us(targetTime));
                        err = mCond.waitRelative(mMutex, targetTime - now);
                    }

                    // TIMED_OUT is the expected case: we slept through to
                    // the target time.  Any other wake means the model (or
                    // listener set) changed and we should recompute.
                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                // Don't correct by more than 1.5 ms
                static const nsecs_t kMaxWakeupLatency = us2ns(1500);

                if (isWakeup) {
                    // 63/64 exponentially-weighted moving average of how
                    // late the condvar wakeup was relative to targetTime.
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency);
                    if (kTraceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - targetTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            // Fire callbacks without holding mMutex so listeners may call
            // back into this object.
            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

    // Registers a callback to be fired at the given phase offset from each
    // modeled vsync.  Returns BAD_VALUE if the callback is already
    // registered.
    status_t addEventListener(const char* name, nsecs_t phase,
            const sp<DispSync::Callback>& callback) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mName = name;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the firstmost future event to fire without
        // allowing any past events to fire
        listener.mLastEventTime = systemTime() - mPeriod / 2 + mPhase -
                mWakeupLatency;

        mEventListeners.push(listener);

        mCond.signal();

        return NO_ERROR;
    }

    // Unregisters a previously-added callback.  Returns BAD_VALUE if the
    // callback was not registered.
    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the !SurfaceFlinger::hasSyncFramework
    // case.
    bool hasAnyEventListeners() {
        if (kTraceDetailedInfo) ATRACE_CALL();
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:

    // A registered callback plus its phase offset and the last event time
    // at which it was fired (used to suppress double-rate events).
    struct EventListener {
        const char* mName;
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
    };

    // A callback that is due to fire, captured under the lock so it can be
    // invoked after the lock is released.
    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };

    // Returns the earliest next event time across all listeners, or
    // INT64_MAX when there are no listeners.
    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] computeNextEventTimeLocked", mName);
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime));
        return nextEventTime;
    }

    // Collects the callbacks whose next event time (searched from one
    // period ago, so a slightly-late wakeup still catches the event) falls
    // before 'now', updating each listener's mLastEventTime.
    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName,
                ns2us(now));

        Vector<CallbackInvocation> callbackInvocations;
        nsecs_t onePeriodAgo = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    onePeriodAgo);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                ALOGV("[%s] [%s] Preparing to fire", mName,
                        mEventListeners[i].mName);
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }

    // Computes the next event time for one listener at or after baseTime:
    // the next multiple of mPeriod (from mReferenceTime, offset by the
    // combined phase) that is after both baseTime and the listener's last
    // event, pulled earlier by the average wakeup latency.
    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t baseTime) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")",
                mName, listener.mName, ns2us(baseTime));

        // Never schedule before (or at) the previous event for this
        // listener.
        nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency;
        ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime));
        if (baseTime < lastEventTime) {
            baseTime = lastEventTime;
            ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName,
                    ns2us(baseTime));
        }

        baseTime -= mReferenceTime;
        ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime));
        nsecs_t phase = mPhase + listener.mPhase;
        ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase));
        baseTime -= phase;
        ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime));

        // If our previous time is before the reference (because the reference
        // has since been updated), the division by mPeriod will truncate
        // towards zero instead of computing the floor. Since in all cases
        // before the reference we want the next time to be effectively now, we
        // set baseTime to -mPeriod so that numPeriods will be -1.
        // When we add 1 and the phase, we will be at the correct event time for
        // this period.
        if (baseTime < 0) {
            ALOGV("[%s] Correcting negative baseTime", mName);
            baseTime = -mPeriod;
        }

        nsecs_t numPeriods = baseTime / mPeriod;
        ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods);
        nsecs_t t = (numPeriods + 1) * mPeriod + phase;
        ALOGV("[%s] t = %" PRId64, mName, ns2us(t));
        t += mReferenceTime;
        ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t));

        // Check that it's been slightly more than half a period since the last
        // event so that we don't accidentally fall into double-rate vsyncs
        if (t - listener.mLastEventTime < (3 * mPeriod / 5)) {
            t += mPeriod;
            ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t));
        }

        t -= mWakeupLatency;
        ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t));

        return t;
    }

    // Invokes each collected callback.  Called WITHOUT mMutex held.
    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        if (kTraceDetailedInfo) ATRACE_CALL();
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    const char* const mName;       // thread name used in log messages

    bool mStop;                    // set by stop(); terminates threadLoop()

    // Current vsync model; all protected by mMutex.
    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mReferenceTime;
    nsecs_t mWakeupLatency;        // EWMA of condvar wakeup lateness

    int64_t mFrameNumber;          // loop-iteration counter for tracing

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};
    360 
    361 #undef LOG_TAG
    362 #define LOG_TAG "DispSync"
    363 
    364 class ZeroPhaseTracer : public DispSync::Callback {
    365 public:
    366     ZeroPhaseTracer() : mParity(false) {}
    367 
    368     virtual void onDispSyncEvent(nsecs_t /*when*/) {
    369         mParity = !mParity;
    370         ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    371     }
    372 
    373 private:
    374     bool mParity;
    375 };
    376 
    377 DispSync::DispSync(const char* name) :
    378         mName(name),
    379         mRefreshSkipCount(0),
    380         mThread(new DispSyncThread(name)) {
    381 }
    382 
    383 DispSync::~DispSync() {}
    384 
    385 void DispSync::init(bool hasSyncFramework, int64_t dispSyncPresentTimeOffset) {
    386     mIgnorePresentFences = !hasSyncFramework;
    387     mPresentTimeOffset = dispSyncPresentTimeOffset;
    388     mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);
    389 
    390     // set DispSync to SCHED_FIFO to minimize jitter
    391     struct sched_param param = {0};
    392     param.sched_priority = 2;
    393     if (sched_setscheduler(mThread->getTid(), SCHED_FIFO, &param) != 0) {
    394         ALOGE("Couldn't set SCHED_FIFO for DispSyncThread");
    395     }
    396 
    397     reset();
    398     beginResync();
    399 
    400     if (kTraceDetailedInfo) {
    401         // If we're not getting present fences then the ZeroPhaseTracer
    402         // would prevent HW vsync event from ever being turned off.
    403         // Even if we're just ignoring the fences, the zero-phase tracing is
    404         // not needed because any time there is an event registered we will
    405         // turn on the HW vsync events.
    406         if (!mIgnorePresentFences && kEnableZeroPhaseTracer) {
    407             addEventListener("ZeroPhaseTracer", 0, new ZeroPhaseTracer());
    408         }
    409     }
    410 }
    411 
    412 void DispSync::reset() {
    413     Mutex::Autolock lock(mMutex);
    414 
    415     mPhase = 0;
    416     mReferenceTime = 0;
    417     mModelUpdated = false;
    418     mNumResyncSamples = 0;
    419     mFirstResyncSample = 0;
    420     mNumResyncSamplesSincePresent = 0;
    421     resetErrorLocked();
    422 }
    423 
    424 bool DispSync::addPresentFence(const std::shared_ptr<FenceTime>& fenceTime) {
    425     Mutex::Autolock lock(mMutex);
    426 
    427     mPresentFences[mPresentSampleOffset] = fenceTime;
    428     mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    429     mNumResyncSamplesSincePresent = 0;
    430 
    431     updateErrorLocked();
    432 
    433     return !mModelUpdated || mError > kErrorThreshold;
    434 }
    435 
    436 void DispSync::beginResync() {
    437     Mutex::Autolock lock(mMutex);
    438     ALOGV("[%s] beginResync", mName);
    439     mModelUpdated = false;
    440     mNumResyncSamples = 0;
    441 }
    442 
// Feeds one hardware vsync timestamp into the resync sample ring buffer,
// refits the model, and returns true while further hardware samples are
// still needed (i.e. until the model is locked within the error
// threshold).
bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp));

    // Append to the circular sample buffer.
    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;
    if (mNumResyncSamples == 0) {
        // The first sample anchors the model: phase is measured relative to
        // this reference time from here on.
        mPhase = 0;
        mReferenceTime = timestamp;
        ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, "
                "mReferenceTime = %" PRId64, mName, ns2us(mPeriod),
                ns2us(mReferenceTime));
        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
    }

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        // Buffer full: the oldest sample was just overwritten above.
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    // If present fences have stopped arriving, the error estimate is stale;
    // discard it.
    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (mIgnorePresentFences) {
        // If we don't have the sync framework we will never have
        // addPresentFence called.  This means we have no way to know whether
        // or not we're synchronized with the HW vsyncs, so we just request
        // that the HW vsync events be turned on whenever we need to generate
        // SW vsync events.
        return mThread->hasAnyEventListeners();
    }

    // Check against kErrorThreshold / 2 to add some hysteresis before having to
    // resync again
    bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2);
    ALOGV("[%s] addResyncSample returning %s", mName,
            modelLocked ? "locked" : "unlocked");
    return !modelLocked;
}
    487 
// Marks the end of a resync sequence started by beginResync().  Currently a
// no-op: the model is updated incrementally in addResyncSample(), so there
// is no end-of-sequence work to perform.
void DispSync::endResync() {
}
    490 
    491 status_t DispSync::addEventListener(const char* name, nsecs_t phase,
    492         const sp<Callback>& callback) {
    493     Mutex::Autolock lock(mMutex);
    494     return mThread->addEventListener(name, phase, callback);
    495 }
    496 
    497 void DispSync::setRefreshSkipCount(int count) {
    498     Mutex::Autolock lock(mMutex);
    499     ALOGD("setRefreshSkipCount(%d)", count);
    500     mRefreshSkipCount = count;
    501     updateModelLocked();
    502 }
    503 
    504 status_t DispSync::removeEventListener(const sp<Callback>& callback) {
    505     Mutex::Autolock lock(mMutex);
    506     return mThread->removeEventListener(callback);
    507 }
    508 
    509 void DispSync::setPeriod(nsecs_t period) {
    510     Mutex::Autolock lock(mMutex);
    511     mPeriod = period;
    512     mPhase = 0;
    513     mReferenceTime = 0;
    514     mThread->updateModel(mPeriod, mPhase, mReferenceTime);
    515 }
    516 
    517 nsecs_t DispSync::getPeriod() {
    518     // lock mutex as mPeriod changes multiple times in updateModelLocked
    519     Mutex::Autolock lock(mMutex);
    520     return mPeriod;
    521 }
    522 
// Refits the vsync model (mPeriod, mPhase) from the collected resync
// samples once enough are available, then pushes the new model to the
// DispSyncThread.  Caller must hold mMutex.
void DispSync::updateModelLocked() {
    ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples);
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        ALOGV("[%s] Computing...", mName);
        nsecs_t durationSum = 0;
        nsecs_t minDuration = INT64_MAX;
        nsecs_t maxDuration = 0;
        // Estimate the period by averaging consecutive inter-sample
        // durations.
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev];
            durationSum += duration;
            minDuration = min(minDuration, duration);
            maxDuration = max(maxDuration, duration);
        }

        // Exclude the min and max from the average
        durationSum -= minDuration + maxDuration;
        mPeriod = durationSum / (mNumResyncSamples - 3);

        ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod));

        // Estimate the phase by mapping each sample's offset within the
        // period onto the unit circle and averaging (a circular mean),
        // which handles wrap-around at period boundaries.
        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        // Intentionally skip the first sample
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples - 1);
        sampleAvgY /= double(mNumResyncSamples - 1);

        // Convert the mean angle back to nanoseconds.
        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase));

        // If the fitted phase fell below -period/2, wrap it up by one
        // period.
        if (mPhase < -(mPeriod / 2)) {
            mPhase += mPeriod;
            ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase));
        }

        if (kTraceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2);
        }

        // Artificially inflate the period if requested.
        mPeriod += mPeriod * mRefreshSkipCount;

        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
        mModelUpdated = true;
    }
}
    581 
// Recomputes mError: the mean squared distance between each signaled
// present fence time and the nearest model-predicted vsync.  Caller must
// hold mMutex.
void DispSync::updateErrorLocked() {
    if (!mModelUpdated) {
        return;
    }

    // Need to compare present fences against the un-adjusted refresh period,
    // since they might arrive between two events.
    nsecs_t period = mPeriod / (1 + mRefreshSkipCount);

    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        // Only check for the cached value of signal time to avoid unecessary
        // syscalls. It is the responsibility of the DispSync owner to
        // call getSignalTime() periodically so the cache is updated when the
        // fence signals.
        nsecs_t time = mPresentFences[i]->getCachedSignalTime();
        if (time == Fence::SIGNAL_TIME_PENDING ||
                time == Fence::SIGNAL_TIME_INVALID) {
            continue;
        }

        // Skip samples from before the model's reference point.
        nsecs_t sample = time - mReferenceTime;
        if (sample <= mPhase) {
            continue;
        }

        // Distance to the nearest predicted vsync, folded into
        // [-period/2, period/2].
        nsecs_t sampleErr = (sample - mPhase) % period;
        if (sampleErr > period / 2) {
            sampleErr -= period;
        }
        sqErrSum += sampleErr * sampleErr;
        numErrSamples++;
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
        mZeroErrSamplesCount = 0;
    } else {
        mError = 0;
        // Use mod ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT to avoid log spam.
        mZeroErrSamplesCount++;
        ALOGE_IF(
                (mZeroErrSamplesCount % ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT) == 0,
                "No present times for model error.");
    }

    if (kTraceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}
    634 
    635 void DispSync::resetErrorLocked() {
    636     mPresentSampleOffset = 0;
    637     mError = 0;
    638     mZeroErrSamplesCount = 0;
    639     for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
    640         mPresentFences[i] = FenceTime::NO_FENCE;
    641     }
    642 }
    643 
    644 nsecs_t DispSync::computeNextRefresh(int periodOffset) const {
    645     Mutex::Autolock lock(mMutex);
    646     nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    647     nsecs_t phase = mReferenceTime + mPhase;
    648     return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
    649 }
    650 
// Appends a human-readable summary of the model state, the resync sample
// buffer, and the present-fence history to 'result' (for dumpsys).
void DispSync::dump(String8& result) const {
    Mutex::Autolock lock(mMutex);
    result.appendFormat("present fences are %s\n",
            mIgnorePresentFences ? "ignored" : "used");
    result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n",
            mPeriod, 1000000000.0 / mPeriod, mRefreshSkipCount);
    result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase);
    result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n",
            mError, sqrt(mError));
    result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n",
            mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
    result.appendFormat("mNumResyncSamples: %zd (max %d)\n",
            mNumResyncSamples, MAX_RESYNC_SAMPLES);

    // Dump the resync samples oldest-first, with deltas between
    // consecutive samples.
    result.appendFormat("mResyncSamples:\n");
    nsecs_t previous = -1;
    for (size_t i = 0; i < mNumResyncSamples; i++) {
        size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
        nsecs_t sampleTime = mResyncSamples[idx];
        if (i == 0) {
            result.appendFormat("  %" PRId64 "\n", sampleTime);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 ")\n",
                    sampleTime, sampleTime - previous);
        }
        previous = sampleTime;
    }

    // Dump the present-fence ring buffer oldest-first; for signaled fences
    // also show the delta from the previous fence in periods and the age.
    result.appendFormat("mPresentFences [%d]:\n",
            NUM_PRESENT_SAMPLES);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    previous = Fence::SIGNAL_TIME_INVALID;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
        nsecs_t presentTime = mPresentFences[idx]->getSignalTime();
        if (presentTime == Fence::SIGNAL_TIME_PENDING) {
            result.appendFormat("  [unsignaled fence]\n");
        } else if(presentTime == Fence::SIGNAL_TIME_INVALID) {
            result.appendFormat("  [invalid fence]\n");
        } else if (previous == Fence::SIGNAL_TIME_PENDING ||
                previous == Fence::SIGNAL_TIME_INVALID) {
            result.appendFormat("  %" PRId64 "  (%.3f ms ago)\n", presentTime,
                    (now - presentTime) / 1000000.0);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 " / %.3f)  (%.3f ms ago)\n",
                    presentTime, presentTime - previous,
                    (presentTime - previous) / (double) mPeriod,
                    (now - presentTime) / 1000000.0);
        }
        previous = presentTime;
    }

    result.appendFormat("current monotonic time: %" PRId64 "\n", now);
}
    705 
    706 } // namespace android
    707