// frameworks/native/services/surfaceflinger/DispSync.cpp
      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
     18 
     19 // This is needed for stdint.h to define INT64_MAX in C++
     20 #define __STDC_LIMIT_MACROS
     21 
     22 #include <math.h>
     23 
     24 #include <cutils/log.h>
     25 
     26 #include <ui/Fence.h>
     27 
     28 #include <utils/String8.h>
     29 #include <utils/Thread.h>
     30 #include <utils/Trace.h>
     31 #include <utils/Vector.h>
     32 
     33 #include "DispSync.h"
     34 #include "EventLog/EventLog.h"
     35 
     36 namespace android {
     37 
// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool traceDetailedInfo = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware.  The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
// Units are ns^2; 160000000000 ns^2 corresponds to an RMS error of 400 us.
static const nsecs_t errorThreshold = 160000000000;

// This works around the lack of support for the sync framework on some
// devices.
#ifdef RUNNING_WITHOUT_SYNC_FRAMEWORK
static const bool runningWithoutSyncFramework = true;
#else
static const bool runningWithoutSyncFramework = false;
#endif

// This is the offset from the present fence timestamps to the corresponding
// vsync event.  PRESENT_TIME_OFFSET_FROM_VSYNC_NS is a build-time constant
// (presumably configured per device in the board config).
static const int64_t presentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;
     59 
// Worker thread that generates software vsync callbacks from the
// period/phase model computed by DispSync.  It sleeps until the next
// predicted event time across all registered listeners, then invokes every
// listener whose event is due.
class DispSyncThread: public Thread {
public:

    DispSyncThread():
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mWakeupLatency(0) {
    }

    virtual ~DispSyncThread() {}

    // Installs a new period/phase model and wakes the thread so it can
    // recompute its next wakeup time.
    void updateModel(nsecs_t period, nsecs_t phase) {
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mCond.signal();
    }

    // Asks threadLoop() to exit on its next iteration.
    void stop() {
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t nextEventTime = 0;

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (mStop) {
                    return false;
                }

                // No model yet -- block until updateModel() or stop() is
                // called.
                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                nextEventTime = computeNextEventTimeLocked(now);
                targetTime = nextEventTime;

                bool isWakeup = false;

                if (now < targetTime) {
                    // Sleep until the next predicted event.  TIMED_OUT means
                    // we actually reached the target time; any other signal
                    // means the model or listener set changed, so we loop
                    // and recompute.
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                if (isWakeup) {
                    // Track oversleep with an exponential moving average
                    // (weight 63/64), clamped so one bad wakeup can't skew
                    // the estimate.
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    if (mWakeupLatency > 500000) {
                        // Don't correct by more than 500 us
                        mWakeupLatency = 500000;
                    }
                    if (traceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            // Fire the callbacks without holding mMutex so a handler can
            // call back into this object (e.g. removeEventListener) without
            // deadlocking.
            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

    // Registers a callback to be fired at the given phase offset from each
    // modeled vsync.  Returns BAD_VALUE if the callback is already
    // registered.
    status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mPhase = phase;
        listener.mCallback = callback;
        // Seed mLastEventTime with "now" so the listener waits for the next
        // modeled event instead of being fired for one in the past (see
        // computeListenerNextEventTimeLocked).
        listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC);
        mEventListeners.push(listener);

        // Wake threadLoop so the new listener is included in the next
        // wakeup-time computation.
        mCond.signal();

        return NO_ERROR;
    }

    // Unregisters a previously-added callback.  Returns BAD_VALUE if it was
    // not registered.
    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the runningWithoutSyncFramework
    // case.
    bool hasAnyEventListeners() {
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:

    struct EventListener {
        nsecs_t mPhase;          // listener-specific offset from each vsync
        nsecs_t mLastEventTime;  // last event time delivered to this listener
        sp<DispSync::Callback> mCallback;
    };

    // A callback plus its event time, captured under the lock so the actual
    // invocation can happen outside it.
    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };

    // Returns the earliest next-event time across all listeners (INT64_MAX
    // if there are none).
    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        return nextEventTime;
    }

    // Collects every listener whose next event time falls before 'now',
    // recording the event time on each so it is not delivered twice.
    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        Vector<CallbackInvocation> callbackInvocations;
        // Look back one full period so an event we slightly overslept is
        // still delivered rather than skipped.
        nsecs_t ref = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    ref);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }

    // Computes the first event time for 'listener' strictly after 'ref'
    // (and after the listener's last delivered event).
    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t ref) {

        nsecs_t lastEventTime = listener.mLastEventTime;
        if (ref < lastEventTime) {
            ref = lastEventTime;
        }

        // Round 'ref' up to the next time congruent with the combined
        // model + listener phase, modulo the period.
        nsecs_t phase = mPhase + listener.mPhase;
        nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;

        // If this would fire within half a period of the last delivered
        // event, skip ahead one period to avoid double-firing for the same
        // vsync.
        if (t - listener.mLastEventTime < mPeriod / 2) {
            t += mPeriod;
        }

        return t;
    }

    // Invokes each gathered callback.  Called WITHOUT mMutex held.
    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    bool mStop;              // protected by mMutex

    nsecs_t mPeriod;         // modeled vsync period in ns (0 = no model yet)
    nsecs_t mPhase;          // modeled vsync phase offset in ns
    nsecs_t mWakeupLatency;  // moving average of wakeup oversleep, ns

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};
    278 
    279 class ZeroPhaseTracer : public DispSync::Callback {
    280 public:
    281     ZeroPhaseTracer() : mParity(false) {}
    282 
    283     virtual void onDispSyncEvent(nsecs_t when) {
    284         mParity = !mParity;
    285         ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    286     }
    287 
    288 private:
    289     bool mParity;
    290 };
    291 
    292 DispSync::DispSync() {
    293     mThread = new DispSyncThread();
    294     mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);
    295 
    296     reset();
    297     beginResync();
    298 
    299     if (traceDetailedInfo) {
    300         // If runningWithoutSyncFramework is true then the ZeroPhaseTracer
    301         // would prevent HW vsync event from ever being turned off.
    302         // Furthermore the zero-phase tracing is not needed because any time
    303         // there is an event registered we will turn on the HW vsync events.
    304         if (!runningWithoutSyncFramework) {
    305             addEventListener(0, new ZeroPhaseTracer());
    306         }
    307     }
    308 }
    309 
    310 DispSync::~DispSync() {}
    311 
    312 void DispSync::reset() {
    313     Mutex::Autolock lock(mMutex);
    314 
    315     mNumResyncSamples = 0;
    316     mFirstResyncSample = 0;
    317     mNumResyncSamplesSincePresent = 0;
    318     resetErrorLocked();
    319 }
    320 
// Records a new present fence in the circular sample buffer and polls all
// outstanding fences for their signal times, then recomputes the model
// error.  Returns true if hardware vsync events should remain enabled
// (no model yet, or the model error exceeds the threshold).
bool DispSync::addPresentFence(const sp<Fence>& fence) {
    Mutex::Autolock lock(mMutex);

    // The slot's timestamp stays 0 until the fence actually signals.
    mPresentFences[mPresentSampleOffset] = fence;
    mPresentTimes[mPresentSampleOffset] = 0;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        const sp<Fence>& f(mPresentFences[i]);
        if (f != NULL) {
            nsecs_t t = f->getSignalTime();
            // getSignalTime() is expected to return INT64_MAX while the
            // fence is still pending; once signaled, latch the timestamp and
            // drop our reference.
            // NOTE(review): a negative error return from getSignalTime()
            // (if the API can produce one) would also pass this check and be
            // recorded as a sample -- confirm against the Fence API.
            if (t < INT64_MAX) {
                mPresentFences[i].clear();
                mPresentTimes[i] = t + presentTimeOffset;
            }
        }
    }

    updateErrorLocked();

    return mPeriod == 0 || mError > errorThreshold;
}
    344 
// Discards all resync samples so a fresh set can be collected via
// addResyncSample().
void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);

    mNumResyncSamples = 0;
}
    350 
// Adds one hardware vsync timestamp to the circular resync sample buffer
// and refits the period/phase model.  Returns true if hardware vsync events
// should remain enabled so resync samples keep arriving.
bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;

    // Grow until the buffer is full, then overwrite the oldest sample.
    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    // NOTE(review): clearing the error after many resync samples with no
    // present fence presumably keeps a stale error metric from driving the
    // HW-vsync decision -- confirm intent against callers of
    // addPresentFence().
    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (runningWithoutSyncFramework) {
        // If we don't have the sync framework we will never have
        // addPresentFence called.  This means we have no way to know whether
        // or not we're synchronized with the HW vsyncs, so we just request
        // that the HW vsync events be turned on whenever we need to generate
        // SW vsync events.
        return mThread->hasAnyEventListeners();
    }

    return mPeriod == 0 || mError > errorThreshold;
}
    380 
// Marks the end of a resync sequence started by beginResync().  Currently a
// no-op; the collected samples simply remain in the buffer.
void DispSync::endResync() {
}
    383 
// Registers a callback to be invoked at the given phase offset from each
// modeled vsync.  Forwards to the worker thread, which owns the listener
// list.
status_t DispSync::addEventListener(nsecs_t phase,
        const sp<Callback>& callback) {

    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(phase, callback);
}
    390 
// Unregisters a callback previously added with addEventListener().
// Forwards to the worker thread, which owns the listener list.
status_t DispSync::removeEventListener(const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback);
}
    395 
    396 void DispSync::setPeriod(nsecs_t period) {
    397     Mutex::Autolock lock(mMutex);
    398     mPeriod = period;
    399     mPhase = 0;
    400     mThread->updateModel(mPeriod, mPhase);
    401 }
    402 
// Refits the period/phase model from the collected resync samples.  Only
// runs once enough samples are available; otherwise the previous model is
// kept unchanged.
void DispSync::updateModelLocked() {
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        // The period is the mean interval between consecutive samples.
        nsecs_t durationSum = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            durationSum += mResyncSamples[idx] - mResyncSamples[prev];
        }

        mPeriod = durationSum / (mNumResyncSamples - 1);

        // The phase is the circular mean of the samples' offsets within the
        // period: map each offset to an angle on the unit circle, average
        // the unit vectors, and convert the mean angle back to nanoseconds.
        // This avoids the wrap-around bias a plain arithmetic mean of the
        // modular offsets would have.
        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        for (size_t i = 0; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx];
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples);
        sampleAvgY /= double(mNumResyncSamples);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        // atan2 can yield a negative angle; fold it into [0, mPeriod).
        if (mPhase < 0) {
            mPhase += mPeriod;
        }

        if (traceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase);
        }

        // Hand the new model to the worker thread that generates SW vsyncs.
        mThread->updateModel(mPeriod, mPhase);
    }
}
    442 
    443 void DispSync::updateErrorLocked() {
    444     if (mPeriod == 0) {
    445         return;
    446     }
    447 
    448     int numErrSamples = 0;
    449     nsecs_t sqErrSum = 0;
    450 
    451     for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
    452         nsecs_t sample = mPresentTimes[i];
    453         if (sample > mPhase) {
    454             nsecs_t sampleErr = (sample - mPhase) % mPeriod;
    455             if (sampleErr > mPeriod / 2) {
    456                 sampleErr -= mPeriod;
    457             }
    458             sqErrSum += sampleErr * sampleErr;
    459             numErrSamples++;
    460         }
    461     }
    462 
    463     if (numErrSamples > 0) {
    464         mError = sqErrSum / numErrSamples;
    465     } else {
    466         mError = 0;
    467     }
    468 
    469     if (traceDetailedInfo) {
    470         ATRACE_INT64("DispSync:Error", mError);
    471     }
    472 }
    473 
    474 void DispSync::resetErrorLocked() {
    475     mPresentSampleOffset = 0;
    476     mError = 0;
    477     for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
    478         mPresentFences[i].clear();
    479         mPresentTimes[i] = 0;
    480     }
    481 }
    482 
    483 } // namespace android
    484