/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS

#include <math.h>

#include <cutils/log.h>

#include <ui/Fence.h>

#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Trace.h>
#include <utils/Vector.h>

#include "DispSync.h"
#include "EventLog/EventLog.h"

namespace android {

// Setting this to true enables verbose tracing that can be used to debug
// issues with the vsync event model or phase.
static const bool traceDetailedInfo = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware.  The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync, so the units are
// nanoseconds squared: 160000000000 ns^2 corresponds to an RMS error of
// roughly 400 us.
static const nsecs_t errorThreshold = 160000000000;

// This works around the lack of support for the sync framework on some
// devices.
#ifdef RUNNING_WITHOUT_SYNC_FRAMEWORK
static const bool runningWithoutSyncFramework = true;
#else
static const bool runningWithoutSyncFramework = false;
#endif

// This is the offset from the present fence timestamps to the corresponding
// vsync event.
static const int64_t presentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;

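// DispSyncThread generates the software vsync callbacks on its own thread.
// It sleeps until the next event time predicted by the current
// (period, phase) model, then invokes every listener whose event is due.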
class DispSyncThread: public Thread {
public:

    DispSyncThread():
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mWakeupLatency(0) {
    }

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase) {
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mCond.signal();
    }

    void stop() {
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

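    // The main loop: compute the next event time from the model, sleep until
    // then (or until the model changes), and fire whichever callbacks are
    // due.  The callbacks are invoked with mMutex released so they may call
    // back into DispSync without deadlocking.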
    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t nextEventTime = 0;

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (mStop) {
                    return false;
                }

                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                nextEventTime = computeNextEventTimeLocked(now);
                targetTime = nextEventTime;

                bool isWakeup = false;

                if (now < targetTime) {
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

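                // Track the observed wakeup latency (how late the thread
                // woke up relative to the target time) with an exponential
                // moving average that keeps 63/64 of the weight on history,
                // clamped to 500 us.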
                if (isWakeup) {
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    if (mWakeupLatency > 500000) {
                        // Don't correct by more than 500 us
                        mWakeupLatency = 500000;
                    }
                    if (traceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

    status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the first future event to fire without allowing
        // any past events to fire.  Because
        // computeListenerNextEventTimeLocked filters out events within half
        // a period of the last event time, we need to initialize the last
        // event time to half a period in the past.
        listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC) - mPeriod / 2;

        mEventListeners.push(listener);

        mCond.signal();

        return NO_ERROR;
    }

    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the runningWithoutSyncFramework
    // case.
    bool hasAnyEventListeners() {
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:

    struct EventListener {
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
    };

    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };

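    // Returns the earliest upcoming event time across all listeners; the
    // thread loop sleeps until this time.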
    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        return nextEventTime;
    }

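    // Collects the listeners whose next event falls before now.  Using
    // (now - mPeriod) as the reference lets an event that was scheduled
    // within the last period fire on this wakeup instead of slipping a full
    // period into the future.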
    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        Vector<CallbackInvocation> callbackInvocations;
        nsecs_t ref = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    ref);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }

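    // Computes a listener's next event time: the first tick of the model,
    // offset by the combined DispSync and listener phase, that falls after
    // ref (or after the listener's last event time, whichever is later).
    // If that tick is within half a period of the last event delivered, it
    // is pushed out one more period so a listener never fires twice for the
    // same vsync.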
    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t ref) {

        nsecs_t lastEventTime = listener.mLastEventTime;
        if (ref < lastEventTime) {
            ref = lastEventTime;
        }

        nsecs_t phase = mPhase + listener.mPhase;
        nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;

        if (t - listener.mLastEventTime < mPeriod / 2) {
            t += mPeriod;
        }

        return t;
    }

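    // Called with mMutex released so the callbacks are free to call back
    // into DispSync.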
    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    bool mStop;

    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mWakeupLatency;

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};

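// ZeroPhaseTracer toggles a systrace counter on every zero-phase software
// vsync, producing a square wave that can be lined up against the hardware
// vsync events in a trace to judge how well the model is tracking.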
class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}

    virtual void onDispSyncEvent(nsecs_t when) {
        mParity = !mParity;
        ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    }

private:
    bool mParity;
};

DispSync::DispSync() {
    mThread = new DispSyncThread();
    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);

    reset();
    beginResync();

    if (traceDetailedInfo) {
        // If runningWithoutSyncFramework is true then the ZeroPhaseTracer
        // would prevent the HW vsync events from ever being turned off.
        // Furthermore, the zero-phase tracing is not needed in that case
        // because the HW vsync events are turned on whenever any listener
        // is registered.
        if (!runningWithoutSyncFramework) {
            addEventListener(0, new ZeroPhaseTracer());
        }
    }
}

DispSync::~DispSync() {}

void DispSync::reset() {
    Mutex::Autolock lock(mMutex);

    mNumResyncSamples = 0;
    mFirstResyncSample = 0;
    mNumResyncSamplesSincePresent = 0;
    resetErrorLocked();
}

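// Records a present fence and returns true while the software model still
// needs hardware vsync events to (re-)synchronize, i.e. while there is no
// period yet or the model error exceeds errorThreshold.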
bool DispSync::addPresentFence(const sp<Fence>& fence) {
    Mutex::Autolock lock(mMutex);

    mPresentFences[mPresentSampleOffset] = fence;
    mPresentTimes[mPresentSampleOffset] = 0;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

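    // Harvest the signal times of any fences that have fired since the last
    // call.  getSignalTime() returns INT64_MAX while a fence is still
    // pending, so those entries are left in place to be checked again later.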
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        const sp<Fence>& f(mPresentFences[i]);
        if (f != NULL) {
            nsecs_t t = f->getSignalTime();
            if (t < INT64_MAX) {
                mPresentFences[i].clear();
                mPresentTimes[i] = t + presentTimeOffset;
            }
        }
    }

    updateErrorLocked();

    return mPeriod == 0 || mError > errorThreshold;
}

void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);

    mNumResyncSamples = 0;
}

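// Feeds one hardware vsync timestamp into the resync ring buffer, updates
// the model, and returns true while more hardware vsync samples are needed.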
bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (runningWithoutSyncFramework) {
        // Without the sync framework, addPresentFence will never be called,
        // so there is no way to tell whether or not the SW vsync model is
        // synchronized with the HW vsyncs.  Instead, just request that the
        // HW vsync events stay on whenever SW vsync events need to be
        // generated.
        return mThread->hasAnyEventListeners();
    }

    return mPeriod == 0 || mError > errorThreshold;
}

void DispSync::endResync() {
}

status_t DispSync::addEventListener(nsecs_t phase,
        const sp<Callback>& callback) {

    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(phase, callback);
}

status_t DispSync::removeEventListener(const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback);
}

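// Example (hypothetical, not part of this file): a client that wants to be
// woken up one millisecond before each hardware vsync might register a
// listener like the sketch below.  MyVsyncHandler and the -1 ms phase are
// illustrative only.
//
//     class MyVsyncHandler : public DispSync::Callback {
//         virtual void onDispSyncEvent(nsecs_t when) {
//             // React to the (phase-adjusted) vsync at time 'when'.
//         }
//     };
//
//     sp<MyVsyncHandler> handler = new MyVsyncHandler();
//     dispSync.addEventListener(-1000000 /* -1 ms, in ns */, handler);
//     ...
//     dispSync.removeEventListener(handler);
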
void DispSync::setPeriod(nsecs_t period) {
    Mutex::Autolock lock(mMutex);
    mPeriod = period;
    mPhase = 0;
    mThread->updateModel(mPeriod, mPhase);
}

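// Re-derives the model from the resync samples.  The period is the mean of
// the deltas between successive samples.  The phase is a circular mean:
// each sample's offset within the period is mapped to a point on the unit
// circle, the points are averaged, and atan2 converts the average back into
// an offset.  This avoids the wraparound artifacts a plain arithmetic mean
// would produce for samples that straddle a period boundary.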
void DispSync::updateModelLocked() {
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        nsecs_t durationSum = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            durationSum += mResyncSamples[idx] - mResyncSamples[prev];
        }

        mPeriod = durationSum / (mNumResyncSamples - 1);

        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        for (size_t i = 0; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx];
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples);
        sampleAvgY /= double(mNumResyncSamples);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        if (mPhase < 0) {
            mPhase += mPeriod;
        }

        if (traceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase);
        }

        mThread->updateModel(mPeriod, mPhase);
    }
}

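// Recomputes mError, the mean squared distance (in ns^2) between each
// present time and its nearest predicted vsync.  The per-sample error is
// wrapped into (-mPeriod/2, mPeriod/2] so a present time just before a
// predicted vsync counts as a small negative error rather than an error of
// almost a full period.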
void DispSync::updateErrorLocked() {
    if (mPeriod == 0) {
        return;
    }

    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        nsecs_t sample = mPresentTimes[i];
        if (sample > mPhase) {
            nsecs_t sampleErr = (sample - mPhase) % mPeriod;
            if (sampleErr > mPeriod / 2) {
                sampleErr -= mPeriod;
            }
            sqErrSum += sampleErr * sampleErr;
            numErrSamples++;
        }
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
    } else {
        mError = 0;
    }

    if (traceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}

void DispSync::resetErrorLocked() {
    mPresentSampleOffset = 0;
    mError = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        mPresentFences[i].clear();
        mPresentTimes[i] = 0;
    }
}

} // namespace android