1 /* 2 * Copyright (C) 2013 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #define ATRACE_TAG ATRACE_TAG_GRAPHICS 18 //#define LOG_NDEBUG 0 19 20 // This is needed for stdint.h to define INT64_MAX in C++ 21 #define __STDC_LIMIT_MACROS 22 23 #include <math.h> 24 25 #include <algorithm> 26 27 #include <android-base/stringprintf.h> 28 #include <cutils/properties.h> 29 #include <log/log.h> 30 #include <utils/Thread.h> 31 #include <utils/Trace.h> 32 33 #include <ui/FenceTime.h> 34 35 #include "DispSync.h" 36 #include "EventLog/EventLog.h" 37 #include "SurfaceFlinger.h" 38 39 using android::base::StringAppendF; 40 using std::max; 41 using std::min; 42 43 namespace android { 44 45 DispSync::~DispSync() = default; 46 DispSync::Callback::~Callback() = default; 47 48 namespace impl { 49 50 // Setting this to true adds a zero-phase tracer for correlating with hardware 51 // vsync events 52 static const bool kEnableZeroPhaseTracer = false; 53 54 // This is the threshold used to determine when hardware vsync events are 55 // needed to re-synchronize the software vsync model with the hardware. The 56 // error metric used is the mean of the squared difference between each 57 // present time and the nearest software-predicted vsync. 58 static const nsecs_t kErrorThreshold = 160000000000; // 400 usec squared 59 60 #undef LOG_TAG 61 #define LOG_TAG "DispSyncThread" 62 class DispSyncThread : public Thread { 63 public: 64 DispSyncThread(const char* name, bool showTraceDetailedInfo) 65 : mName(name), 66 mStop(false), 67 mModelLocked(false), 68 mPeriod(0), 69 mPhase(0), 70 mReferenceTime(0), 71 mWakeupLatency(0), 72 mFrameNumber(0), 73 mTraceDetailedInfo(showTraceDetailedInfo) {} 74 75 virtual ~DispSyncThread() {} 76 77 void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) { 78 if (mTraceDetailedInfo) ATRACE_CALL(); 79 Mutex::Autolock lock(mMutex); 80 81 mPhase = phase; 82 if (mReferenceTime != referenceTime) { 83 for (auto& eventListener : mEventListeners) { 84 eventListener.mHasFired = false; 85 } 86 } 87 mReferenceTime = referenceTime; 88 if (mPeriod != 0 && mPeriod != period && mReferenceTime != 0) { 89 // Inflate the reference time to be the most recent predicted 90 // vsync before the current time. 91 const nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 92 const nsecs_t baseTime = now - mReferenceTime; 93 const nsecs_t numOldPeriods = baseTime / mPeriod; 94 mReferenceTime = mReferenceTime + (numOldPeriods)*mPeriod; 95 } 96 mPeriod = period; 97 if (mTraceDetailedInfo) { 98 ATRACE_INT64("DispSync:Period", mPeriod); 99 ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2); 100 ATRACE_INT64("DispSync:Reference Time", mReferenceTime); 101 } 102 ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64 103 " mReferenceTime = %" PRId64, 104 mName, ns2us(mPeriod), ns2us(mPhase), ns2us(mReferenceTime)); 105 mCond.signal(); 106 } 107 108 void stop() { 109 if (mTraceDetailedInfo) ATRACE_CALL(); 110 Mutex::Autolock lock(mMutex); 111 mStop = true; 112 mCond.signal(); 113 } 114 115 void lockModel() { 116 Mutex::Autolock lock(mMutex); 117 mModelLocked = true; 118 } 119 120 void unlockModel() { 121 Mutex::Autolock lock(mMutex); 122 mModelLocked = false; 123 } 124 125 virtual bool threadLoop() { 126 status_t err; 127 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 128 129 while (true) { 130 std::vector<CallbackInvocation> callbackInvocations; 131 132 nsecs_t targetTime = 0; 133 134 { // Scope for lock 135 Mutex::Autolock lock(mMutex); 136 137 if (mTraceDetailedInfo) { 138 ATRACE_INT64("DispSync:Frame", mFrameNumber); 139 } 140 ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber); 141 ++mFrameNumber; 142 143 if (mStop) { 144 return false; 145 } 146 147 if (mPeriod == 0) { 148 err = mCond.wait(mMutex); 149 if (err != NO_ERROR) { 150 ALOGE("error waiting for new events: %s (%d)", strerror(-err), err); 151 return false; 152 } 153 continue; 154 } 155 156 targetTime = computeNextEventTimeLocked(now); 157 158 bool isWakeup = false; 159 160 if (now < targetTime) { 161 if (mTraceDetailedInfo) ATRACE_NAME("DispSync waiting"); 162 163 if (targetTime == INT64_MAX) { 164 ALOGV("[%s] Waiting forever", mName); 165 err = mCond.wait(mMutex); 166 } else { 167 ALOGV("[%s] Waiting until %" PRId64, mName, ns2us(targetTime)); 168 err = mCond.waitRelative(mMutex, targetTime - now); 169 } 170 171 if (err == TIMED_OUT) { 172 isWakeup = true; 173 } else if (err != NO_ERROR) { 174 ALOGE("error waiting for next event: %s (%d)", strerror(-err), err); 175 return false; 176 } 177 } 178 179 now = systemTime(SYSTEM_TIME_MONOTONIC); 180 181 // Don't correct by more than 1.5 ms 182 static const nsecs_t kMaxWakeupLatency = us2ns(1500); 183 184 if (isWakeup) { 185 mWakeupLatency = ((mWakeupLatency * 63) + (now - targetTime)) / 64; 186 mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency); 187 if (mTraceDetailedInfo) { 188 ATRACE_INT64("DispSync:WakeupLat", now - targetTime); 189 ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency); 190 } 191 } 192 193 callbackInvocations = gatherCallbackInvocationsLocked(now); 194 } 195 196 if (callbackInvocations.size() > 0) { 197 fireCallbackInvocations(callbackInvocations); 198 } 199 } 200 201 return false; 202 } 203 204 status_t addEventListener(const char* name, nsecs_t phase, DispSync::Callback* callback, 205 nsecs_t lastCallbackTime) { 206 if (mTraceDetailedInfo) ATRACE_CALL(); 207 Mutex::Autolock lock(mMutex); 208 209 for (size_t i = 0; i < mEventListeners.size(); i++) { 210 if (mEventListeners[i].mCallback == callback) { 211 return BAD_VALUE; 212 } 213 } 214 215 EventListener listener; 216 listener.mName = name; 217 listener.mPhase = phase; 218 listener.mCallback = callback; 219 220 // We want to allow the firstmost future event to fire without 221 // allowing any past events to fire. To do this extrapolate from 222 // mReferenceTime the most recent hardware vsync, and pin the 223 // last event time there. 224 const nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 225 if (mPeriod != 0) { 226 const nsecs_t baseTime = now - mReferenceTime; 227 const nsecs_t numPeriodsSinceReference = baseTime / mPeriod; 228 const nsecs_t predictedReference = mReferenceTime + numPeriodsSinceReference * mPeriod; 229 const nsecs_t phaseCorrection = mPhase + listener.mPhase; 230 const nsecs_t predictedLastEventTime = predictedReference + phaseCorrection; 231 if (predictedLastEventTime >= now) { 232 // Make sure that the last event time does not exceed the current time. 233 // If it would, then back the last event time by a period. 234 listener.mLastEventTime = predictedLastEventTime - mPeriod; 235 } else { 236 listener.mLastEventTime = predictedLastEventTime; 237 } 238 } else { 239 listener.mLastEventTime = now + mPhase - mWakeupLatency; 240 } 241 242 if (lastCallbackTime <= 0) { 243 // If there is no prior callback time, try to infer one based on the 244 // logical last event time. 245 listener.mLastCallbackTime = listener.mLastEventTime + mWakeupLatency; 246 } else { 247 listener.mLastCallbackTime = lastCallbackTime; 248 } 249 250 mEventListeners.push_back(listener); 251 252 mCond.signal(); 253 254 return NO_ERROR; 255 } 256 257 status_t removeEventListener(DispSync::Callback* callback, nsecs_t* outLastCallback) { 258 if (mTraceDetailedInfo) ATRACE_CALL(); 259 Mutex::Autolock lock(mMutex); 260 261 for (std::vector<EventListener>::iterator it = mEventListeners.begin(); 262 it != mEventListeners.end(); ++it) { 263 if (it->mCallback == callback) { 264 *outLastCallback = it->mLastCallbackTime; 265 mEventListeners.erase(it); 266 mCond.signal(); 267 return NO_ERROR; 268 } 269 } 270 271 return BAD_VALUE; 272 } 273 274 status_t changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) { 275 if (mTraceDetailedInfo) ATRACE_CALL(); 276 Mutex::Autolock lock(mMutex); 277 278 for (auto& eventListener : mEventListeners) { 279 if (eventListener.mCallback == callback) { 280 const nsecs_t oldPhase = eventListener.mPhase; 281 eventListener.mPhase = phase; 282 283 // Pretend that the last time this event was handled at the same frame but with the 284 // new offset to allow for a seamless offset change without double-firing or 285 // skipping. 286 nsecs_t diff = oldPhase - phase; 287 if (diff > mPeriod / 2) { 288 diff -= mPeriod; 289 } else if (diff < -mPeriod / 2) { 290 diff += mPeriod; 291 } 292 eventListener.mLastEventTime -= diff; 293 mCond.signal(); 294 return NO_ERROR; 295 } 296 } 297 return BAD_VALUE; 298 } 299 300 private: 301 struct EventListener { 302 const char* mName; 303 nsecs_t mPhase; 304 nsecs_t mLastEventTime; 305 nsecs_t mLastCallbackTime; 306 DispSync::Callback* mCallback; 307 bool mHasFired = false; 308 }; 309 310 struct CallbackInvocation { 311 DispSync::Callback* mCallback; 312 nsecs_t mEventTime; 313 }; 314 315 nsecs_t computeNextEventTimeLocked(nsecs_t now) { 316 if (mTraceDetailedInfo) ATRACE_CALL(); 317 ALOGV("[%s] computeNextEventTimeLocked", mName); 318 nsecs_t nextEventTime = INT64_MAX; 319 for (size_t i = 0; i < mEventListeners.size(); i++) { 320 nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i], now); 321 322 if (t < nextEventTime) { 323 nextEventTime = t; 324 } 325 } 326 327 ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime)); 328 return nextEventTime; 329 } 330 331 // Sanity check that the duration is close enough in length to a period without 332 // falling into double-rate vsyncs. 333 bool isCloseToPeriod(nsecs_t duration) { 334 // Ratio of 3/5 is arbitrary, but it must be greater than 1/2. 335 return duration < (3 * mPeriod) / 5; 336 } 337 338 std::vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) { 339 if (mTraceDetailedInfo) ATRACE_CALL(); 340 ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName, ns2us(now)); 341 342 std::vector<CallbackInvocation> callbackInvocations; 343 nsecs_t onePeriodAgo = now - mPeriod; 344 345 for (auto& eventListener : mEventListeners) { 346 nsecs_t t = computeListenerNextEventTimeLocked(eventListener, onePeriodAgo); 347 348 if (t < now) { 349 if (isCloseToPeriod(now - eventListener.mLastCallbackTime)) { 350 eventListener.mLastEventTime = t; 351 ALOGV("[%s] [%s] Skipping event due to model error", mName, 352 eventListener.mName); 353 continue; 354 } 355 if (eventListener.mHasFired && !mModelLocked) { 356 eventListener.mLastEventTime = t; 357 ALOGV("[%s] [%s] Skipping event due to already firing", mName, 358 eventListener.mName); 359 continue; 360 } 361 CallbackInvocation ci; 362 ci.mCallback = eventListener.mCallback; 363 ci.mEventTime = t; 364 ALOGV("[%s] [%s] Preparing to fire, latency: %" PRId64, mName, eventListener.mName, 365 t - eventListener.mLastEventTime); 366 callbackInvocations.push_back(ci); 367 eventListener.mLastEventTime = t; 368 eventListener.mLastCallbackTime = now; 369 eventListener.mHasFired = true; 370 } 371 } 372 373 return callbackInvocations; 374 } 375 376 nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener, nsecs_t baseTime) { 377 if (mTraceDetailedInfo) ATRACE_CALL(); 378 ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")", mName, listener.mName, 379 ns2us(baseTime)); 380 381 nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency; 382 ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime)); 383 if (baseTime < lastEventTime) { 384 baseTime = lastEventTime; 385 ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName, ns2us(baseTime)); 386 } 387 388 baseTime -= mReferenceTime; 389 ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime)); 390 nsecs_t phase = mPhase + listener.mPhase; 391 ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase)); 392 baseTime -= phase; 393 ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime)); 394 395 // If our previous time is before the reference (because the reference 396 // has since been updated), the division by mPeriod will truncate 397 // towards zero instead of computing the floor. Since in all cases 398 // before the reference we want the next time to be effectively now, we 399 // set baseTime to -mPeriod so that numPeriods will be -1. 400 // When we add 1 and the phase, we will be at the correct event time for 401 // this period. 402 if (baseTime < 0) { 403 ALOGV("[%s] Correcting negative baseTime", mName); 404 baseTime = -mPeriod; 405 } 406 407 nsecs_t numPeriods = baseTime / mPeriod; 408 ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods); 409 nsecs_t t = (numPeriods + 1) * mPeriod + phase; 410 ALOGV("[%s] t = %" PRId64, mName, ns2us(t)); 411 t += mReferenceTime; 412 ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t)); 413 414 // Check that it's been slightly more than half a period since the last 415 // event so that we don't accidentally fall into double-rate vsyncs 416 if (isCloseToPeriod(t - listener.mLastEventTime)) { 417 t += mPeriod; 418 ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t)); 419 } 420 421 t -= mWakeupLatency; 422 ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t)); 423 424 return t; 425 } 426 427 void fireCallbackInvocations(const std::vector<CallbackInvocation>& callbacks) { 428 if (mTraceDetailedInfo) ATRACE_CALL(); 429 for (size_t i = 0; i < callbacks.size(); i++) { 430 callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime); 431 } 432 } 433 434 const char* const mName; 435 436 bool mStop; 437 bool mModelLocked; 438 439 nsecs_t mPeriod; 440 nsecs_t mPhase; 441 nsecs_t mReferenceTime; 442 nsecs_t mWakeupLatency; 443 444 int64_t mFrameNumber; 445 446 std::vector<EventListener> mEventListeners; 447 448 Mutex mMutex; 449 Condition mCond; 450 451 // Flag to turn on logging in systrace. 452 const bool mTraceDetailedInfo; 453 }; 454 455 #undef LOG_TAG 456 #define LOG_TAG "DispSync" 457 458 class ZeroPhaseTracer : public DispSync::Callback { 459 public: 460 ZeroPhaseTracer() : mParity(false) {} 461 462 virtual void onDispSyncEvent(nsecs_t /*when*/) { 463 mParity = !mParity; 464 ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0); 465 } 466 467 private: 468 bool mParity; 469 }; 470 471 DispSync::DispSync(const char* name) : mName(name), mRefreshSkipCount(0) { 472 // This flag offers the ability to turn on systrace logging from the shell. 473 char value[PROPERTY_VALUE_MAX]; 474 property_get("debug.sf.dispsync_trace_detailed_info", value, "0"); 475 mTraceDetailedInfo = atoi(value); 476 mThread = new DispSyncThread(name, mTraceDetailedInfo); 477 } 478 479 DispSync::~DispSync() { 480 mThread->stop(); 481 mThread->requestExitAndWait(); 482 } 483 484 void DispSync::init(bool hasSyncFramework, int64_t dispSyncPresentTimeOffset) { 485 mIgnorePresentFences = !hasSyncFramework; 486 mPresentTimeOffset = dispSyncPresentTimeOffset; 487 mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE); 488 489 // set DispSync to SCHED_FIFO to minimize jitter 490 struct sched_param param = {0}; 491 param.sched_priority = 2; 492 if (sched_setscheduler(mThread->getTid(), SCHED_FIFO, ¶m) != 0) { 493 ALOGE("Couldn't set SCHED_FIFO for DispSyncThread"); 494 } 495 496 reset(); 497 beginResync(); 498 499 if (mTraceDetailedInfo && kEnableZeroPhaseTracer) { 500 mZeroPhaseTracer = std::make_unique<ZeroPhaseTracer>(); 501 addEventListener("ZeroPhaseTracer", 0, mZeroPhaseTracer.get(), 0); 502 } 503 } 504 505 void DispSync::reset() { 506 Mutex::Autolock lock(mMutex); 507 resetLocked(); 508 } 509 510 void DispSync::resetLocked() { 511 mPhase = 0; 512 const size_t lastSampleIdx = (mFirstResyncSample + mNumResyncSamples - 1) % MAX_RESYNC_SAMPLES; 513 // Keep the most recent sample, when we resync to hardware we'll overwrite this 514 // with a more accurate signal 515 if (mResyncSamples[lastSampleIdx] != 0) { 516 mReferenceTime = mResyncSamples[lastSampleIdx]; 517 } 518 mModelUpdated = false; 519 for (size_t i = 0; i < MAX_RESYNC_SAMPLES; i++) { 520 mResyncSamples[i] = 0; 521 } 522 mNumResyncSamples = 0; 523 mFirstResyncSample = 0; 524 mNumResyncSamplesSincePresent = 0; 525 mThread->unlockModel(); 526 resetErrorLocked(); 527 } 528 529 bool DispSync::addPresentFence(const std::shared_ptr<FenceTime>& fenceTime) { 530 Mutex::Autolock lock(mMutex); 531 532 if (mIgnorePresentFences) { 533 return true; 534 } 535 536 mPresentFences[mPresentSampleOffset] = fenceTime; 537 mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES; 538 mNumResyncSamplesSincePresent = 0; 539 540 updateErrorLocked(); 541 542 return !mModelUpdated || mError > kErrorThreshold; 543 } 544 545 void DispSync::beginResync() { 546 Mutex::Autolock lock(mMutex); 547 ALOGV("[%s] beginResync", mName); 548 mThread->unlockModel(); 549 mModelUpdated = false; 550 mNumResyncSamples = 0; 551 } 552 553 bool DispSync::addResyncSample(nsecs_t timestamp, bool* periodChanged) { 554 Mutex::Autolock lock(mMutex); 555 556 ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp)); 557 558 *periodChanged = false; 559 const size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES; 560 mResyncSamples[idx] = timestamp; 561 if (mNumResyncSamples == 0) { 562 mPhase = 0; 563 ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, " 564 "mReferenceTime = %" PRId64, 565 mName, ns2us(mPeriod), ns2us(timestamp)); 566 } else if (mPendingPeriod > 0) { 567 // mNumResyncSamples > 0, so priorIdx won't overflow 568 const size_t priorIdx = (mFirstResyncSample + mNumResyncSamples - 1) % MAX_RESYNC_SAMPLES; 569 const nsecs_t lastTimestamp = mResyncSamples[priorIdx]; 570 571 const nsecs_t observedVsync = std::abs(timestamp - lastTimestamp); 572 if (std::abs(observedVsync - mPendingPeriod) < std::abs(observedVsync - mPeriod)) { 573 // Observed vsync is closer to the pending period, so reset the 574 // model and flush the pending period. 575 resetLocked(); 576 mPeriod = mPendingPeriod; 577 mPendingPeriod = 0; 578 if (mTraceDetailedInfo) { 579 ATRACE_INT("DispSync:PendingPeriod", mPendingPeriod); 580 } 581 *periodChanged = true; 582 } 583 } 584 // Always update the reference time with the most recent timestamp. 585 mReferenceTime = timestamp; 586 mThread->updateModel(mPeriod, mPhase, mReferenceTime); 587 588 if (mNumResyncSamples < MAX_RESYNC_SAMPLES) { 589 mNumResyncSamples++; 590 } else { 591 mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES; 592 } 593 594 updateModelLocked(); 595 596 if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) { 597 resetErrorLocked(); 598 } 599 600 if (mIgnorePresentFences) { 601 // If we're ignoring the present fences we have no way to know whether 602 // or not we're synchronized with the HW vsyncs, so we just request 603 // that the HW vsync events be turned on. 604 return true; 605 } 606 607 // Check against kErrorThreshold / 2 to add some hysteresis before having to 608 // resync again 609 bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2) && mPendingPeriod == 0; 610 ALOGV("[%s] addResyncSample returning %s", mName, modelLocked ? "locked" : "unlocked"); 611 if (modelLocked) { 612 mThread->lockModel(); 613 } 614 return !modelLocked; 615 } 616 617 void DispSync::endResync() { 618 mThread->lockModel(); 619 } 620 621 status_t DispSync::addEventListener(const char* name, nsecs_t phase, Callback* callback, 622 nsecs_t lastCallbackTime) { 623 Mutex::Autolock lock(mMutex); 624 return mThread->addEventListener(name, phase, callback, lastCallbackTime); 625 } 626 627 void DispSync::setRefreshSkipCount(int count) { 628 Mutex::Autolock lock(mMutex); 629 ALOGD("setRefreshSkipCount(%d)", count); 630 mRefreshSkipCount = count; 631 updateModelLocked(); 632 } 633 634 status_t DispSync::removeEventListener(Callback* callback, nsecs_t* outLastCallbackTime) { 635 Mutex::Autolock lock(mMutex); 636 return mThread->removeEventListener(callback, outLastCallbackTime); 637 } 638 639 status_t DispSync::changePhaseOffset(Callback* callback, nsecs_t phase) { 640 Mutex::Autolock lock(mMutex); 641 return mThread->changePhaseOffset(callback, phase); 642 } 643 644 void DispSync::setPeriod(nsecs_t period) { 645 Mutex::Autolock lock(mMutex); 646 if (mTraceDetailedInfo) { 647 ATRACE_INT("DispSync:PendingPeriod", period); 648 } 649 mPendingPeriod = period; 650 } 651 652 nsecs_t DispSync::getPeriod() { 653 // lock mutex as mPeriod changes multiple times in updateModelLocked 654 Mutex::Autolock lock(mMutex); 655 return mPeriod; 656 } 657 658 void DispSync::updateModelLocked() { 659 ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples); 660 if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) { 661 ALOGV("[%s] Computing...", mName); 662 nsecs_t durationSum = 0; 663 nsecs_t minDuration = INT64_MAX; 664 nsecs_t maxDuration = 0; 665 for (size_t i = 1; i < mNumResyncSamples; i++) { 666 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES; 667 size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES; 668 nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev]; 669 durationSum += duration; 670 minDuration = min(minDuration, duration); 671 maxDuration = max(maxDuration, duration); 672 } 673 674 // Exclude the min and max from the average 675 durationSum -= minDuration + maxDuration; 676 mPeriod = durationSum / (mNumResyncSamples - 3); 677 678 ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod)); 679 680 double sampleAvgX = 0; 681 double sampleAvgY = 0; 682 double scale = 2.0 * M_PI / double(mPeriod); 683 // Intentionally skip the first sample 684 for (size_t i = 1; i < mNumResyncSamples; i++) { 685 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES; 686 nsecs_t sample = mResyncSamples[idx] - mReferenceTime; 687 double samplePhase = double(sample % mPeriod) * scale; 688 sampleAvgX += cos(samplePhase); 689 sampleAvgY += sin(samplePhase); 690 } 691 692 sampleAvgX /= double(mNumResyncSamples - 1); 693 sampleAvgY /= double(mNumResyncSamples - 1); 694 695 mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale); 696 697 ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase)); 698 699 if (mPhase < -(mPeriod / 2)) { 700 mPhase += mPeriod; 701 ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase)); 702 } 703 704 // Artificially inflate the period if requested. 705 mPeriod += mPeriod * mRefreshSkipCount; 706 707 mThread->updateModel(mPeriod, mPhase, mReferenceTime); 708 mModelUpdated = true; 709 } 710 } 711 712 void DispSync::updateErrorLocked() { 713 if (!mModelUpdated) { 714 return; 715 } 716 717 // Need to compare present fences against the un-adjusted refresh period, 718 // since they might arrive between two events. 719 nsecs_t period = mPeriod / (1 + mRefreshSkipCount); 720 721 int numErrSamples = 0; 722 nsecs_t sqErrSum = 0; 723 724 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 725 // Only check for the cached value of signal time to avoid unecessary 726 // syscalls. It is the responsibility of the DispSync owner to 727 // call getSignalTime() periodically so the cache is updated when the 728 // fence signals. 729 nsecs_t time = mPresentFences[i]->getCachedSignalTime(); 730 if (time == Fence::SIGNAL_TIME_PENDING || time == Fence::SIGNAL_TIME_INVALID) { 731 continue; 732 } 733 734 nsecs_t sample = time - mReferenceTime; 735 if (sample <= mPhase) { 736 continue; 737 } 738 739 nsecs_t sampleErr = (sample - mPhase) % period; 740 if (sampleErr > period / 2) { 741 sampleErr -= period; 742 } 743 sqErrSum += sampleErr * sampleErr; 744 numErrSamples++; 745 } 746 747 if (numErrSamples > 0) { 748 mError = sqErrSum / numErrSamples; 749 mZeroErrSamplesCount = 0; 750 } else { 751 mError = 0; 752 // Use mod ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT to avoid log spam. 753 mZeroErrSamplesCount++; 754 ALOGE_IF((mZeroErrSamplesCount % ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT) == 0, 755 "No present times for model error."); 756 } 757 758 if (mTraceDetailedInfo) { 759 ATRACE_INT64("DispSync:Error", mError); 760 } 761 } 762 763 void DispSync::resetErrorLocked() { 764 mPresentSampleOffset = 0; 765 mError = 0; 766 mZeroErrSamplesCount = 0; 767 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 768 mPresentFences[i] = FenceTime::NO_FENCE; 769 } 770 } 771 772 nsecs_t DispSync::computeNextRefresh(int periodOffset) const { 773 Mutex::Autolock lock(mMutex); 774 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 775 nsecs_t phase = mReferenceTime + mPhase; 776 if (mPeriod == 0) { 777 return 0; 778 } 779 return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase; 780 } 781 782 void DispSync::setIgnorePresentFences(bool ignore) { 783 Mutex::Autolock lock(mMutex); 784 if (mIgnorePresentFences != ignore) { 785 mIgnorePresentFences = ignore; 786 resetLocked(); 787 } 788 } 789 790 void DispSync::dump(std::string& result) const { 791 Mutex::Autolock lock(mMutex); 792 StringAppendF(&result, "present fences are %s\n", mIgnorePresentFences ? "ignored" : "used"); 793 StringAppendF(&result, "mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n", mPeriod, 794 1000000000.0 / mPeriod, mRefreshSkipCount); 795 StringAppendF(&result, "mPhase: %" PRId64 " ns\n", mPhase); 796 StringAppendF(&result, "mError: %" PRId64 " ns (sqrt=%.1f)\n", mError, sqrt(mError)); 797 StringAppendF(&result, "mNumResyncSamplesSincePresent: %d (limit %d)\n", 798 mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT); 799 StringAppendF(&result, "mNumResyncSamples: %zd (max %d)\n", mNumResyncSamples, 800 MAX_RESYNC_SAMPLES); 801 802 result.append("mResyncSamples:\n"); 803 nsecs_t previous = -1; 804 for (size_t i = 0; i < mNumResyncSamples; i++) { 805 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES; 806 nsecs_t sampleTime = mResyncSamples[idx]; 807 if (i == 0) { 808 StringAppendF(&result, " %" PRId64 "\n", sampleTime); 809 } else { 810 StringAppendF(&result, " %" PRId64 " (+%" PRId64 ")\n", sampleTime, 811 sampleTime - previous); 812 } 813 previous = sampleTime; 814 } 815 816 StringAppendF(&result, "mPresentFences [%d]:\n", NUM_PRESENT_SAMPLES); 817 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 818 previous = Fence::SIGNAL_TIME_INVALID; 819 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 820 size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES; 821 nsecs_t presentTime = mPresentFences[idx]->getSignalTime(); 822 if (presentTime == Fence::SIGNAL_TIME_PENDING) { 823 StringAppendF(&result, " [unsignaled fence]\n"); 824 } else if (presentTime == Fence::SIGNAL_TIME_INVALID) { 825 StringAppendF(&result, " [invalid fence]\n"); 826 } else if (previous == Fence::SIGNAL_TIME_PENDING || 827 previous == Fence::SIGNAL_TIME_INVALID) { 828 StringAppendF(&result, " %" PRId64 " (%.3f ms ago)\n", presentTime, 829 (now - presentTime) / 1000000.0); 830 } else { 831 StringAppendF(&result, " %" PRId64 " (+%" PRId64 " / %.3f) (%.3f ms ago)\n", 832 presentTime, presentTime - previous, 833 (presentTime - previous) / (double)mPeriod, 834 (now - presentTime) / 1000000.0); 835 } 836 previous = presentTime; 837 } 838 839 StringAppendF(&result, "current monotonic time: %" PRId64 "\n", now); 840 } 841 842 nsecs_t DispSync::expectedPresentTime() { 843 // The HWC doesn't currently have a way to report additional latency. 844 // Assume that whatever we submit now will appear right after the flip. 845 // For a smart panel this might be 1. This is expressed in frames, 846 // rather than time, because we expect to have a constant frame delay 847 // regardless of the refresh rate. 848 const uint32_t hwcLatency = 0; 849 850 // Ask DispSync when the next refresh will be (CLOCK_MONOTONIC). 851 return computeNextRefresh(hwcLatency); 852 } 853 854 } // namespace impl 855 856 } // namespace android 857