1 /* 2 * Copyright (C) 2013 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #define ATRACE_TAG ATRACE_TAG_GRAPHICS 18 //#define LOG_NDEBUG 0 19 20 // This is needed for stdint.h to define INT64_MAX in C++ 21 #define __STDC_LIMIT_MACROS 22 23 #include <math.h> 24 25 #include <algorithm> 26 27 #include <log/log.h> 28 #include <utils/String8.h> 29 #include <utils/Thread.h> 30 #include <utils/Trace.h> 31 #include <utils/Vector.h> 32 33 #include <ui/Fence.h> 34 35 #include "DispSync.h" 36 #include "SurfaceFlinger.h" 37 #include "EventLog/EventLog.h" 38 39 using std::max; 40 using std::min; 41 42 namespace android { 43 44 // Setting this to true enables verbose tracing that can be used to debug 45 // vsync event model or phase issues. 46 static const bool kTraceDetailedInfo = false; 47 48 // Setting this to true adds a zero-phase tracer for correlating with hardware 49 // vsync events 50 static const bool kEnableZeroPhaseTracer = false; 51 52 // This is the threshold used to determine when hardware vsync events are 53 // needed to re-synchronize the software vsync model with the hardware. The 54 // error metric used is the mean of the squared difference between each 55 // present time and the nearest software-predicted vsync. 56 static const nsecs_t kErrorThreshold = 160000000000; // 400 usec squared 57 58 #undef LOG_TAG 59 #define LOG_TAG "DispSyncThread" 60 class DispSyncThread: public Thread { 61 public: 62 63 explicit DispSyncThread(const char* name): 64 mName(name), 65 mStop(false), 66 mPeriod(0), 67 mPhase(0), 68 mReferenceTime(0), 69 mWakeupLatency(0), 70 mFrameNumber(0) {} 71 72 virtual ~DispSyncThread() {} 73 74 void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) { 75 if (kTraceDetailedInfo) ATRACE_CALL(); 76 Mutex::Autolock lock(mMutex); 77 mPeriod = period; 78 mPhase = phase; 79 mReferenceTime = referenceTime; 80 ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64 81 " mReferenceTime = %" PRId64, mName, ns2us(mPeriod), 82 ns2us(mPhase), ns2us(mReferenceTime)); 83 mCond.signal(); 84 } 85 86 void stop() { 87 if (kTraceDetailedInfo) ATRACE_CALL(); 88 Mutex::Autolock lock(mMutex); 89 mStop = true; 90 mCond.signal(); 91 } 92 93 virtual bool threadLoop() { 94 status_t err; 95 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 96 97 while (true) { 98 Vector<CallbackInvocation> callbackInvocations; 99 100 nsecs_t targetTime = 0; 101 102 { // Scope for lock 103 Mutex::Autolock lock(mMutex); 104 105 if (kTraceDetailedInfo) { 106 ATRACE_INT64("DispSync:Frame", mFrameNumber); 107 } 108 ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber); 109 ++mFrameNumber; 110 111 if (mStop) { 112 return false; 113 } 114 115 if (mPeriod == 0) { 116 err = mCond.wait(mMutex); 117 if (err != NO_ERROR) { 118 ALOGE("error waiting for new events: %s (%d)", 119 strerror(-err), err); 120 return false; 121 } 122 continue; 123 } 124 125 targetTime = computeNextEventTimeLocked(now); 126 127 bool isWakeup = false; 128 129 if (now < targetTime) { 130 if (kTraceDetailedInfo) ATRACE_NAME("DispSync waiting"); 131 132 if (targetTime == INT64_MAX) { 133 ALOGV("[%s] Waiting forever", mName); 134 err = mCond.wait(mMutex); 135 } else { 136 ALOGV("[%s] Waiting until %" PRId64, mName, 137 ns2us(targetTime)); 138 err = mCond.waitRelative(mMutex, targetTime - now); 139 } 140 141 if (err == TIMED_OUT) { 142 isWakeup = true; 143 } else if (err != NO_ERROR) { 144 ALOGE("error waiting for next event: %s (%d)", 145 strerror(-err), err); 146 return false; 147 } 148 } 149 150 now = systemTime(SYSTEM_TIME_MONOTONIC); 151 152 // Don't correct by more than 1.5 ms 153 static const nsecs_t kMaxWakeupLatency = us2ns(1500); 154 155 if (isWakeup) { 156 mWakeupLatency = ((mWakeupLatency * 63) + 157 (now - targetTime)) / 64; 158 mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency); 159 if (kTraceDetailedInfo) { 160 ATRACE_INT64("DispSync:WakeupLat", now - targetTime); 161 ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency); 162 } 163 } 164 165 callbackInvocations = gatherCallbackInvocationsLocked(now); 166 } 167 168 if (callbackInvocations.size() > 0) { 169 fireCallbackInvocations(callbackInvocations); 170 } 171 } 172 173 return false; 174 } 175 176 status_t addEventListener(const char* name, nsecs_t phase, 177 const sp<DispSync::Callback>& callback) { 178 if (kTraceDetailedInfo) ATRACE_CALL(); 179 Mutex::Autolock lock(mMutex); 180 181 for (size_t i = 0; i < mEventListeners.size(); i++) { 182 if (mEventListeners[i].mCallback == callback) { 183 return BAD_VALUE; 184 } 185 } 186 187 EventListener listener; 188 listener.mName = name; 189 listener.mPhase = phase; 190 listener.mCallback = callback; 191 192 // We want to allow the firstmost future event to fire without 193 // allowing any past events to fire 194 listener.mLastEventTime = systemTime() - mPeriod / 2 + mPhase - 195 mWakeupLatency; 196 197 mEventListeners.push(listener); 198 199 mCond.signal(); 200 201 return NO_ERROR; 202 } 203 204 status_t removeEventListener(const sp<DispSync::Callback>& callback) { 205 if (kTraceDetailedInfo) ATRACE_CALL(); 206 Mutex::Autolock lock(mMutex); 207 208 for (size_t i = 0; i < mEventListeners.size(); i++) { 209 if (mEventListeners[i].mCallback == callback) { 210 mEventListeners.removeAt(i); 211 mCond.signal(); 212 return NO_ERROR; 213 } 214 } 215 216 return BAD_VALUE; 217 } 218 219 // This method is only here to handle the !SurfaceFlinger::hasSyncFramework 220 // case. 221 bool hasAnyEventListeners() { 222 if (kTraceDetailedInfo) ATRACE_CALL(); 223 Mutex::Autolock lock(mMutex); 224 return !mEventListeners.empty(); 225 } 226 227 private: 228 229 struct EventListener { 230 const char* mName; 231 nsecs_t mPhase; 232 nsecs_t mLastEventTime; 233 sp<DispSync::Callback> mCallback; 234 }; 235 236 struct CallbackInvocation { 237 sp<DispSync::Callback> mCallback; 238 nsecs_t mEventTime; 239 }; 240 241 nsecs_t computeNextEventTimeLocked(nsecs_t now) { 242 if (kTraceDetailedInfo) ATRACE_CALL(); 243 ALOGV("[%s] computeNextEventTimeLocked", mName); 244 nsecs_t nextEventTime = INT64_MAX; 245 for (size_t i = 0; i < mEventListeners.size(); i++) { 246 nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i], 247 now); 248 249 if (t < nextEventTime) { 250 nextEventTime = t; 251 } 252 } 253 254 ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime)); 255 return nextEventTime; 256 } 257 258 Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) { 259 if (kTraceDetailedInfo) ATRACE_CALL(); 260 ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName, 261 ns2us(now)); 262 263 Vector<CallbackInvocation> callbackInvocations; 264 nsecs_t onePeriodAgo = now - mPeriod; 265 266 for (size_t i = 0; i < mEventListeners.size(); i++) { 267 nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i], 268 onePeriodAgo); 269 270 if (t < now) { 271 CallbackInvocation ci; 272 ci.mCallback = mEventListeners[i].mCallback; 273 ci.mEventTime = t; 274 ALOGV("[%s] [%s] Preparing to fire", mName, 275 mEventListeners[i].mName); 276 callbackInvocations.push(ci); 277 mEventListeners.editItemAt(i).mLastEventTime = t; 278 } 279 } 280 281 return callbackInvocations; 282 } 283 284 nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener, 285 nsecs_t baseTime) { 286 if (kTraceDetailedInfo) ATRACE_CALL(); 287 ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")", 288 mName, listener.mName, ns2us(baseTime)); 289 290 nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency; 291 ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime)); 292 if (baseTime < lastEventTime) { 293 baseTime = lastEventTime; 294 ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName, 295 ns2us(baseTime)); 296 } 297 298 baseTime -= mReferenceTime; 299 ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime)); 300 nsecs_t phase = mPhase + listener.mPhase; 301 ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase)); 302 baseTime -= phase; 303 ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime)); 304 305 // If our previous time is before the reference (because the reference 306 // has since been updated), the division by mPeriod will truncate 307 // towards zero instead of computing the floor. Since in all cases 308 // before the reference we want the next time to be effectively now, we 309 // set baseTime to -mPeriod so that numPeriods will be -1. 310 // When we add 1 and the phase, we will be at the correct event time for 311 // this period. 312 if (baseTime < 0) { 313 ALOGV("[%s] Correcting negative baseTime", mName); 314 baseTime = -mPeriod; 315 } 316 317 nsecs_t numPeriods = baseTime / mPeriod; 318 ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods); 319 nsecs_t t = (numPeriods + 1) * mPeriod + phase; 320 ALOGV("[%s] t = %" PRId64, mName, ns2us(t)); 321 t += mReferenceTime; 322 ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t)); 323 324 // Check that it's been slightly more than half a period since the last 325 // event so that we don't accidentally fall into double-rate vsyncs 326 if (t - listener.mLastEventTime < (3 * mPeriod / 5)) { 327 t += mPeriod; 328 ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t)); 329 } 330 331 t -= mWakeupLatency; 332 ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t)); 333 334 return t; 335 } 336 337 void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) { 338 if (kTraceDetailedInfo) ATRACE_CALL(); 339 for (size_t i = 0; i < callbacks.size(); i++) { 340 callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime); 341 } 342 } 343 344 const char* const mName; 345 346 bool mStop; 347 348 nsecs_t mPeriod; 349 nsecs_t mPhase; 350 nsecs_t mReferenceTime; 351 nsecs_t mWakeupLatency; 352 353 int64_t mFrameNumber; 354 355 Vector<EventListener> mEventListeners; 356 357 Mutex mMutex; 358 Condition mCond; 359 }; 360 361 #undef LOG_TAG 362 #define LOG_TAG "DispSync" 363 364 class ZeroPhaseTracer : public DispSync::Callback { 365 public: 366 ZeroPhaseTracer() : mParity(false) {} 367 368 virtual void onDispSyncEvent(nsecs_t /*when*/) { 369 mParity = !mParity; 370 ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0); 371 } 372 373 private: 374 bool mParity; 375 }; 376 377 DispSync::DispSync(const char* name) : 378 mName(name), 379 mRefreshSkipCount(0), 380 mThread(new DispSyncThread(name)), 381 mIgnorePresentFences(!SurfaceFlinger::hasSyncFramework){ 382 383 mPresentTimeOffset = SurfaceFlinger::dispSyncPresentTimeOffset; 384 mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE); 385 // set DispSync to SCHED_FIFO to minimize jitter 386 struct sched_param param = {0}; 387 param.sched_priority = 2; 388 if (sched_setscheduler(mThread->getTid(), SCHED_FIFO, ¶m) != 0) { 389 ALOGE("Couldn't set SCHED_FIFO for DispSyncThread"); 390 } 391 392 393 reset(); 394 beginResync(); 395 396 if (kTraceDetailedInfo) { 397 // If we're not getting present fences then the ZeroPhaseTracer 398 // would prevent HW vsync event from ever being turned off. 399 // Even if we're just ignoring the fences, the zero-phase tracing is 400 // not needed because any time there is an event registered we will 401 // turn on the HW vsync events. 402 if (!mIgnorePresentFences && kEnableZeroPhaseTracer) { 403 addEventListener("ZeroPhaseTracer", 0, new ZeroPhaseTracer()); 404 } 405 } 406 } 407 408 DispSync::~DispSync() {} 409 410 void DispSync::reset() { 411 Mutex::Autolock lock(mMutex); 412 413 mPhase = 0; 414 mReferenceTime = 0; 415 mModelUpdated = false; 416 mNumResyncSamples = 0; 417 mFirstResyncSample = 0; 418 mNumResyncSamplesSincePresent = 0; 419 resetErrorLocked(); 420 } 421 422 bool DispSync::addPresentFence(const sp<Fence>& fence) { 423 Mutex::Autolock lock(mMutex); 424 425 mPresentFences[mPresentSampleOffset] = fence; 426 mPresentTimes[mPresentSampleOffset] = 0; 427 mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES; 428 mNumResyncSamplesSincePresent = 0; 429 430 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 431 const sp<Fence>& f(mPresentFences[i]); 432 if (f != NULL) { 433 nsecs_t t = f->getSignalTime(); 434 if (t < INT64_MAX) { 435 mPresentFences[i].clear(); 436 mPresentTimes[i] = t + mPresentTimeOffset; 437 } 438 } 439 } 440 441 updateErrorLocked(); 442 443 return !mModelUpdated || mError > kErrorThreshold; 444 } 445 446 void DispSync::beginResync() { 447 Mutex::Autolock lock(mMutex); 448 ALOGV("[%s] beginResync", mName); 449 mModelUpdated = false; 450 mNumResyncSamples = 0; 451 } 452 453 bool DispSync::addResyncSample(nsecs_t timestamp) { 454 Mutex::Autolock lock(mMutex); 455 456 ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp)); 457 458 size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES; 459 mResyncSamples[idx] = timestamp; 460 if (mNumResyncSamples == 0) { 461 mPhase = 0; 462 mReferenceTime = timestamp; 463 ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, " 464 "mReferenceTime = %" PRId64, mName, ns2us(mPeriod), 465 ns2us(mReferenceTime)); 466 mThread->updateModel(mPeriod, mPhase, mReferenceTime); 467 } 468 469 if (mNumResyncSamples < MAX_RESYNC_SAMPLES) { 470 mNumResyncSamples++; 471 } else { 472 mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES; 473 } 474 475 updateModelLocked(); 476 477 if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) { 478 resetErrorLocked(); 479 } 480 481 if (mIgnorePresentFences) { 482 // If we don't have the sync framework we will never have 483 // addPresentFence called. This means we have no way to know whether 484 // or not we're synchronized with the HW vsyncs, so we just request 485 // that the HW vsync events be turned on whenever we need to generate 486 // SW vsync events. 487 return mThread->hasAnyEventListeners(); 488 } 489 490 // Check against kErrorThreshold / 2 to add some hysteresis before having to 491 // resync again 492 bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2); 493 ALOGV("[%s] addResyncSample returning %s", mName, 494 modelLocked ? "locked" : "unlocked"); 495 return !modelLocked; 496 } 497 498 void DispSync::endResync() { 499 } 500 501 status_t DispSync::addEventListener(const char* name, nsecs_t phase, 502 const sp<Callback>& callback) { 503 Mutex::Autolock lock(mMutex); 504 return mThread->addEventListener(name, phase, callback); 505 } 506 507 void DispSync::setRefreshSkipCount(int count) { 508 Mutex::Autolock lock(mMutex); 509 ALOGD("setRefreshSkipCount(%d)", count); 510 mRefreshSkipCount = count; 511 updateModelLocked(); 512 } 513 514 status_t DispSync::removeEventListener(const sp<Callback>& callback) { 515 Mutex::Autolock lock(mMutex); 516 return mThread->removeEventListener(callback); 517 } 518 519 void DispSync::setPeriod(nsecs_t period) { 520 Mutex::Autolock lock(mMutex); 521 mPeriod = period; 522 mPhase = 0; 523 mReferenceTime = 0; 524 mThread->updateModel(mPeriod, mPhase, mReferenceTime); 525 } 526 527 nsecs_t DispSync::getPeriod() { 528 // lock mutex as mPeriod changes multiple times in updateModelLocked 529 Mutex::Autolock lock(mMutex); 530 return mPeriod; 531 } 532 533 void DispSync::updateModelLocked() { 534 ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples); 535 if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) { 536 ALOGV("[%s] Computing...", mName); 537 nsecs_t durationSum = 0; 538 nsecs_t minDuration = INT64_MAX; 539 nsecs_t maxDuration = 0; 540 for (size_t i = 1; i < mNumResyncSamples; i++) { 541 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES; 542 size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES; 543 nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev]; 544 durationSum += duration; 545 minDuration = min(minDuration, duration); 546 maxDuration = max(maxDuration, duration); 547 } 548 549 // Exclude the min and max from the average 550 durationSum -= minDuration + maxDuration; 551 mPeriod = durationSum / (mNumResyncSamples - 3); 552 553 ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod)); 554 555 double sampleAvgX = 0; 556 double sampleAvgY = 0; 557 double scale = 2.0 * M_PI / double(mPeriod); 558 // Intentionally skip the first sample 559 for (size_t i = 1; i < mNumResyncSamples; i++) { 560 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES; 561 nsecs_t sample = mResyncSamples[idx] - mReferenceTime; 562 double samplePhase = double(sample % mPeriod) * scale; 563 sampleAvgX += cos(samplePhase); 564 sampleAvgY += sin(samplePhase); 565 } 566 567 sampleAvgX /= double(mNumResyncSamples - 1); 568 sampleAvgY /= double(mNumResyncSamples - 1); 569 570 mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale); 571 572 ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase)); 573 574 if (mPhase < -(mPeriod / 2)) { 575 mPhase += mPeriod; 576 ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase)); 577 } 578 579 if (kTraceDetailedInfo) { 580 ATRACE_INT64("DispSync:Period", mPeriod); 581 ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2); 582 } 583 584 // Artificially inflate the period if requested. 585 mPeriod += mPeriod * mRefreshSkipCount; 586 587 mThread->updateModel(mPeriod, mPhase, mReferenceTime); 588 mModelUpdated = true; 589 } 590 } 591 592 void DispSync::updateErrorLocked() { 593 if (!mModelUpdated) { 594 return; 595 } 596 597 // Need to compare present fences against the un-adjusted refresh period, 598 // since they might arrive between two events. 599 nsecs_t period = mPeriod / (1 + mRefreshSkipCount); 600 601 int numErrSamples = 0; 602 nsecs_t sqErrSum = 0; 603 604 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 605 nsecs_t sample = mPresentTimes[i] - mReferenceTime; 606 if (sample > mPhase) { 607 nsecs_t sampleErr = (sample - mPhase) % period; 608 if (sampleErr > period / 2) { 609 sampleErr -= period; 610 } 611 sqErrSum += sampleErr * sampleErr; 612 numErrSamples++; 613 } 614 } 615 616 if (numErrSamples > 0) { 617 mError = sqErrSum / numErrSamples; 618 } else { 619 mError = 0; 620 } 621 622 if (kTraceDetailedInfo) { 623 ATRACE_INT64("DispSync:Error", mError); 624 } 625 } 626 627 void DispSync::resetErrorLocked() { 628 mPresentSampleOffset = 0; 629 mError = 0; 630 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 631 mPresentFences[i].clear(); 632 mPresentTimes[i] = 0; 633 } 634 } 635 636 nsecs_t DispSync::computeNextRefresh(int periodOffset) const { 637 Mutex::Autolock lock(mMutex); 638 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 639 nsecs_t phase = mReferenceTime + mPhase; 640 return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase; 641 } 642 643 void DispSync::dump(String8& result) const { 644 Mutex::Autolock lock(mMutex); 645 result.appendFormat("present fences are %s\n", 646 mIgnorePresentFences ? "ignored" : "used"); 647 result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n", 648 mPeriod, 1000000000.0 / mPeriod, mRefreshSkipCount); 649 result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase); 650 result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n", 651 mError, sqrt(mError)); 652 result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n", 653 mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT); 654 result.appendFormat("mNumResyncSamples: %zd (max %d)\n", 655 mNumResyncSamples, MAX_RESYNC_SAMPLES); 656 657 result.appendFormat("mResyncSamples:\n"); 658 nsecs_t previous = -1; 659 for (size_t i = 0; i < mNumResyncSamples; i++) { 660 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES; 661 nsecs_t sampleTime = mResyncSamples[idx]; 662 if (i == 0) { 663 result.appendFormat(" %" PRId64 "\n", sampleTime); 664 } else { 665 result.appendFormat(" %" PRId64 " (+%" PRId64 ")\n", 666 sampleTime, sampleTime - previous); 667 } 668 previous = sampleTime; 669 } 670 671 result.appendFormat("mPresentFences / mPresentTimes [%d]:\n", 672 NUM_PRESENT_SAMPLES); 673 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 674 previous = 0; 675 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 676 size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES; 677 bool signaled = mPresentFences[idx] == NULL; 678 nsecs_t presentTime = mPresentTimes[idx]; 679 if (!signaled) { 680 result.appendFormat(" [unsignaled fence]\n"); 681 } else if (presentTime == 0) { 682 result.appendFormat(" 0\n"); 683 } else if (previous == 0) { 684 result.appendFormat(" %" PRId64 " (%.3f ms ago)\n", presentTime, 685 (now - presentTime) / 1000000.0); 686 } else { 687 result.appendFormat(" %" PRId64 " (+%" PRId64 " / %.3f) (%.3f ms ago)\n", 688 presentTime, presentTime - previous, 689 (presentTime - previous) / (double) mPeriod, 690 (now - presentTime) / 1000000.0); 691 } 692 previous = presentTime; 693 } 694 695 result.appendFormat("current monotonic time: %" PRId64 "\n", now); 696 } 697 698 } // namespace android 699