/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "CanvasContext.h"
#include <GpuMemoryTracker.h>

#include "../Properties.h"
#include "AnimationContext.h"
#include "EglManager.h"
#include "Frame.h"
#include "LayerUpdateQueue.h"
#include "Properties.h"
#include "RenderThread.h"
#include "hwui/Canvas.h"
#include "pipeline/skia/SkiaOpenGLPipeline.h"
#include "pipeline/skia/SkiaPipeline.h"
#include "pipeline/skia/SkiaVulkanPipeline.h"
#include "thread/CommonPool.h"
#include "utils/GLUtils.h"
#include "utils/TimeUtils.h"
#include "utils/TraceUtils.h"

#include <cutils/properties.h>
#include <private/hwui/DrawGlInfo.h>
#include <strings.h>

#include <fcntl.h>
#include <sys/stat.h>
#include <algorithm>

#include <cstdint>
#include <cstdlib>
#include <functional>

#define TRIM_MEMORY_COMPLETE 80
#define TRIM_MEMORY_UI_HIDDEN 20

#define LOG_FRAMETIME_MMA 0

#if LOG_FRAMETIME_MMA
static float sBenchMma = 0;
static int sFrameCount = 0;
static const float NANOS_PER_MILLIS_F = 1000000.0f;
#endif

namespace android {
namespace uirenderer {
namespace renderthread {

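// Factory method: instantiates a CanvasContext backed by whichever Skia pipeline
// (OpenGL or Vulkan) Properties::getRenderPipelineType() selects.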
CanvasContext* CanvasContext::create(RenderThread& thread, bool translucent,
                                     RenderNode* rootRenderNode, IContextFactory* contextFactory) {
    auto renderType = Properties::getRenderPipelineType();

    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaOpenGLPipeline>(thread));
        case RenderPipelineType::SkiaVulkan:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaVulkanPipeline>(thread));
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
    return nullptr;
}

void CanvasContext::invokeFunctor(const RenderThread& thread, Functor* functor) {
    ATRACE_CALL();
    auto renderType = Properties::getRenderPipelineType();
    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            skiapipeline::SkiaOpenGLPipeline::invokeFunctor(thread, functor);
            break;
        case RenderPipelineType::SkiaVulkan:
            skiapipeline::SkiaVulkanPipeline::invokeFunctor(thread, functor);
            break;
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
}

void CanvasContext::prepareToDraw(const RenderThread& thread, Bitmap* bitmap) {
    skiapipeline::SkiaPipeline::prepareToDraw(thread, bitmap);
}

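// The render pipeline is injected by create(); the root RenderNode is registered
// as a root via makeRoot() (undone in the destructor) and profiling is configured
// from the primary display's density and frame interval.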
CanvasContext::CanvasContext(RenderThread& thread, bool translucent, RenderNode* rootRenderNode,
                             IContextFactory* contextFactory,
                             std::unique_ptr<IRenderPipeline> renderPipeline)
        : mRenderThread(thread)
        , mGenerationID(0)
        , mOpaque(!translucent)
        , mAnimationContext(contextFactory->createAnimationContext(mRenderThread.timeLord()))
        , mJankTracker(&thread.globalProfileData(), DeviceInfo::get()->displayInfo())
        , mProfiler(mJankTracker.frames(), thread.timeLord().frameIntervalNanos())
        , mContentDrawBounds(0, 0, 0, 0)
        , mRenderPipeline(std::move(renderPipeline)) {
    rootRenderNode->makeRoot();
    mRenderNodes.emplace_back(rootRenderNode);
    mProfiler.setDensity(DeviceInfo::get()->displayInfo().density);
    setRenderAheadDepth(Properties::defaultRenderAhead);
}

CanvasContext::~CanvasContext() {
    destroy();
    for (auto& node : mRenderNodes) {
        node->clearRoot();
    }
    mRenderNodes.clear();
}

void CanvasContext::addRenderNode(RenderNode* node, bool placeFront) {
    int pos = placeFront ? 0 : static_cast<int>(mRenderNodes.size());
    node->makeRoot();
    mRenderNodes.emplace(mRenderNodes.begin() + pos, node);
}

void CanvasContext::removeRenderNode(RenderNode* node) {
    node->clearRoot();
    mRenderNodes.erase(std::remove(mRenderNodes.begin(), mRenderNodes.end(), node),
                       mRenderNodes.end());
}

void CanvasContext::destroy() {
    stopDrawing();
    setSurface(nullptr);
    freePrefetchedLayers();
    destroyHardwareResources();
    mAnimationContext->destroy();
}

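// Installs (or clears) the backing window. The Surface is wrapped in a
// ReliableSurface with a 500ms dequeue timeout, the render-ahead capacity is
// re-evaluated for the display's refresh rate, and the pipeline is rebound to
// the new surface.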
void CanvasContext::setSurface(sp<Surface>&& surface) {
    ATRACE_CALL();

    if (surface) {
        mNativeSurface = new ReliableSurface{std::move(surface)};
        mNativeSurface->setDequeueTimeout(500_ms);
    } else {
        mNativeSurface = nullptr;
    }

    if (mRenderAheadDepth == 0 && DeviceInfo::get()->getMaxRefreshRate() > 66.6f) {
        mFixedRenderAhead = false;
        mRenderAheadCapacity = 1;
    } else {
        mFixedRenderAhead = true;
        mRenderAheadCapacity = mRenderAheadDepth;
    }

    ColorMode colorMode = mWideColorGamut ? ColorMode::WideColorGamut : ColorMode::SRGB;
    bool hasSurface = mRenderPipeline->setSurface(mNativeSurface.get(), mSwapBehavior, colorMode,
                                                  mRenderAheadCapacity);

    mFrameNumber = -1;

    if (hasSurface) {
        mHaveNewSurface = true;
        mSwapHistory.clear();
    } else {
        mRenderThread.removeFrameCallback(this);
        mGenerationID++;
    }
}

void CanvasContext::setSwapBehavior(SwapBehavior swapBehavior) {
    mSwapBehavior = swapBehavior;
}

bool CanvasContext::pauseSurface() {
    mGenerationID++;
    return mRenderThread.removeFrameCallback(this);
}

void CanvasContext::setStopped(bool stopped) {
    if (mStopped != stopped) {
        mStopped = stopped;
        if (mStopped) {
            mGenerationID++;
            mRenderThread.removeFrameCallback(this);
            mRenderPipeline->onStop();
        } else if (mIsDirty && hasSurface()) {
            mRenderThread.postFrameCallback(this);
        }
    }
}

void CanvasContext::allocateBuffers() {
    if (mNativeSurface) {
        mNativeSurface->allocateBuffers();
    }
}

void CanvasContext::setLightAlpha(uint8_t ambientShadowAlpha, uint8_t spotShadowAlpha) {
    mLightInfo.ambientShadowAlpha = ambientShadowAlpha;
    mLightInfo.spotShadowAlpha = spotShadowAlpha;
}

void CanvasContext::setLightGeometry(const Vector3& lightCenter, float lightRadius) {
    mLightGeometry.center = lightCenter;
    mLightGeometry.radius = lightRadius;
}

void CanvasContext::setOpaque(bool opaque) {
    mOpaque = opaque;
}

void CanvasContext::setWideGamut(bool wideGamut) {
    mWideColorGamut = wideGamut;
}

bool CanvasContext::makeCurrent() {
    if (mStopped) return false;

    auto result = mRenderPipeline->makeCurrent();
    switch (result) {
        case MakeCurrentResult::AlreadyCurrent:
            return true;
        case MakeCurrentResult::Failed:
            mHaveNewSurface = true;
            setSurface(nullptr);
            return false;
        case MakeCurrentResult::Succeeded:
            mHaveNewSurface = true;
            return true;
        default:
            LOG_ALWAYS_FATAL("unexpected result %d from IRenderPipeline::makeCurrent",
                             (int32_t)result);
    }

    return true;
}

static bool wasSkipped(FrameInfo* info) {
    return info && ((*info)[FrameInfoIndex::Flags] & FrameInfoFlags::SkippedFrame);
}

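// Heuristic: with a full history window, report the swap chain as "stuffed" only
// when every recorded swap saw slow dequeue *and* queue times and consecutive
// swaps completed within a few frame intervals of each other.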
bool CanvasContext::isSwapChainStuffed() {
    static const auto SLOW_THRESHOLD = 6_ms;

    if (mSwapHistory.size() != mSwapHistory.capacity()) {
        // We want at least 3 frames of history before attempting to
        // guess if the queue is stuffed
        return false;
    }
    nsecs_t frameInterval = mRenderThread.timeLord().frameIntervalNanos();
    auto& swapA = mSwapHistory[0];

    // Was there a happy queue & dequeue time? If so, don't
    // consider it stuffed
    if (swapA.dequeueDuration < SLOW_THRESHOLD && swapA.queueDuration < SLOW_THRESHOLD) {
        return false;
    }

    for (size_t i = 1; i < mSwapHistory.size(); i++) {
        auto& swapB = mSwapHistory[i];

        // If there's a multi-frameInterval gap we effectively already dropped a frame,
        // so consider the queue healthy.
        if (std::abs(swapA.swapCompletedTime - swapB.swapCompletedTime) > frameInterval * 3) {
            return false;
        }

        // Was there a happy queue & dequeue time? If so, don't
        // consider it stuffed
        if (swapB.dequeueDuration < SLOW_THRESHOLD && swapB.queueDuration < SLOW_THRESHOLD) {
            return false;
        }

        swapA = swapB;
    }

    // All signs point to a stuffed swap chain
    ATRACE_NAME("swap chain stuffed");
    return true;
}

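// Sync stage of a frame: imports the frame info handed over from the UI thread,
// runs animators and prepareTree over every root node, then decides whether this
// frame can actually be drawn (surface present, not already drawn for this vsync,
// backdrop renderable, next buffer reservable).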
void CanvasContext::prepareTree(TreeInfo& info, int64_t* uiFrameInfo, int64_t syncQueued,
                                RenderNode* target) {
    mRenderThread.removeFrameCallback(this);

    // If the previous frame was dropped we don't need to hold onto it, so
    // just keep using the previous frame's structure instead
    if (!wasSkipped(mCurrentFrameInfo)) {
        mCurrentFrameInfo = mJankTracker.startFrame();
    }
    mCurrentFrameInfo->importUiThreadInfo(uiFrameInfo);
    mCurrentFrameInfo->set(FrameInfoIndex::SyncQueued) = syncQueued;
    mCurrentFrameInfo->markSyncStart();

    info.damageAccumulator = &mDamageAccumulator;
    info.layerUpdateQueue = &mLayerUpdateQueue;
    info.out.canDrawThisFrame = true;

    mAnimationContext->startFrame(info.mode);
    mRenderPipeline->onPrepareTree();
    for (const sp<RenderNode>& node : mRenderNodes) {
        // Only the primary target node will be drawn full - all other nodes would get drawn in
        // real time mode. In case of a window, the primary node is the window content and the other
        // node(s) are non client / filler nodes.
        info.mode = (node.get() == target ? TreeInfo::MODE_FULL : TreeInfo::MODE_RT_ONLY);
        node->prepareTree(info);
        GL_CHECKPOINT(MODERATE);
    }
    mAnimationContext->runRemainingAnimations(info);
    GL_CHECKPOINT(MODERATE);

    freePrefetchedLayers();
    GL_CHECKPOINT(MODERATE);

    mIsDirty = true;

    if (CC_UNLIKELY(!hasSurface())) {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
        info.out.canDrawThisFrame = false;
        return;
    }

    if (CC_LIKELY(mSwapHistory.size() && !Properties::forceDrawFrame)) {
        nsecs_t latestVsync = mRenderThread.timeLord().latestVsync();
        SwapHistory& lastSwap = mSwapHistory.back();
        nsecs_t vsyncDelta = std::abs(lastSwap.vsyncTime - latestVsync);
        // The slight fudge-factor is to deal with cases where
        // the vsync was estimated due to being slow handling the signal.
        // See the logic in TimeLord#computeFrameTimeNanos or in
        // Choreographer.java for details on when this happens
        if (vsyncDelta < 2_ms) {
            // Already drew for this vsync pulse, UI draw request missed
            // the deadline for RT animations
            info.out.canDrawThisFrame = false;
        }
    } else {
        info.out.canDrawThisFrame = true;
    }

    // TODO: Do we need to abort out if the backdrop is added but not ready? Should that even
    // be an allowable combination?
    if (mRenderNodes.size() > 2 && !mRenderNodes[1]->isRenderable()) {
        info.out.canDrawThisFrame = false;
    }

    if (info.out.canDrawThisFrame) {
        int err = mNativeSurface->reserveNext();
        if (err != OK) {
            mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
            info.out.canDrawThisFrame = false;
            ALOGW("reserveNext failed, error = %d (%s)", err, strerror(-err));
            if (err != TIMED_OUT) {
                // A timed out surface can still recover, but assume others are permanently dead.
                setSurface(nullptr);
                return;
            }
        }
    } else {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
    }

    bool postedFrameCallback = false;
    if (info.out.hasAnimations || !info.out.canDrawThisFrame) {
        if (CC_UNLIKELY(!Properties::enableRTAnimations)) {
            info.out.requiresUiRedraw = true;
        }
        if (!info.out.requiresUiRedraw) {
            // If animationsNeedsRedraw is set don't bother posting for an RT anim
            // as we will just end up fighting the UI thread.
            mRenderThread.postFrameCallback(this);
            postedFrameCallback = true;
        }
    }

    if (!postedFrameCallback &&
        info.out.animatedImageDelay != TreeInfo::Out::kNoAnimatedImageDelay) {
        // Subtract the time of one frame so it can be displayed on time.
        const nsecs_t kFrameTime = mRenderThread.timeLord().frameIntervalNanos();
        if (info.out.animatedImageDelay <= kFrameTime) {
            mRenderThread.postFrameCallback(this);
        } else {
            const auto delay = info.out.animatedImageDelay - kFrameTime;
            int genId = mGenerationID;
            mRenderThread.queue().postDelayed(delay, [this, genId]() {
                if (mGenerationID == genId) {
                    mRenderThread.postFrameCallback(this);
                }
            });
        }
    }
}

void CanvasContext::stopDrawing() {
    mRenderThread.removeFrameCallback(this);
    mAnimationContext->pauseAnimators();
    mGenerationID++;
}

void CanvasContext::notifyFramePending() {
    ATRACE_CALL();
    mRenderThread.pushBackFrameCallback(this);
}

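// When render-ahead is in effect, stamp queued buffers with a target present time
// of vsync + (renderAhead + 1) frame intervals; otherwise fall back to
// NATIVE_WINDOW_TIMESTAMP_AUTO and let the timestamp be generated at queue time.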
void CanvasContext::setPresentTime() {
    int64_t presentTime = NATIVE_WINDOW_TIMESTAMP_AUTO;
    int renderAhead = 0;
    const auto frameIntervalNanos = mRenderThread.timeLord().frameIntervalNanos();
    if (mFixedRenderAhead) {
        renderAhead = std::min(mRenderAheadDepth, mRenderAheadCapacity);
    } else if (frameIntervalNanos < 15_ms) {
        renderAhead = std::min(1, static_cast<int>(mRenderAheadCapacity));
    }

    if (renderAhead) {
        presentTime = mCurrentFrameInfo->get(FrameInfoIndex::Vsync) +
                      (frameIntervalNanos * (renderAhead + 1));
    }
    native_window_set_buffers_timestamp(mNativeSurface.get(), presentTime);
}

void CanvasContext::draw() {
    SkRect dirty;
    mDamageAccumulator.finish(&dirty);

    if (dirty.isEmpty() && Properties::skipEmptyFrames && !surfaceRequiresRedraw()) {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
        return;
    }

    mCurrentFrameInfo->markIssueDrawCommandsStart();

    Frame frame = mRenderPipeline->getFrame();
    setPresentTime();

    SkRect windowDirty = computeDirtyRect(frame, &dirty);

    bool drew = mRenderPipeline->draw(frame, windowDirty, dirty, mLightGeometry, &mLayerUpdateQueue,
                                      mContentDrawBounds, mOpaque, mLightInfo, mRenderNodes,
                                      &(profiler()));

    int64_t frameCompleteNr = mFrameCompleteCallbacks.size() ? getFrameNumber() : -1;

    waitOnFences();

    bool requireSwap = false;
    bool didSwap =
            mRenderPipeline->swapBuffers(frame, drew, windowDirty, mCurrentFrameInfo, &requireSwap);

    mIsDirty = false;

    if (requireSwap) {
        if (!didSwap) {  // some error happened
            setSurface(nullptr);
        }
        SwapHistory& swap = mSwapHistory.next();
        swap.damage = windowDirty;
        swap.swapCompletedTime = systemTime(CLOCK_MONOTONIC);
        swap.vsyncTime = mRenderThread.timeLord().latestVsync();
        if (mNativeSurface.get()) {
            int durationUs;
            nsecs_t dequeueStart = mNativeSurface->getLastDequeueStartTime();
            if (dequeueStart < mCurrentFrameInfo->get(FrameInfoIndex::SyncStart)) {
                // Ignoring dequeue duration as it happened prior to frame render start
                // and thus is not part of the frame.
                swap.dequeueDuration = 0;
            } else {
                mNativeSurface->query(NATIVE_WINDOW_LAST_DEQUEUE_DURATION, &durationUs);
                swap.dequeueDuration = us2ns(durationUs);
            }
            mNativeSurface->query(NATIVE_WINDOW_LAST_QUEUE_DURATION, &durationUs);
            swap.queueDuration = us2ns(durationUs);
        } else {
            swap.dequeueDuration = 0;
            swap.queueDuration = 0;
        }
        mCurrentFrameInfo->set(FrameInfoIndex::DequeueBufferDuration) = swap.dequeueDuration;
        mCurrentFrameInfo->set(FrameInfoIndex::QueueBufferDuration) = swap.queueDuration;
        mHaveNewSurface = false;
        mFrameNumber = -1;
    } else {
        mCurrentFrameInfo->set(FrameInfoIndex::DequeueBufferDuration) = 0;
        mCurrentFrameInfo->set(FrameInfoIndex::QueueBufferDuration) = 0;
    }

    // TODO: Use a fence for real completion?
    mCurrentFrameInfo->markFrameCompleted();

#if LOG_FRAMETIME_MMA
    float thisFrame = mCurrentFrameInfo->duration(FrameInfoIndex::IssueDrawCommandsStart,
                                                  FrameInfoIndex::FrameCompleted) /
                      NANOS_PER_MILLIS_F;
    if (sFrameCount) {
        sBenchMma = ((9 * sBenchMma) + thisFrame) / 10;
    } else {
        sBenchMma = thisFrame;
    }
    if (++sFrameCount == 10) {
        sFrameCount = 1;
        ALOGD("Average frame time: %.4f", sBenchMma);
    }
#endif

    if (didSwap) {
        for (auto& func : mFrameCompleteCallbacks) {
            std::invoke(func, frameCompleteNr);
        }
        mFrameCompleteCallbacks.clear();
    }

    mJankTracker.finishFrame(*mCurrentFrameInfo);
    if (CC_UNLIKELY(mFrameMetricsReporter.get() != nullptr)) {
        mFrameMetricsReporter->reportFrameMetrics(mCurrentFrameInfo->data());
    }

    GpuMemoryTracker::onFrameCompleted();
}

// Called by choreographer to do an RT-driven animation
void CanvasContext::doFrame() {
    if (!mRenderPipeline->isSurfaceReady()) return;
    prepareAndDraw(nullptr);
}

SkISize CanvasContext::getNextFrameSize() const {
    ReliableSurface* surface = mNativeSurface.get();
    if (surface) {
        SkISize size;
        surface->query(NATIVE_WINDOW_WIDTH, &size.fWidth);
        surface->query(NATIVE_WINDOW_HEIGHT, &size.fHeight);
        return size;
    }
    return {INT32_MAX, INT32_MAX};
}

void CanvasContext::prepareAndDraw(RenderNode* node) {
    ATRACE_CALL();

    nsecs_t vsync = mRenderThread.timeLord().computeFrameTimeNanos();
    int64_t frameInfo[UI_THREAD_FRAME_INFO_SIZE];
    UiFrameInfoBuilder(frameInfo).addFlag(FrameInfoFlags::RTAnimation).setVsync(vsync, vsync);

    TreeInfo info(TreeInfo::MODE_RT_ONLY, *this);
    prepareTree(info, frameInfo, systemTime(CLOCK_MONOTONIC), node);
    if (info.out.canDrawThisFrame) {
        draw();
    } else {
        // wait on fences so tasks don't overlap next frame
        waitOnFences();
    }
}

void CanvasContext::markLayerInUse(RenderNode* node) {
    if (mPrefetchedLayers.erase(node)) {
        node->decStrong(nullptr);
    }
}

void CanvasContext::freePrefetchedLayers() {
    if (mPrefetchedLayers.size()) {
        for (auto& node : mPrefetchedLayers) {
            ALOGW("Incorrectly called buildLayer on View: %s, destroying layer...",
                  node->getName());
            node->destroyLayers();
            node->decStrong(nullptr);
        }
        mPrefetchedLayers.clear();
    }
}

void CanvasContext::buildLayer(RenderNode* node) {
    ATRACE_CALL();
    if (!mRenderPipeline->isContextReady()) return;

    // buildLayer() will leave the tree in an unknown state, so we must stop drawing
    stopDrawing();

    TreeInfo info(TreeInfo::MODE_FULL, *this);
    info.damageAccumulator = &mDamageAccumulator;
    info.layerUpdateQueue = &mLayerUpdateQueue;
    info.runAnimations = false;
    node->prepareTree(info);
    SkRect ignore;
    mDamageAccumulator.finish(&ignore);
    // Tickle the GENERIC property on node to mark it as dirty for damaging
    // purposes when the frame is actually drawn
    node->setPropertyFieldsDirty(RenderNode::GENERIC);

    mRenderPipeline->renderLayers(mLightGeometry, &mLayerUpdateQueue, mOpaque, mLightInfo);

    node->incStrong(nullptr);
    mPrefetchedLayers.insert(node);
}

void CanvasContext::destroyHardwareResources() {
    stopDrawing();
    if (mRenderPipeline->isContextReady()) {
        freePrefetchedLayers();
        for (const sp<RenderNode>& node : mRenderNodes) {
            node->destroyHardwareResources();
        }
        mRenderPipeline->onDestroyHardwareResources();
    }
}

void CanvasContext::trimMemory(RenderThread& thread, int level) {
    ATRACE_CALL();
    // No GrContext means there is nothing to free.
    if (!thread.getGrContext()) return;
    if (level >= TRIM_MEMORY_COMPLETE) {
        thread.cacheManager().trimMemory(CacheManager::TrimMemoryMode::Complete);
        thread.destroyRenderingContext();
    } else if (level >= TRIM_MEMORY_UI_HIDDEN) {
        thread.cacheManager().trimMemory(CacheManager::TrimMemoryMode::UiHidden);
    }
}

DeferredLayerUpdater* CanvasContext::createTextureLayer() {
    return mRenderPipeline->createTextureLayer();
}

void CanvasContext::dumpFrames(int fd) {
    mJankTracker.dumpStats(fd);
    mJankTracker.dumpFrames(fd);
}

void CanvasContext::resetFrameStats() {
    mJankTracker.reset();
}

void CanvasContext::setName(const std::string&& name) {
    mJankTracker.setDescription(JankTrackerType::Window, std::move(name));
}

void CanvasContext::waitOnFences() {
    if (mFrameFences.size()) {
        ATRACE_CALL();
        for (auto& fence : mFrameFences) {
            fence.get();
        }
        mFrameFences.clear();
    }
}

void CanvasContext::enqueueFrameWork(std::function<void()>&& func) {
    mFrameFences.push_back(CommonPool::async(std::move(func)));
}

int64_t CanvasContext::getFrameNumber() {
    // mFrameNumber is reset to -1 when the surface changes or we swap buffers
    if (mFrameNumber == -1 && mNativeSurface.get()) {
        mFrameNumber = static_cast<int64_t>(mNativeSurface->getNextFrameNumber());
    }
    return mFrameNumber;
}

bool CanvasContext::surfaceRequiresRedraw() {
    if (!mNativeSurface) return false;
    if (mHaveNewSurface) return true;

    int width = -1;
    int height = -1;
    ReliableSurface* surface = mNativeSurface.get();
    surface->query(NATIVE_WINDOW_WIDTH, &width);
    surface->query(NATIVE_WINDOW_HEIGHT, &height);

    // A redraw is required when the surface dimensions no longer match the
    // frame we drew last.
    return width != mLastFrameWidth || height != mLastFrameHeight;
}

void CanvasContext::setRenderAheadDepth(int renderAhead) {
    if (renderAhead > 2 || renderAhead < 0 || mNativeSurface) {
        return;
    }
    mFixedRenderAhead = true;
    mRenderAheadDepth = static_cast<uint32_t>(renderAhead);
}

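// Figures out what to repaint. Returns the dirty area of the window surface
// (reported as damage at swap time) and widens *dirty with previous frames'
// damage whenever frame.bufferAge() says we're drawing into a buffer more than
// one frame old.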
SkRect CanvasContext::computeDirtyRect(const Frame& frame, SkRect* dirty) {
    if (frame.width() != mLastFrameWidth || frame.height() != mLastFrameHeight) {
        // can't rely on prior content of window if viewport size changes
        dirty->setEmpty();
        mLastFrameWidth = frame.width();
        mLastFrameHeight = frame.height();
    } else if (mHaveNewSurface || frame.bufferAge() == 0) {
        // New surface needs a full draw
        dirty->setEmpty();
    } else {
        if (!dirty->isEmpty() && !dirty->intersect(0, 0, frame.width(), frame.height())) {
            ALOGW("Dirty " RECT_STRING " doesn't intersect with 0 0 %d %d ?", SK_RECT_ARGS(*dirty),
                  frame.width(), frame.height());
            dirty->setEmpty();
        }
        profiler().unionDirty(dirty);
    }

    if (dirty->isEmpty()) {
        dirty->set(0, 0, frame.width(), frame.height());
    }

    // At this point dirty is the area of the window to update. However,
    // the area of the frame we need to repaint is potentially different, so
    // stash the screen area for later
    SkRect windowDirty(*dirty);

    // If the buffer age is 0 we do a full-screen repaint (handled above)
    // If the buffer age is 1 the buffer contents are the same as they were
    // last frame so there's nothing to union() against
    // Therefore we only care about the > 1 case.
    if (frame.bufferAge() > 1) {
        if (frame.bufferAge() > (int)mSwapHistory.size()) {
            // We don't have enough history to handle this old of a buffer
            // Just do a full-draw
            dirty->set(0, 0, frame.width(), frame.height());
        } else {
            // At this point we haven't yet added the latest frame
            // to the damage history (happens below), so union in the damage
            // from the bufferAge - 1 most recent swaps.
            for (int i = mSwapHistory.size() - 1;
                 i > ((int)mSwapHistory.size()) - frame.bufferAge(); i--) {
                dirty->join(mSwapHistory[i].damage);
            }
        }
    }

    return windowDirty;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */