1 /* 2 * Copyright (C) 2012-2015, The Linux Foundation. All rights reserved. 3 * Not a Contribution, Apache license notifications and license are retained 4 * for attribution purposes only. 5 * 6 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * you may not use this file except in compliance with the License. 8 * You may obtain a copy of the License at 9 * 10 * http://www.apache.org/licenses/LICENSE-2.0 11 * 12 * Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an "AS IS" BASIS, 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 * See the License for the specific language governing permissions and 16 * limitations under the License. 17 */ 18 19 #include <math.h> 20 #include "hwc_mdpcomp.h" 21 #include <sys/ioctl.h> 22 #include <dlfcn.h> 23 #include "hdmi.h" 24 #include "qdMetaData.h" 25 #include "mdp_version.h" 26 #include "hwc_fbupdate.h" 27 #include "hwc_ad.h" 28 #include <overlayRotator.h> 29 #include "hwc_copybit.h" 30 #include "qd_utils.h" 31 32 using namespace overlay; 33 using namespace qdutils; 34 using namespace overlay::utils; 35 namespace ovutils = overlay::utils; 36 37 namespace qhwc { 38 39 //==============MDPComp======================================================== 40 41 IdleInvalidator *MDPComp::sIdleInvalidator = NULL; 42 bool MDPComp::sIdleFallBack = false; 43 bool MDPComp::sHandleTimeout = false; 44 bool MDPComp::sDebugLogs = false; 45 bool MDPComp::sEnabled = false; 46 bool MDPComp::sEnableMixedMode = true; 47 int MDPComp::sSimulationFlags = 0; 48 int MDPComp::sMaxPipesPerMixer = 0; 49 bool MDPComp::sEnableYUVsplit = false; 50 bool MDPComp::sSrcSplitEnabled = false; 51 int MDPComp::sMaxSecLayers = 1; 52 bool MDPComp::enablePartialUpdateForMDP3 = false; 53 bool MDPComp::sIsPartialUpdateActive = true; 54 void *MDPComp::sLibPerfHint = NULL; 55 int MDPComp::sPerfLockHandle = 0; 56 int (*MDPComp::sPerfLockAcquire)(int, int, int*, int) = NULL; 57 int (*MDPComp::sPerfLockRelease)(int value) = NULL; 58 int MDPComp::sPerfHintWindow = -1; 59 60 MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) { 61 if(qdutils::MDPVersion::getInstance().isSrcSplit()) { 62 sSrcSplitEnabled = true; 63 return new MDPCompSrcSplit(dpy); 64 } else if(isDisplaySplit(ctx, dpy)) { 65 return new MDPCompSplit(dpy); 66 } 67 return new MDPCompNonSplit(dpy); 68 } 69 70 MDPComp::MDPComp(int dpy):mDpy(dpy){}; 71 72 void MDPComp::dump(android::String8& buf, hwc_context_t *ctx) 73 { 74 if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS) 75 return; 76 77 dumpsys_log(buf,"HWC Map for Dpy: %s \n", 78 (mDpy == 0) ? "\"PRIMARY\"" : 79 (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\""); 80 dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d " 81 "fbCount:%2d \n", mCurrentFrame.layerCount, 82 mCurrentFrame.mdpCount, mCurrentFrame.fbCount); 83 dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n", 84 (mCurrentFrame.needsRedraw? "YES" : "NO"), 85 mCurrentFrame.mdpCount, sMaxPipesPerMixer); 86 if(isDisplaySplit(ctx, mDpy)) { 87 dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] " 88 "Right: [%d, %d, %d, %d] \n", 89 ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top, 90 ctx->listStats[mDpy].lRoi.right, 91 ctx->listStats[mDpy].lRoi.bottom, 92 ctx->listStats[mDpy].rRoi.left,ctx->listStats[mDpy].rRoi.top, 93 ctx->listStats[mDpy].rRoi.right, 94 ctx->listStats[mDpy].rRoi.bottom); 95 } else { 96 dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n", 97 ctx->listStats[mDpy].lRoi.left,ctx->listStats[mDpy].lRoi.top, 98 ctx->listStats[mDpy].lRoi.right, 99 ctx->listStats[mDpy].lRoi.bottom); 100 } 101 dumpsys_log(buf," --------------------------------------------- \n"); 102 dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype | Z \n"); 103 dumpsys_log(buf," --------------------------------------------- \n"); 104 for(int index = 0; index < mCurrentFrame.layerCount; index++ ) 105 dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n", 106 index, 107 (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"), 108 mCurrentFrame.layerToMDP[index], 109 (mCurrentFrame.isFBComposed[index] ? 110 (mCurrentFrame.drop[index] ? "DROP" : 111 (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"), 112 (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ : 113 mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder)); 114 dumpsys_log(buf,"\n"); 115 } 116 117 bool MDPComp::init(hwc_context_t *ctx) { 118 119 if(!ctx) { 120 ALOGE("%s: Invalid hwc context!!",__FUNCTION__); 121 return false; 122 } 123 124 char property[PROPERTY_VALUE_MAX] = {0}; 125 126 sEnabled = false; 127 if((ctx->mMDP.version >= qdutils::MDP_V4_0) && 128 (property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) && 129 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) || 130 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) { 131 sEnabled = true; 132 } 133 134 sEnableMixedMode = true; 135 if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) && 136 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) || 137 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) { 138 sEnableMixedMode = false; 139 } 140 141 qdutils::MDPVersion &mdpVersion = qdutils::MDPVersion::getInstance(); 142 143 sMaxPipesPerMixer = (int)mdpVersion.getBlendStages(); 144 if(property_get("persist.hwc.mdpcomp.maxpermixer", property, "-1") > 0) { 145 int val = atoi(property); 146 if(val >= 0) 147 sMaxPipesPerMixer = min(val, sMaxPipesPerMixer); 148 } 149 150 /* Maximum layers allowed to use MDP on secondary panels. If property 151 * doesn't exist, default to 1. Using the property it can be set to 0 or 152 * more. 153 */ 154 if(property_get("persist.hwc.maxseclayers", property, "1") > 0) { 155 int val = atoi(property); 156 sMaxSecLayers = (val >= 0) ? val : 1; 157 sMaxSecLayers = min(sMaxSecLayers, sMaxPipesPerMixer); 158 } 159 160 if(ctx->mMDP.panel != MIPI_CMD_PANEL) { 161 sIdleInvalidator = IdleInvalidator::getInstance(); 162 if(sIdleInvalidator->init(timeout_handler, ctx) < 0) { 163 delete sIdleInvalidator; 164 sIdleInvalidator = NULL; 165 } 166 } 167 168 if(!qdutils::MDPVersion::getInstance().isSrcSplit() && 169 !qdutils::MDPVersion::getInstance().isRotDownscaleEnabled() && 170 property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 && 171 (!strncmp(property, "1", PROPERTY_VALUE_MAX) || 172 !strncasecmp(property,"true", PROPERTY_VALUE_MAX))) { 173 sEnableYUVsplit = true; 174 } 175 176 bool defaultPTOR = false; 177 //Enable PTOR when "persist.hwc.ptor.enable" is not defined for 178 //8x16 and 8x39 targets by default 179 if((property_get("persist.hwc.ptor.enable", property, NULL) <= 0) && 180 (qdutils::MDPVersion::getInstance().is8x16() || 181 qdutils::MDPVersion::getInstance().is8x39())) { 182 defaultPTOR = true; 183 } 184 185 if (defaultPTOR || (!strncasecmp(property, "true", PROPERTY_VALUE_MAX)) || 186 (!strncmp(property, "1", PROPERTY_VALUE_MAX ))) { 187 ctx->mCopyBit[HWC_DISPLAY_PRIMARY] = new CopyBit(ctx, 188 HWC_DISPLAY_PRIMARY); 189 } 190 191 if((property_get("persist.mdp3.partialUpdate", property, NULL) <= 0) && 192 (ctx->mMDP.version == qdutils::MDP_V3_0_5)) { 193 enablePartialUpdateForMDP3 = true; 194 } 195 196 if(!enablePartialUpdateForMDP3 && 197 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) || 198 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) { 199 enablePartialUpdateForMDP3 = true; 200 } 201 202 sIsPartialUpdateActive = getPartialUpdatePref(ctx); 203 204 if(property_get("persist.mdpcomp_perfhint", property, "-1") > 0) { 205 int val = atoi(property); 206 if(val > 0 && loadPerfLib()) { 207 sPerfHintWindow = val; 208 ALOGI("PerfHintWindow = %d", sPerfHintWindow); 209 } 210 } 211 212 return true; 213 } 214 215 void MDPComp::reset(hwc_context_t *ctx) { 216 const int numLayers = ctx->listStats[mDpy].numAppLayers; 217 mCurrentFrame.reset(numLayers); 218 ctx->mOverlay->clear(mDpy); 219 ctx->mLayerRotMap[mDpy]->clear(); 220 } 221 222 void MDPComp::reset() { 223 sHandleTimeout = false; 224 mModeOn = false; 225 } 226 227 void MDPComp::timeout_handler(void *udata) { 228 struct hwc_context_t* ctx = (struct hwc_context_t*)(udata); 229 230 if(!ctx) { 231 ALOGE("%s: received empty data in timer callback", __FUNCTION__); 232 return; 233 } 234 235 ctx->mDrawLock.lock(); 236 // Handle timeout event only if the previous composition is MDP or MIXED. 237 if(!sHandleTimeout) { 238 ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__); 239 ctx->mDrawLock.unlock(); 240 return; 241 } 242 if(!ctx->proc) { 243 ALOGE("%s: HWC proc not registered", __FUNCTION__); 244 ctx->mDrawLock.unlock(); 245 return; 246 } 247 sIdleFallBack = true; 248 ctx->mDrawLock.unlock(); 249 /* Trigger SF to redraw the current frame */ 250 ctx->proc->invalidate(ctx->proc); 251 } 252 253 void MDPComp::setMaxPipesPerMixer(const uint32_t value) { 254 qdutils::MDPVersion &mdpVersion = qdutils::MDPVersion::getInstance(); 255 uint32_t maxSupported = (int)mdpVersion.getBlendStages(); 256 if(value > maxSupported) { 257 ALOGW("%s: Input exceeds max value supported. Setting to" 258 "max value: %d", __FUNCTION__, maxSupported); 259 } 260 sMaxPipesPerMixer = min(value, maxSupported); 261 } 262 263 void MDPComp::setIdleTimeout(const uint32_t& timeout) { 264 enum { ONE_REFRESH_PERIOD_MS = 17, ONE_BILLION_MS = 1000000000 }; 265 266 if(sIdleInvalidator) { 267 if(timeout <= ONE_REFRESH_PERIOD_MS) { 268 //If the specified timeout is < 1 draw cycle worth, "virtually" 269 //disable idle timeout. The ideal way for clients to disable 270 //timeout is to set it to 0 271 sIdleInvalidator->setIdleTimeout(ONE_BILLION_MS); 272 ALOGI("Disabled idle timeout"); 273 return; 274 } 275 sIdleInvalidator->setIdleTimeout(timeout); 276 ALOGI("Idle timeout set to %u", timeout); 277 } else { 278 ALOGW("Cannot set idle timeout, IdleInvalidator not enabled"); 279 } 280 } 281 282 void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx, 283 hwc_display_contents_1_t* list) { 284 LayerProp *layerProp = ctx->layerProp[mDpy]; 285 286 for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) { 287 hwc_layer_1_t* layer = &(list->hwLayers[index]); 288 if(!mCurrentFrame.isFBComposed[index]) { 289 layerProp[index].mFlags |= HWC_MDPCOMP; 290 layer->compositionType = HWC_OVERLAY; 291 layer->hints |= HWC_HINT_CLEAR_FB; 292 } else { 293 /* Drop the layer when its already present in FB OR when it lies 294 * outside frame's ROI */ 295 if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) { 296 layer->compositionType = HWC_OVERLAY; 297 } 298 } 299 } 300 } 301 302 void MDPComp::setRedraw(hwc_context_t *ctx, 303 hwc_display_contents_1_t* list) { 304 mCurrentFrame.needsRedraw = false; 305 if(!mCachedFrame.isSameFrame(mCurrentFrame, list) || 306 (list->flags & HWC_GEOMETRY_CHANGED) || 307 isSkipPresent(ctx, mDpy)) { 308 mCurrentFrame.needsRedraw = true; 309 } 310 } 311 312 MDPComp::FrameInfo::FrameInfo() { 313 memset(&mdpToLayer, 0, sizeof(mdpToLayer)); 314 reset(0); 315 } 316 317 void MDPComp::FrameInfo::reset(const int& numLayers) { 318 for(int i = 0 ; i < MAX_NUM_BLEND_STAGES; i++ ) { 319 if(mdpToLayer[i].pipeInfo) { 320 delete mdpToLayer[i].pipeInfo; 321 mdpToLayer[i].pipeInfo = NULL; 322 //We dont own the rotator 323 mdpToLayer[i].rot = NULL; 324 } 325 } 326 327 memset(&mdpToLayer, 0, sizeof(mdpToLayer)); 328 memset(&layerToMDP, -1, sizeof(layerToMDP)); 329 memset(&isFBComposed, 1, sizeof(isFBComposed)); 330 331 layerCount = numLayers; 332 fbCount = numLayers; 333 mdpCount = 0; 334 needsRedraw = true; 335 fbZ = -1; 336 } 337 338 void MDPComp::FrameInfo::map() { 339 // populate layer and MDP maps 340 int mdpIdx = 0; 341 for(int idx = 0; idx < layerCount; idx++) { 342 if(!isFBComposed[idx]) { 343 mdpToLayer[mdpIdx].listIndex = idx; 344 layerToMDP[idx] = mdpIdx++; 345 } 346 } 347 } 348 349 MDPComp::LayerCache::LayerCache() { 350 reset(); 351 } 352 353 void MDPComp::LayerCache::reset() { 354 memset(&hnd, 0, sizeof(hnd)); 355 memset(&isFBComposed, true, sizeof(isFBComposed)); 356 memset(&drop, false, sizeof(drop)); 357 layerCount = 0; 358 } 359 360 void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) { 361 const int numAppLayers = (int)list->numHwLayers - 1; 362 for(int i = 0; i < numAppLayers; i++) { 363 hnd[i] = list->hwLayers[i].handle; 364 } 365 } 366 367 void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) { 368 layerCount = curFrame.layerCount; 369 memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed)); 370 memcpy(&drop, &curFrame.drop, sizeof(drop)); 371 } 372 373 bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame, 374 hwc_display_contents_1_t* list) { 375 if(layerCount != curFrame.layerCount) 376 return false; 377 for(int i = 0; i < curFrame.layerCount; i++) { 378 if((curFrame.isFBComposed[i] != isFBComposed[i]) || 379 (curFrame.drop[i] != drop[i])) { 380 return false; 381 } 382 if(curFrame.isFBComposed[i] && 383 (hnd[i] != list->hwLayers[i].handle)){ 384 return false; 385 } 386 } 387 return true; 388 } 389 390 bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) { 391 private_handle_t *hnd = (private_handle_t *)layer->handle; 392 if((has90Transform(layer) and (not isRotationDoable(ctx, hnd))) || 393 (not isValidDimension(ctx,layer)) 394 //More conditions here, SKIP, sRGB+Blend etc 395 ) { 396 return false; 397 } 398 return true; 399 } 400 401 bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) { 402 private_handle_t *hnd = (private_handle_t *)layer->handle; 403 404 if(!hnd) { 405 if (layer->flags & HWC_COLOR_FILL) { 406 // Color layer 407 return true; 408 } 409 ALOGD_IF(isDebug(), "%s: layer handle is NULL", __FUNCTION__); 410 return false; 411 } 412 413 //XXX: Investigate doing this with pixel phase on MDSS 414 if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf)) 415 return false; 416 417 hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf); 418 hwc_rect_t dst = layer->displayFrame; 419 bool rotated90 = (bool)(layer->transform & HAL_TRANSFORM_ROT_90); 420 int crop_w = rotated90 ? crop.bottom - crop.top : crop.right - crop.left; 421 int crop_h = rotated90 ? crop.right - crop.left : crop.bottom - crop.top; 422 int dst_w = dst.right - dst.left; 423 int dst_h = dst.bottom - dst.top; 424 float w_scale = ((float)crop_w / (float)dst_w); 425 float h_scale = ((float)crop_h / (float)dst_h); 426 MDPVersion& mdpHw = MDPVersion::getInstance(); 427 428 /* Workaround for MDP HW limitation in DSI command mode panels where 429 * FPS will not go beyond 30 if buffers on RGB pipes are of width or height 430 * less than 5 pixels 431 * There also is a HW limilation in MDP, minimum block size is 2x2 432 * Fallback to GPU if height is less than 2. 433 */ 434 if(mdpHw.hasMinCropWidthLimitation() and (crop_w < 5 or crop_h < 5)) 435 return false; 436 437 if((w_scale > 1.0f) || (h_scale > 1.0f)) { 438 const uint32_t maxMDPDownscale = mdpHw.getMaxMDPDownscale(); 439 const float w_dscale = w_scale; 440 const float h_dscale = h_scale; 441 442 if(ctx->mMDP.version >= qdutils::MDSS_V5) { 443 444 if(!mdpHw.supportsDecimation()) { 445 /* On targets that doesnt support Decimation (eg.,8x26) 446 * maximum downscale support is overlay pipe downscale. 447 */ 448 if(crop_w > (int) mdpHw.getMaxPipeWidth() || 449 w_dscale > maxMDPDownscale || 450 h_dscale > maxMDPDownscale) 451 return false; 452 } else { 453 // Decimation on macrotile format layers is not supported. 454 if(isTileRendered(hnd)) { 455 /* Bail out if 456 * 1. Src crop > Mixer limit on nonsplit MDPComp 457 * 2. exceeds maximum downscale limit 458 */ 459 if(((crop_w > (int) mdpHw.getMaxPipeWidth()) && 460 !sSrcSplitEnabled) || 461 w_dscale > maxMDPDownscale || 462 h_dscale > maxMDPDownscale) { 463 return false; 464 } 465 } else if(w_dscale > 64 || h_dscale > 64) 466 return false; 467 } 468 } else { //A-family 469 if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale) 470 return false; 471 } 472 } 473 474 if((w_scale < 1.0f) || (h_scale < 1.0f)) { 475 const uint32_t upscale = mdpHw.getMaxMDPUpscale(); 476 const float w_uscale = 1.0f / w_scale; 477 const float h_uscale = 1.0f / h_scale; 478 479 if(w_uscale > upscale || h_uscale > upscale) 480 return false; 481 } 482 483 return true; 484 } 485 486 bool MDPComp::isFrameDoable(hwc_context_t *ctx) { 487 bool ret = true; 488 489 if(!isEnabled()) { 490 ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__); 491 ret = false; 492 } else if((qdutils::MDPVersion::getInstance().is8x26() || 493 qdutils::MDPVersion::getInstance().is8x16() || 494 qdutils::MDPVersion::getInstance().is8x39()) && 495 ctx->mVideoTransFlag && 496 isSecondaryConnected(ctx)) { 497 //1 Padding round to shift pipes across mixers 498 ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round", 499 __FUNCTION__); 500 ret = false; 501 } else if(qdutils::MDPVersion::getInstance().getTotalPipes() < 8) { 502 /* TODO: freeing up all the resources only for the targets having total 503 number of pipes < 8. Need to analyze number of VIG pipes used 504 for primary in previous draw cycle and accordingly decide 505 whether to fall back to full GPU comp or video only comp 506 */ 507 if(isSecondaryConfiguring(ctx)) { 508 ALOGD_IF( isDebug(),"%s: External Display connection is pending", 509 __FUNCTION__); 510 ret = false; 511 } else if(ctx->isPaddingRound) { 512 ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d", 513 __FUNCTION__,mDpy); 514 ret = false; 515 } 516 } else if (ctx->isDMAStateChanging) { 517 // Bail out if a padding round has been invoked in order to switch DMA 518 // state to block mode. We need this to cater for the case when a layer 519 // requires rotation in the current frame. 520 ALOGD_IF(isDebug(), "%s: padding round invoked to switch DMA state", 521 __FUNCTION__); 522 return false; 523 } 524 525 return ret; 526 } 527 528 void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) { 529 hwc_rect_t roi = ctx->listStats[mDpy].lRoi; 530 fbRect = getIntersection(fbRect, roi); 531 } 532 533 /* 1) Identify layers that are not visible or lying outside the updating ROI and 534 * drop them from composition. 535 * 2) If we have a scaling layer which needs cropping against generated 536 * ROI, reset ROI to full resolution. */ 537 bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx, 538 hwc_display_contents_1_t* list) { 539 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 540 hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi; 541 542 for(int i = numAppLayers - 1; i >= 0; i--){ 543 if(!isValidRect(visibleRect)) { 544 mCurrentFrame.drop[i] = true; 545 mCurrentFrame.dropCount++; 546 continue; 547 } 548 549 const hwc_layer_1_t* layer = &list->hwLayers[i]; 550 hwc_rect_t dstRect = layer->displayFrame; 551 hwc_rect_t res = getIntersection(visibleRect, dstRect); 552 553 if(!isValidRect(res)) { 554 mCurrentFrame.drop[i] = true; 555 mCurrentFrame.dropCount++; 556 } else { 557 /* Reset frame ROI when any layer which needs scaling also needs ROI 558 * cropping */ 559 if(!isSameRect(res, dstRect) && needsScaling (layer)) { 560 ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__); 561 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop)); 562 mCurrentFrame.dropCount = 0; 563 return false; 564 } 565 566 /* deduct any opaque region from visibleRect */ 567 if (layer->blending == HWC_BLENDING_NONE && 568 layer->planeAlpha == 0xFF) 569 visibleRect = deductRect(visibleRect, res); 570 } 571 } 572 return true; 573 } 574 575 /* Calculate ROI for the frame by accounting all the layer's dispalyFrame which 576 * are updating. If DirtyRegion is applicable, calculate it by accounting all 577 * the changing layer's dirtyRegion. */ 578 void MDPCompNonSplit::generateROI(hwc_context_t *ctx, 579 hwc_display_contents_1_t* list) { 580 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 581 if(!canPartialUpdate(ctx, list)) 582 return; 583 584 struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0}; 585 hwc_rect fullFrame = (struct hwc_rect) {0, 0,(int)ctx->dpyAttr[mDpy].xres, 586 (int)ctx->dpyAttr[mDpy].yres}; 587 588 for(int index = 0; index < numAppLayers; index++ ) { 589 hwc_layer_1_t* layer = &list->hwLayers[index]; 590 if ((mCachedFrame.hnd[index] != layer->handle) || 591 isYuvBuffer((private_handle_t *)layer->handle)) { 592 hwc_rect_t dst = layer->displayFrame; 593 hwc_rect_t updatingRect = dst; 594 595 #ifdef QCOM_BSP 596 if(!needsScaling(layer) && !layer->transform) 597 { 598 hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf); 599 int x_off = dst.left - src.left; 600 int y_off = dst.top - src.top; 601 updatingRect = moveRect(layer->dirtyRect, x_off, y_off); 602 } 603 #endif 604 605 roi = getUnion(roi, updatingRect); 606 } 607 } 608 609 /* No layer is updating. Still SF wants a refresh.*/ 610 if(!isValidRect(roi)) 611 return; 612 613 // Align ROI coordinates to panel restrictions 614 roi = getSanitizeROI(roi, fullFrame); 615 616 ctx->listStats[mDpy].lRoi = roi; 617 if(!validateAndApplyROI(ctx, list)) 618 resetROI(ctx, mDpy); 619 620 ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__, 621 ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top, 622 ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom); 623 } 624 625 void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) { 626 hwc_rect l_roi = ctx->listStats[mDpy].lRoi; 627 hwc_rect r_roi = ctx->listStats[mDpy].rRoi; 628 629 hwc_rect_t l_fbRect = getIntersection(fbRect, l_roi); 630 hwc_rect_t r_fbRect = getIntersection(fbRect, r_roi); 631 fbRect = getUnion(l_fbRect, r_fbRect); 632 } 633 /* 1) Identify layers that are not visible or lying outside BOTH the updating 634 * ROI's and drop them from composition. If a layer is spanning across both 635 * the halves of the screen but needed by only ROI, the non-contributing 636 * half will not be programmed for MDP. 637 * 2) If we have a scaling layer which needs cropping against generated 638 * ROI, reset ROI to full resolution. */ 639 bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx, 640 hwc_display_contents_1_t* list) { 641 642 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 643 644 hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi; 645 hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi; 646 647 for(int i = numAppLayers - 1; i >= 0; i--){ 648 if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR)) 649 { 650 mCurrentFrame.drop[i] = true; 651 mCurrentFrame.dropCount++; 652 continue; 653 } 654 655 const hwc_layer_1_t* layer = &list->hwLayers[i]; 656 hwc_rect_t dstRect = layer->displayFrame; 657 658 hwc_rect_t l_res = getIntersection(visibleRectL, dstRect); 659 hwc_rect_t r_res = getIntersection(visibleRectR, dstRect); 660 hwc_rect_t res = getUnion(l_res, r_res); 661 662 if(!isValidRect(l_res) && !isValidRect(r_res)) { 663 mCurrentFrame.drop[i] = true; 664 mCurrentFrame.dropCount++; 665 } else { 666 /* Reset frame ROI when any layer which needs scaling also needs ROI 667 * cropping */ 668 if(!isSameRect(res, dstRect) && needsScaling (layer)) { 669 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop)); 670 mCurrentFrame.dropCount = 0; 671 return false; 672 } 673 674 if (layer->blending == HWC_BLENDING_NONE && 675 layer->planeAlpha == 0xFF) { 676 visibleRectL = deductRect(visibleRectL, l_res); 677 visibleRectR = deductRect(visibleRectR, r_res); 678 } 679 } 680 } 681 return true; 682 } 683 /* Calculate ROI for the frame by accounting all the layer's dispalyFrame which 684 * are updating. If DirtyRegion is applicable, calculate it by accounting all 685 * the changing layer's dirtyRegion. */ 686 void MDPCompSplit::generateROI(hwc_context_t *ctx, 687 hwc_display_contents_1_t* list) { 688 if(!canPartialUpdate(ctx, list)) 689 return; 690 691 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 692 int lSplit = getLeftSplit(ctx, mDpy); 693 694 int hw_h = (int)ctx->dpyAttr[mDpy].yres; 695 int hw_w = (int)ctx->dpyAttr[mDpy].xres; 696 697 struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h}; 698 struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h}; 699 700 struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0}; 701 struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0}; 702 703 for(int index = 0; index < numAppLayers; index++ ) { 704 hwc_layer_1_t* layer = &list->hwLayers[index]; 705 private_handle_t *hnd = (private_handle_t *)layer->handle; 706 if ((mCachedFrame.hnd[index] != layer->handle) || 707 isYuvBuffer(hnd)) { 708 hwc_rect_t dst = layer->displayFrame; 709 hwc_rect_t updatingRect = dst; 710 711 #ifdef QCOM_BSP 712 if(!needsScaling(layer) && !layer->transform) 713 { 714 hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf); 715 int x_off = dst.left - src.left; 716 int y_off = dst.top - src.top; 717 updatingRect = moveRect(layer->dirtyRect, x_off, y_off); 718 } 719 #endif 720 721 hwc_rect_t l_dst = getIntersection(l_frame, updatingRect); 722 if(isValidRect(l_dst)) 723 l_roi = getUnion(l_roi, l_dst); 724 725 hwc_rect_t r_dst = getIntersection(r_frame, updatingRect); 726 if(isValidRect(r_dst)) 727 r_roi = getUnion(r_roi, r_dst); 728 } 729 } 730 731 /* For panels that cannot accept commands in both the interfaces, we cannot 732 * send two ROI's (for each half). We merge them into single ROI and split 733 * them across lSplit for MDP mixer use. The ROI's will be merged again 734 * finally before udpating the panel in the driver. */ 735 if(qdutils::MDPVersion::getInstance().needsROIMerge()) { 736 hwc_rect_t temp_roi = getUnion(l_roi, r_roi); 737 l_roi = getIntersection(temp_roi, l_frame); 738 r_roi = getIntersection(temp_roi, r_frame); 739 } 740 741 /* No layer is updating. Still SF wants a refresh. */ 742 if(!isValidRect(l_roi) && !isValidRect(r_roi)) 743 return; 744 745 l_roi = getSanitizeROI(l_roi, l_frame); 746 r_roi = getSanitizeROI(r_roi, r_frame); 747 748 ctx->listStats[mDpy].lRoi = l_roi; 749 ctx->listStats[mDpy].rRoi = r_roi; 750 751 if(!validateAndApplyROI(ctx, list)) 752 resetROI(ctx, mDpy); 753 754 ALOGD_IF(isDebug(),"%s: generated L_ROI: [%d, %d, %d, %d]" 755 "R_ROI: [%d, %d, %d, %d]", __FUNCTION__, 756 ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top, 757 ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom, 758 ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top, 759 ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom); 760 } 761 762 /* Checks for conditions where all the layers marked for MDP comp cannot be 763 * bypassed. On such conditions we try to bypass atleast YUV layers */ 764 bool MDPComp::tryFullFrame(hwc_context_t *ctx, 765 hwc_display_contents_1_t* list){ 766 767 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 768 769 // Fall back to video only composition, if AIV video mode is enabled 770 if(ctx->listStats[mDpy].mAIVVideoMode) { 771 ALOGD_IF(isDebug(), "%s: AIV Video Mode enabled dpy %d", 772 __FUNCTION__, mDpy); 773 return false; 774 } 775 776 /* No Idle fall back if secure display or secure RGB layers are present 777 * or if there is only a single layer being composed */ 778 if(sIdleFallBack && !ctx->listStats[mDpy].secureUI && 779 !ctx->listStats[mDpy].secureRGBCount && 780 (ctx->listStats[mDpy].numAppLayers > 1)) { 781 ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy); 782 return false; 783 } 784 785 if(isSkipPresent(ctx, mDpy)) { 786 ALOGD_IF(isDebug(),"%s: SKIP present: %d", 787 __FUNCTION__, 788 isSkipPresent(ctx, mDpy)); 789 return false; 790 } 791 792 // if secondary is configuring or Padding round, fall back to video only 793 // composition and release all assigned non VIG pipes from primary. 794 if(isSecondaryConfiguring(ctx)) { 795 ALOGD_IF( isDebug(),"%s: External Display connection is pending", 796 __FUNCTION__); 797 return false; 798 } else if(ctx->isPaddingRound) { 799 ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d", 800 __FUNCTION__,mDpy); 801 return false; 802 } 803 804 // check for action safe flag and MDP scaling mode which requires scaling. 805 if(ctx->dpyAttr[mDpy].mActionSafePresent 806 || ctx->dpyAttr[mDpy].mMDPScalingMode) { 807 ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__); 808 return false; 809 } 810 811 for(int i = 0; i < numAppLayers; ++i) { 812 hwc_layer_1_t* layer = &list->hwLayers[i]; 813 private_handle_t *hnd = (private_handle_t *)layer->handle; 814 815 if(has90Transform(layer) && isRotationDoable(ctx, hnd)) { 816 if(!canUseRotator(ctx, mDpy)) { 817 ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d", 818 __FUNCTION__, mDpy); 819 return false; 820 } 821 } 822 823 //For 8x26 with panel width>1k, if RGB layer needs HFLIP fail mdp comp 824 // may not need it if Gfx pre-rotation can handle all flips & rotations 825 MDPVersion& mdpHw = MDPVersion::getInstance(); 826 int transform = (layer->flags & HWC_COLOR_FILL) ? 0 : layer->transform; 827 if( mdpHw.is8x26() && (ctx->dpyAttr[mDpy].xres > 1024) && 828 (transform & HWC_TRANSFORM_FLIP_H) && (!isYuvBuffer(hnd))) 829 return false; 830 } 831 832 if(ctx->mAD->isDoable()) { 833 return false; 834 } 835 836 //If all above hard conditions are met we can do full or partial MDP comp. 837 bool ret = false; 838 if(fullMDPComp(ctx, list)) { 839 ret = true; 840 } else if(fullMDPCompWithPTOR(ctx, list)) { 841 ret = true; 842 } else if(partialMDPComp(ctx, list)) { 843 ret = true; 844 } 845 846 return ret; 847 } 848 849 bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 850 851 if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP) 852 return false; 853 854 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 855 for(int i = 0; i < numAppLayers; i++) { 856 hwc_layer_1_t* layer = &list->hwLayers[i]; 857 if(not mCurrentFrame.drop[i] and 858 not isSupportedForMDPComp(ctx, layer)) { 859 ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__); 860 return false; 861 } 862 } 863 864 mCurrentFrame.fbCount = 0; 865 memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop, 866 sizeof(mCurrentFrame.isFBComposed)); 867 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount - 868 mCurrentFrame.dropCount; 869 870 if(sEnableYUVsplit){ 871 adjustForSourceSplit(ctx, list); 872 } 873 874 if(!postHeuristicsHandling(ctx, list)) { 875 ALOGD_IF(isDebug(), "post heuristic handling failed"); 876 reset(ctx); 877 return false; 878 } 879 ALOGD_IF(sSimulationFlags,"%s: FULL_MDP_COMP SUCCEEDED", 880 __FUNCTION__); 881 return true; 882 } 883 884 /* Full MDP Composition with Peripheral Tiny Overlap Removal. 885 * MDP bandwidth limitations can be avoided, if the overlap region 886 * covered by the smallest layer at a higher z-order, gets composed 887 * by Copybit on a render buffer, which can be queued to MDP. 888 */ 889 bool MDPComp::fullMDPCompWithPTOR(hwc_context_t *ctx, 890 hwc_display_contents_1_t* list) { 891 892 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 893 const int stagesForMDP = min(sMaxPipesPerMixer, 894 ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT)); 895 896 // Hard checks where we cannot use this mode 897 if (mDpy || !ctx->mCopyBit[mDpy]) { 898 ALOGD_IF(isDebug(), "%s: Feature not supported!", __FUNCTION__); 899 return false; 900 } 901 902 // Frame level checks 903 if ((numAppLayers > stagesForMDP) || isSkipPresent(ctx, mDpy) || 904 isYuvPresent(ctx, mDpy) || mCurrentFrame.dropCount || 905 isSecurePresent(ctx, mDpy)) { 906 ALOGD_IF(isDebug(), "%s: Frame not supported!", __FUNCTION__); 907 return false; 908 } 909 // MDP comp checks 910 for(int i = 0; i < numAppLayers; i++) { 911 hwc_layer_1_t* layer = &list->hwLayers[i]; 912 if(not isSupportedForMDPComp(ctx, layer)) { 913 ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__); 914 return false; 915 } 916 } 917 918 /* We cannot use this composition mode, if: 919 1. A below layer needs scaling. 920 2. Overlap is not peripheral to display. 921 3. Overlap or a below layer has 90 degree transform. 922 4. Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs. 923 */ 924 925 int minLayerIndex[MAX_PTOR_LAYERS] = { -1, -1}; 926 hwc_rect_t overlapRect[MAX_PTOR_LAYERS]; 927 memset(overlapRect, 0, sizeof(overlapRect)); 928 int layerPixelCount, minPixelCount = 0; 929 int numPTORLayersFound = 0; 930 for (int i = numAppLayers-1; (i >= 0 && 931 numPTORLayersFound < MAX_PTOR_LAYERS); i--) { 932 hwc_layer_1_t* layer = &list->hwLayers[i]; 933 hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf); 934 hwc_rect_t dispFrame = layer->displayFrame; 935 layerPixelCount = (crop.right - crop.left) * (crop.bottom - crop.top); 936 // PTOR layer should be peripheral and cannot have transform 937 if (!isPeripheral(dispFrame, ctx->mViewFrame[mDpy]) || 938 has90Transform(layer)) { 939 continue; 940 } 941 if((3 * (layerPixelCount + minPixelCount)) > 942 ((int)ctx->dpyAttr[mDpy].xres * (int)ctx->dpyAttr[mDpy].yres)) { 943 // Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs. 944 continue; 945 } 946 bool found = false; 947 for (int j = i-1; j >= 0; j--) { 948 // Check if the layers below this layer qualifies for PTOR comp 949 hwc_layer_1_t* layer = &list->hwLayers[j]; 950 hwc_rect_t disFrame = layer->displayFrame; 951 // Layer below PTOR is intersecting and has 90 degree transform or 952 // needs scaling cannot be supported. 953 if (isValidRect(getIntersection(dispFrame, disFrame))) { 954 if (has90Transform(layer) || needsScaling(layer)) { 955 found = false; 956 break; 957 } 958 found = true; 959 } 960 } 961 // Store the minLayer Index 962 if(found) { 963 minLayerIndex[numPTORLayersFound] = i; 964 overlapRect[numPTORLayersFound] = list->hwLayers[i].displayFrame; 965 minPixelCount += layerPixelCount; 966 numPTORLayersFound++; 967 } 968 } 969 970 // No overlap layers 971 if (!numPTORLayersFound) 972 return false; 973 974 // Store the displayFrame and the sourceCrops of the layers 975 hwc_rect_t displayFrame[numAppLayers]; 976 hwc_rect_t sourceCrop[numAppLayers]; 977 for(int i = 0; i < numAppLayers; i++) { 978 hwc_layer_1_t* layer = &list->hwLayers[i]; 979 displayFrame[i] = layer->displayFrame; 980 sourceCrop[i] = integerizeSourceCrop(layer->sourceCropf); 981 } 982 983 /** 984 * It's possible that 2 PTOR layers might have overlapping. 985 * In such case, remove the intersection(again if peripheral) 986 * from the lower PTOR layer to avoid overlapping. 987 * If intersection is not on peripheral then compromise 988 * by reducing number of PTOR layers. 989 **/ 990 hwc_rect_t commonRect = getIntersection(overlapRect[0], overlapRect[1]); 991 if(isValidRect(commonRect)) { 992 overlapRect[1] = deductRect(overlapRect[1], commonRect); 993 list->hwLayers[minLayerIndex[1]].displayFrame = overlapRect[1]; 994 } 995 996 ctx->mPtorInfo.count = numPTORLayersFound; 997 for(int i = 0; i < MAX_PTOR_LAYERS; i++) { 998 ctx->mPtorInfo.layerIndex[i] = minLayerIndex[i]; 999 } 1000 1001 if (!ctx->mCopyBit[mDpy]->prepareOverlap(ctx, list)) { 1002 // reset PTOR 1003 ctx->mPtorInfo.count = 0; 1004 if(isValidRect(commonRect)) { 1005 // If PTORs are intersecting restore displayframe of PTOR[1] 1006 // before returning, as we have modified it above. 1007 list->hwLayers[minLayerIndex[1]].displayFrame = 1008 displayFrame[minLayerIndex[1]]; 1009 } 1010 return false; 1011 } 1012 private_handle_t *renderBuf = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer(); 1013 Whf layerWhf[numPTORLayersFound]; // To store w,h,f of PTOR layers 1014 1015 // Store the blending mode, planeAlpha, and transform of PTOR layers 1016 int32_t blending[numPTORLayersFound]; 1017 uint8_t planeAlpha[numPTORLayersFound]; 1018 uint32_t transform[numPTORLayersFound]; 1019 1020 for(int j = 0; j < numPTORLayersFound; j++) { 1021 int index = ctx->mPtorInfo.layerIndex[j]; 1022 1023 // Update src crop of PTOR layer 1024 hwc_layer_1_t* layer = &list->hwLayers[index]; 1025 layer->sourceCropf.left = (float)ctx->mPtorInfo.displayFrame[j].left; 1026 layer->sourceCropf.top = (float)ctx->mPtorInfo.displayFrame[j].top; 1027 layer->sourceCropf.right = (float)ctx->mPtorInfo.displayFrame[j].right; 1028 layer->sourceCropf.bottom =(float)ctx->mPtorInfo.displayFrame[j].bottom; 1029 1030 // Store & update w, h, format of PTOR layer 1031 private_handle_t *hnd = (private_handle_t *)layer->handle; 1032 Whf whf(hnd->width, hnd->height, hnd->format, hnd->size); 1033 layerWhf[j] = whf; 1034 hnd->width = renderBuf->width; 1035 hnd->height = renderBuf->height; 1036 hnd->format = renderBuf->format; 1037 1038 // Store & update blending mode, planeAlpha and transform of PTOR layer 1039 blending[j] = layer->blending; 1040 planeAlpha[j] = layer->planeAlpha; 1041 transform[j] = layer->transform; 1042 layer->blending = HWC_BLENDING_NONE; 1043 layer->planeAlpha = 0xFF; 1044 layer->transform = 0; 1045 1046 // Remove overlap from crop & displayFrame of below layers 1047 for (int i = 0; i < index && index !=-1; i++) { 1048 layer = &list->hwLayers[i]; 1049 if(!isValidRect(getIntersection(layer->displayFrame, 1050 overlapRect[j]))) { 1051 continue; 1052 } 1053 // Update layer attributes 1054 hwc_rect_t srcCrop = integerizeSourceCrop(layer->sourceCropf); 1055 hwc_rect_t destRect = deductRect(layer->displayFrame, 1056 getIntersection(layer->displayFrame, overlapRect[j])); 1057 qhwc::calculate_crop_rects(srcCrop, layer->displayFrame, destRect, 1058 layer->transform); 1059 layer->sourceCropf.left = (float)srcCrop.left; 1060 layer->sourceCropf.top = (float)srcCrop.top; 1061 layer->sourceCropf.right = (float)srcCrop.right; 1062 layer->sourceCropf.bottom = (float)srcCrop.bottom; 1063 } 1064 } 1065 1066 mCurrentFrame.mdpCount = numAppLayers; 1067 mCurrentFrame.fbCount = 0; 1068 mCurrentFrame.fbZ = -1; 1069 1070 for (int j = 0; j < numAppLayers; j++) { 1071 if(isValidRect(list->hwLayers[j].displayFrame)) { 1072 mCurrentFrame.isFBComposed[j] = false; 1073 } else { 1074 mCurrentFrame.mdpCount--; 1075 mCurrentFrame.drop[j] = true; 1076 } 1077 } 1078 1079 bool result = postHeuristicsHandling(ctx, list); 1080 1081 // Restore layer attributes 1082 for(int i = 0; i < numAppLayers; i++) { 1083 hwc_layer_1_t* layer = &list->hwLayers[i]; 1084 layer->displayFrame = displayFrame[i]; 1085 layer->sourceCropf.left = (float)sourceCrop[i].left; 1086 layer->sourceCropf.top = (float)sourceCrop[i].top; 1087 layer->sourceCropf.right = (float)sourceCrop[i].right; 1088 layer->sourceCropf.bottom = (float)sourceCrop[i].bottom; 1089 } 1090 1091 // Restore w,h,f, blending attributes, and transform of PTOR layers 1092 for (int i = 0; i < numPTORLayersFound; i++) { 1093 int idx = ctx->mPtorInfo.layerIndex[i]; 1094 hwc_layer_1_t* layer = &list->hwLayers[idx]; 1095 private_handle_t *hnd = (private_handle_t *)list->hwLayers[idx].handle; 1096 hnd->width = layerWhf[i].w; 1097 hnd->height = layerWhf[i].h; 1098 hnd->format = layerWhf[i].format; 1099 layer->blending = blending[i]; 1100 layer->planeAlpha = planeAlpha[i]; 1101 layer->transform = transform[i]; 1102 } 1103 1104 if (!result) { 1105 // reset PTOR 1106 ctx->mPtorInfo.count = 0; 1107 reset(ctx); 1108 } else { 1109 ALOGD_IF(isDebug(), "%s: PTOR Indexes: %d and %d", __FUNCTION__, 1110 ctx->mPtorInfo.layerIndex[0], ctx->mPtorInfo.layerIndex[1]); 1111 } 1112 1113 ALOGD_IF(isDebug(), "%s: Postheuristics %s!", __FUNCTION__, 1114 (result ? "successful" : "failed")); 1115 return result; 1116 } 1117 1118 bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) 1119 { 1120 if(!sEnableMixedMode) { 1121 //Mixed mode is disabled. No need to even try caching. 1122 return false; 1123 } 1124 1125 bool ret = false; 1126 if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first 1127 ret = loadBasedComp(ctx, list) or 1128 cacheBasedComp(ctx, list); 1129 } else { 1130 ret = cacheBasedComp(ctx, list) or 1131 loadBasedComp(ctx, list); 1132 } 1133 1134 return ret; 1135 } 1136 1137 bool MDPComp::cacheBasedComp(hwc_context_t *ctx, 1138 hwc_display_contents_1_t* list) { 1139 if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP) 1140 return false; 1141 1142 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 1143 mCurrentFrame.reset(numAppLayers); 1144 updateLayerCache(ctx, list, mCurrentFrame); 1145 1146 //If an MDP marked layer is unsupported cannot do partial MDP Comp 1147 for(int i = 0; i < numAppLayers; i++) { 1148 if(!mCurrentFrame.isFBComposed[i]) { 1149 hwc_layer_1_t* layer = &list->hwLayers[i]; 1150 if(not isSupportedForMDPComp(ctx, layer)) { 1151 ALOGD_IF(isDebug(), "%s: Unsupported layer in list", 1152 __FUNCTION__); 1153 reset(ctx); 1154 return false; 1155 } 1156 } 1157 } 1158 1159 updateYUV(ctx, list, false /*secure only*/, mCurrentFrame); 1160 /* mark secure RGB layers for MDP comp */ 1161 updateSecureRGB(ctx, list); 1162 bool ret = markLayersForCaching(ctx, list); //sets up fbZ also 1163 if(!ret) { 1164 ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy); 1165 reset(ctx); 1166 return false; 1167 } 1168 1169 int mdpCount = mCurrentFrame.mdpCount; 1170 1171 if(sEnableYUVsplit){ 1172 adjustForSourceSplit(ctx, list); 1173 } 1174 1175 if(!postHeuristicsHandling(ctx, list)) { 1176 ALOGD_IF(isDebug(), "post heuristic handling failed"); 1177 reset(ctx); 1178 return false; 1179 } 1180 ALOGD_IF(sSimulationFlags,"%s: CACHE_MDP_COMP SUCCEEDED", 1181 __FUNCTION__); 1182 1183 return true; 1184 } 1185 1186 bool MDPComp::loadBasedComp(hwc_context_t *ctx, 1187 hwc_display_contents_1_t* list) { 1188 if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP) 1189 return false; 1190 1191 if(not isLoadBasedCompDoable(ctx)) { 1192 return false; 1193 } 1194 1195 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 1196 const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount; 1197 const int stagesForMDP = min(sMaxPipesPerMixer, 1198 ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT)); 1199 1200 int mdpBatchSize = stagesForMDP - 1; //1 stage for FB 1201 int fbBatchSize = numNonDroppedLayers - mdpBatchSize; 1202 int lastMDPSupportedIndex = numAppLayers; 1203 int dropCount = 0; 1204 1205 //Find the minimum MDP batch size 1206 for(int i = 0; i < numAppLayers;i++) { 1207 if(mCurrentFrame.drop[i]) { 1208 dropCount++; 1209 continue; 1210 } 1211 hwc_layer_1_t* layer = &list->hwLayers[i]; 1212 if(not isSupportedForMDPComp(ctx, layer)) { 1213 lastMDPSupportedIndex = i; 1214 mdpBatchSize = min(i - dropCount, stagesForMDP - 1); 1215 fbBatchSize = numNonDroppedLayers - mdpBatchSize; 1216 break; 1217 } 1218 } 1219 1220 ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d " 1221 "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize, 1222 mCurrentFrame.dropCount); 1223 1224 //Start at a point where the fb batch should at least have 2 layers, for 1225 //this mode to be justified. 1226 while(fbBatchSize < 2) { 1227 ++fbBatchSize; 1228 --mdpBatchSize; 1229 } 1230 1231 //If there are no layers for MDP, this mode doesnt make sense. 1232 if(mdpBatchSize < 1) { 1233 ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch", 1234 __FUNCTION__); 1235 return false; 1236 } 1237 1238 mCurrentFrame.reset(numAppLayers); 1239 1240 //Try with successively smaller mdp batch sizes until we succeed or reach 1 1241 while(mdpBatchSize > 0) { 1242 //Mark layers for MDP comp 1243 int mdpBatchLeft = mdpBatchSize; 1244 for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) { 1245 if(mCurrentFrame.drop[i]) { 1246 continue; 1247 } 1248 mCurrentFrame.isFBComposed[i] = false; 1249 --mdpBatchLeft; 1250 } 1251 1252 mCurrentFrame.fbZ = mdpBatchSize; 1253 mCurrentFrame.fbCount = fbBatchSize; 1254 mCurrentFrame.mdpCount = mdpBatchSize; 1255 1256 ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d", 1257 __FUNCTION__, mdpBatchSize, fbBatchSize, 1258 mCurrentFrame.dropCount); 1259 1260 if(postHeuristicsHandling(ctx, list)) { 1261 ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded", 1262 __FUNCTION__); 1263 ALOGD_IF(sSimulationFlags,"%s: LOAD_MDP_COMP SUCCEEDED", 1264 __FUNCTION__); 1265 return true; 1266 } 1267 1268 reset(ctx); 1269 --mdpBatchSize; 1270 ++fbBatchSize; 1271 } 1272 1273 return false; 1274 } 1275 1276 bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) { 1277 if(mDpy or isSecurePresent(ctx, mDpy) or 1278 isYuvPresent(ctx, mDpy)) { 1279 return false; 1280 } 1281 return true; 1282 } 1283 1284 bool MDPComp::canPartialUpdate(hwc_context_t *ctx, 1285 hwc_display_contents_1_t* list){ 1286 if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() || 1287 isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) || 1288 !sIsPartialUpdateActive || mDpy ) { 1289 return false; 1290 } 1291 if(ctx->listStats[mDpy].secureUI) 1292 return false; 1293 return true; 1294 } 1295 1296 bool MDPComp::tryVideoOnly(hwc_context_t *ctx, 1297 hwc_display_contents_1_t* list) { 1298 const bool secureOnly = true; 1299 return videoOnlyComp(ctx, list, not secureOnly) or 1300 videoOnlyComp(ctx, list, secureOnly); 1301 } 1302 1303 bool MDPComp::videoOnlyComp(hwc_context_t *ctx, 1304 hwc_display_contents_1_t* list, bool secureOnly) { 1305 if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY) 1306 return false; 1307 1308 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 1309 if(!isSecurePresent(ctx, mDpy)) { 1310 /* Bail out if we are processing only secured video layers 1311 * and we dont have any */ 1312 if(secureOnly) { 1313 ALOGD_IF(isDebug(),"%s: No Secure Video Layers", __FUNCTION__); 1314 return false; 1315 } 1316 /* No Idle fall back for secure video layers and if there is only 1317 * single layer being composed. */ 1318 if(sIdleFallBack && (ctx->listStats[mDpy].numAppLayers > 1)) { 1319 ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy); 1320 return false; 1321 } 1322 } 1323 1324 mCurrentFrame.reset(numAppLayers); 1325 mCurrentFrame.fbCount -= mCurrentFrame.dropCount; 1326 updateYUV(ctx, list, secureOnly, mCurrentFrame); 1327 int mdpCount = mCurrentFrame.mdpCount; 1328 1329 if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) { 1330 reset(ctx); 1331 return false; 1332 } 1333 1334 if(mCurrentFrame.fbCount) 1335 mCurrentFrame.fbZ = mCurrentFrame.mdpCount; 1336 1337 if(sEnableYUVsplit){ 1338 adjustForSourceSplit(ctx, list); 1339 } 1340 1341 if(!postHeuristicsHandling(ctx, list)) { 1342 ALOGD_IF(isDebug(), "post heuristic handling failed"); 1343 if(errno == ENOBUFS) { 1344 ALOGD_IF(isDebug(), "SMP Allocation failed"); 1345 //On SMP allocation failure in video only comp add padding round 1346 ctx->isPaddingRound = true; 1347 } 1348 reset(ctx); 1349 return false; 1350 } 1351 1352 ALOGD_IF(sSimulationFlags,"%s: VIDEO_ONLY_COMP SUCCEEDED", 1353 __FUNCTION__); 1354 return true; 1355 } 1356 1357 /* if tryFullFrame fails, try to push all video and secure RGB layers to MDP */ 1358 bool MDPComp::tryMDPOnlyLayers(hwc_context_t *ctx, 1359 hwc_display_contents_1_t* list) { 1360 // Fall back to video only composition, if AIV video mode is enabled 1361 if(ctx->listStats[mDpy].mAIVVideoMode) { 1362 ALOGD_IF(isDebug(), "%s: AIV Video Mode enabled dpy %d", 1363 __FUNCTION__, mDpy); 1364 return false; 1365 } 1366 1367 const bool secureOnly = true; 1368 return mdpOnlyLayersComp(ctx, list, not secureOnly) or 1369 mdpOnlyLayersComp(ctx, list, secureOnly); 1370 1371 } 1372 1373 bool MDPComp::mdpOnlyLayersComp(hwc_context_t *ctx, 1374 hwc_display_contents_1_t* list, bool secureOnly) { 1375 1376 if(sSimulationFlags & MDPCOMP_AVOID_MDP_ONLY_LAYERS) 1377 return false; 1378 1379 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 1380 if(!isSecurePresent(ctx, mDpy) && !ctx->listStats[mDpy].secureUI) { 1381 /* Bail out if we are processing only secured video/ui layers 1382 * and we dont have any */ 1383 if(secureOnly) { 1384 ALOGD_IF(isDebug(), "%s: No secure video/ui layers"); 1385 return false; 1386 } 1387 /* No Idle fall back for secure video/ui layers and if there is only 1388 * single layer being composed. */ 1389 if(sIdleFallBack && (ctx->listStats[mDpy].numAppLayers > 1)) { 1390 ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy); 1391 return false; 1392 } 1393 } 1394 1395 /* Bail out if we dont have any secure RGB layers */ 1396 if (!ctx->listStats[mDpy].secureRGBCount) { 1397 reset(ctx); 1398 return false; 1399 } 1400 1401 mCurrentFrame.reset(numAppLayers); 1402 mCurrentFrame.fbCount -= mCurrentFrame.dropCount; 1403 1404 updateYUV(ctx, list, secureOnly, mCurrentFrame); 1405 /* mark secure RGB layers for MDP comp */ 1406 updateSecureRGB(ctx, list); 1407 1408 if(mCurrentFrame.mdpCount == 0) { 1409 reset(ctx); 1410 return false; 1411 } 1412 1413 /* find the maximum batch of layers to be marked for framebuffer */ 1414 bool ret = markLayersForCaching(ctx, list); //sets up fbZ also 1415 if(!ret) { 1416 ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy); 1417 reset(ctx); 1418 return false; 1419 } 1420 1421 if(sEnableYUVsplit){ 1422 adjustForSourceSplit(ctx, list); 1423 } 1424 1425 if(!postHeuristicsHandling(ctx, list)) { 1426 ALOGD_IF(isDebug(), "post heuristic handling failed"); 1427 reset(ctx); 1428 return false; 1429 } 1430 1431 ALOGD_IF(sSimulationFlags,"%s: MDP_ONLY_LAYERS_COMP SUCCEEDED", 1432 __FUNCTION__); 1433 return true; 1434 } 1435 1436 /* Checks for conditions where YUV layers cannot be bypassed */ 1437 bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) { 1438 if(isSkipLayer(layer)) { 1439 ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy); 1440 return false; 1441 } 1442 1443 if(has90Transform(layer) && !canUseRotator(ctx, mDpy)) { 1444 ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__); 1445 return false; 1446 } 1447 1448 if(isSecuring(ctx, layer)) { 1449 ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__); 1450 return false; 1451 } 1452 1453 if(!isValidDimension(ctx, layer)) { 1454 ALOGD_IF(isDebug(), "%s: Buffer is of invalid width", 1455 __FUNCTION__); 1456 return false; 1457 } 1458 1459 if(layer->planeAlpha < 0xFF) { 1460 ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\ 1461 in video only mode", 1462 __FUNCTION__); 1463 return false; 1464 } 1465 1466 return true; 1467 } 1468 1469 /* Checks for conditions where Secure RGB layers cannot be bypassed */ 1470 bool MDPComp::isSecureRGBDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) { 1471 if(isSkipLayer(layer)) { 1472 ALOGD_IF(isDebug(), "%s: Secure RGB layer marked SKIP dpy %d", 1473 __FUNCTION__, mDpy); 1474 return false; 1475 } 1476 1477 if(isSecuring(ctx, layer)) { 1478 ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__); 1479 return false; 1480 } 1481 1482 if(not isSupportedForMDPComp(ctx, layer)) { 1483 ALOGD_IF(isDebug(), "%s: Unsupported secure RGB layer", 1484 __FUNCTION__); 1485 return false; 1486 } 1487 return true; 1488 } 1489 1490 /* starts at fromIndex and check for each layer to find 1491 * if it it has overlapping with any Updating layer above it in zorder 1492 * till the end of the batch. returns true if it finds any intersection */ 1493 bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list, 1494 int fromIndex, int toIndex) { 1495 for(int i = fromIndex; i < toIndex; i++) { 1496 if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) { 1497 if(intersectingUpdatingLayers(list, i+1, toIndex, i)) { 1498 return false; 1499 } 1500 } 1501 } 1502 return true; 1503 } 1504 1505 /* Checks if given layer at targetLayerIndex has any 1506 * intersection with all the updating layers in beween 1507 * fromIndex and toIndex. Returns true if it finds intersectiion */ 1508 bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list, 1509 int fromIndex, int toIndex, int targetLayerIndex) { 1510 for(int i = fromIndex; i <= toIndex; i++) { 1511 if(!mCurrentFrame.isFBComposed[i]) { 1512 if(areLayersIntersecting(&list->hwLayers[i], 1513 &list->hwLayers[targetLayerIndex])) { 1514 return true; 1515 } 1516 } 1517 } 1518 return false; 1519 } 1520 1521 int MDPComp::getBatch(hwc_display_contents_1_t* list, 1522 int& maxBatchStart, int& maxBatchEnd, 1523 int& maxBatchCount) { 1524 int i = 0; 1525 int fbZOrder =-1; 1526 int droppedLayerCt = 0; 1527 while (i < mCurrentFrame.layerCount) { 1528 int batchCount = 0; 1529 int batchStart = i; 1530 int batchEnd = i; 1531 /* Adjust batch Z order with the dropped layers so far */ 1532 int fbZ = batchStart - droppedLayerCt; 1533 int firstZReverseIndex = -1; 1534 int updatingLayersAbove = 0;//Updating layer count in middle of batch 1535 while(i < mCurrentFrame.layerCount) { 1536 if(!mCurrentFrame.isFBComposed[i]) { 1537 if(!batchCount) { 1538 i++; 1539 break; 1540 } 1541 updatingLayersAbove++; 1542 i++; 1543 continue; 1544 } else { 1545 if(mCurrentFrame.drop[i]) { 1546 i++; 1547 droppedLayerCt++; 1548 continue; 1549 } else if(updatingLayersAbove <= 0) { 1550 batchCount++; 1551 batchEnd = i; 1552 i++; 1553 continue; 1554 } else { //Layer is FBComposed, not a drop & updatingLayer > 0 1555 1556 // We have a valid updating layer already. If layer-i not 1557 // have overlapping with all updating layers in between 1558 // batch-start and i, then we can add layer i to batch. 1559 if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) { 1560 batchCount++; 1561 batchEnd = i; 1562 i++; 1563 continue; 1564 } else if(canPushBatchToTop(list, batchStart, i)) { 1565 //If All the non-updating layers with in this batch 1566 //does not have intersection with the updating layers 1567 //above in z-order, then we can safely move the batch to 1568 //higher z-order. Increment fbZ as it is moving up. 1569 if( firstZReverseIndex < 0) { 1570 firstZReverseIndex = i; 1571 } 1572 batchCount++; 1573 batchEnd = i; 1574 fbZ += updatingLayersAbove; 1575 i++; 1576 updatingLayersAbove = 0; 1577 continue; 1578 } else { 1579 //both failed.start the loop again from here. 1580 if(firstZReverseIndex >= 0) { 1581 i = firstZReverseIndex; 1582 } 1583 break; 1584 } 1585 } 1586 } 1587 } 1588 if(batchCount > maxBatchCount) { 1589 maxBatchCount = batchCount; 1590 maxBatchStart = batchStart; 1591 maxBatchEnd = batchEnd; 1592 fbZOrder = fbZ; 1593 } 1594 } 1595 return fbZOrder; 1596 } 1597 1598 bool MDPComp::markLayersForCaching(hwc_context_t* ctx, 1599 hwc_display_contents_1_t* list) { 1600 /* Idea is to keep as many non-updating(cached) layers in FB and 1601 * send rest of them through MDP. This is done in 2 steps. 1602 * 1. Find the maximum contiguous batch of non-updating layers. 1603 * 2. See if we can improve this batch size for caching by adding 1604 * opaque layers around the batch, if they don't have 1605 * any overlapping with the updating layers in between. 1606 * NEVER mark an updating layer for caching. 1607 * But cached ones can be marked for MDP */ 1608 1609 int maxBatchStart = -1; 1610 int maxBatchEnd = -1; 1611 int maxBatchCount = 0; 1612 int fbZ = -1; 1613 1614 /* Nothing is cached. No batching needed */ 1615 if(mCurrentFrame.fbCount == 0) { 1616 return true; 1617 } 1618 1619 /* No MDP comp layers, try to use other comp modes */ 1620 if(mCurrentFrame.mdpCount == 0) { 1621 return false; 1622 } 1623 1624 fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount); 1625 1626 /* reset rest of the layers lying inside ROI for MDP comp */ 1627 for(int i = 0; i < mCurrentFrame.layerCount; i++) { 1628 hwc_layer_1_t* layer = &list->hwLayers[i]; 1629 if((i < maxBatchStart || i > maxBatchEnd) && 1630 mCurrentFrame.isFBComposed[i]){ 1631 if(!mCurrentFrame.drop[i]){ 1632 //If an unsupported layer is being attempted to 1633 //be pulled out we should fail 1634 if(not isSupportedForMDPComp(ctx, layer)) { 1635 return false; 1636 } 1637 mCurrentFrame.isFBComposed[i] = false; 1638 } 1639 } 1640 } 1641 1642 // update the frame data 1643 mCurrentFrame.fbZ = fbZ; 1644 mCurrentFrame.fbCount = maxBatchCount; 1645 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 1646 mCurrentFrame.fbCount - mCurrentFrame.dropCount; 1647 1648 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, 1649 mCurrentFrame.fbCount); 1650 1651 return true; 1652 } 1653 1654 void MDPComp::updateLayerCache(hwc_context_t* ctx, 1655 hwc_display_contents_1_t* list, FrameInfo& frame) { 1656 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 1657 int fbCount = 0; 1658 1659 for(int i = 0; i < numAppLayers; i++) { 1660 if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) { 1661 if(!frame.drop[i]) 1662 fbCount++; 1663 frame.isFBComposed[i] = true; 1664 } else { 1665 frame.isFBComposed[i] = false; 1666 } 1667 } 1668 1669 frame.fbCount = fbCount; 1670 frame.mdpCount = frame.layerCount - frame.fbCount 1671 - frame.dropCount; 1672 1673 ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d", 1674 __FUNCTION__, frame.mdpCount, frame.fbCount, frame.dropCount); 1675 } 1676 1677 // drop other non-AIV layers from external display list. 1678 void MDPComp::dropNonAIVLayers(hwc_context_t* ctx, 1679 hwc_display_contents_1_t* list) { 1680 for (size_t i = 0; i < (size_t)ctx->listStats[mDpy].numAppLayers; i++) { 1681 hwc_layer_1_t * layer = &list->hwLayers[i]; 1682 if(!(isAIVVideoLayer(layer) || isAIVCCLayer(layer))) { 1683 mCurrentFrame.dropCount++; 1684 mCurrentFrame.drop[i] = true; 1685 } 1686 } 1687 mCurrentFrame.fbCount -= mCurrentFrame.dropCount; 1688 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 1689 mCurrentFrame.fbCount - mCurrentFrame.dropCount; 1690 ALOGD_IF(isDebug(),"%s: fb count: %d mdp count %d drop count %d", 1691 __FUNCTION__, mCurrentFrame.fbCount, mCurrentFrame.mdpCount, 1692 mCurrentFrame.dropCount); 1693 } 1694 1695 void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list, 1696 bool secureOnly, FrameInfo& frame) { 1697 int nYuvCount = ctx->listStats[mDpy].yuvCount; 1698 for(int index = 0;index < nYuvCount; index++){ 1699 int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index]; 1700 hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex]; 1701 1702 if(mCurrentFrame.drop[nYuvIndex]) { 1703 continue; 1704 } 1705 1706 if(!isYUVDoable(ctx, layer)) { 1707 if(!frame.isFBComposed[nYuvIndex]) { 1708 frame.isFBComposed[nYuvIndex] = true; 1709 frame.fbCount++; 1710 } 1711 } else { 1712 if(frame.isFBComposed[nYuvIndex]) { 1713 private_handle_t *hnd = (private_handle_t *)layer->handle; 1714 if(!secureOnly || isSecureBuffer(hnd)) { 1715 frame.isFBComposed[nYuvIndex] = false; 1716 frame.fbCount--; 1717 } 1718 } 1719 } 1720 } 1721 1722 frame.mdpCount = frame.layerCount - frame.fbCount - frame.dropCount; 1723 ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__, frame.fbCount); 1724 } 1725 1726 void MDPComp::updateSecureRGB(hwc_context_t* ctx, 1727 hwc_display_contents_1_t* list) { 1728 int nSecureRGBCount = ctx->listStats[mDpy].secureRGBCount; 1729 for(int index = 0;index < nSecureRGBCount; index++){ 1730 int nSecureRGBIndex = ctx->listStats[mDpy].secureRGBIndices[index]; 1731 hwc_layer_1_t* layer = &list->hwLayers[nSecureRGBIndex]; 1732 1733 if(!isSecureRGBDoable(ctx, layer)) { 1734 if(!mCurrentFrame.isFBComposed[nSecureRGBIndex]) { 1735 mCurrentFrame.isFBComposed[nSecureRGBIndex] = true; 1736 mCurrentFrame.fbCount++; 1737 } 1738 } else { 1739 if(mCurrentFrame.isFBComposed[nSecureRGBIndex]) { 1740 mCurrentFrame.isFBComposed[nSecureRGBIndex] = false; 1741 mCurrentFrame.fbCount--; 1742 } 1743 } 1744 } 1745 1746 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 1747 mCurrentFrame.fbCount - mCurrentFrame.dropCount; 1748 ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__, 1749 mCurrentFrame.fbCount); 1750 } 1751 1752 hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx, 1753 hwc_display_contents_1_t* list){ 1754 hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0}; 1755 1756 /* Update only the region of FB needed for composition */ 1757 for(int i = 0; i < mCurrentFrame.layerCount; i++ ) { 1758 if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) { 1759 hwc_layer_1_t* layer = &list->hwLayers[i]; 1760 hwc_rect_t dst = layer->displayFrame; 1761 fbRect = getUnion(fbRect, dst); 1762 } 1763 } 1764 trimAgainstROI(ctx, fbRect); 1765 return fbRect; 1766 } 1767 1768 bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx, 1769 hwc_display_contents_1_t* list) { 1770 1771 //Capability checks 1772 if(!resourceCheck(ctx, list)) { 1773 ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__); 1774 return false; 1775 } 1776 1777 //Limitations checks 1778 if(!hwLimitationsCheck(ctx, list)) { 1779 ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__); 1780 return false; 1781 } 1782 1783 //Configure framebuffer first if applicable 1784 if(mCurrentFrame.fbZ >= 0) { 1785 hwc_rect_t fbRect = getUpdatingFBRect(ctx, list); 1786 if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ)) 1787 { 1788 ALOGD_IF(isDebug(), "%s configure framebuffer failed", 1789 __FUNCTION__); 1790 return false; 1791 } 1792 } 1793 1794 mCurrentFrame.map(); 1795 1796 if(!allocLayerPipes(ctx, list)) { 1797 ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__); 1798 return false; 1799 } 1800 1801 for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; 1802 index++) { 1803 if(!mCurrentFrame.isFBComposed[index]) { 1804 int mdpIndex = mCurrentFrame.layerToMDP[index]; 1805 hwc_layer_1_t* layer = &list->hwLayers[index]; 1806 1807 //Leave fbZ for framebuffer. CACHE/GLES layers go here. 1808 if(mdpNextZOrder == mCurrentFrame.fbZ) { 1809 mdpNextZOrder++; 1810 } 1811 MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1812 cur_pipe->zOrder = mdpNextZOrder++; 1813 1814 private_handle_t *hnd = (private_handle_t *)layer->handle; 1815 if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){ 1816 if(configure4k2kYuv(ctx, layer, 1817 mCurrentFrame.mdpToLayer[mdpIndex]) 1818 != 0 ){ 1819 ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \ 1820 for layer %d",__FUNCTION__, index); 1821 return false; 1822 } 1823 else{ 1824 mdpNextZOrder++; 1825 } 1826 continue; 1827 } 1828 if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){ 1829 ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \ 1830 layer %d",__FUNCTION__, index); 1831 return false; 1832 } 1833 } 1834 } 1835 1836 if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) { 1837 ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d" 1838 ,__FUNCTION__, mDpy); 1839 return false; 1840 } 1841 1842 setRedraw(ctx, list); 1843 return true; 1844 } 1845 1846 bool MDPComp::resourceCheck(hwc_context_t* ctx, 1847 hwc_display_contents_1_t* list) { 1848 const bool fbUsed = mCurrentFrame.fbCount; 1849 if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) { 1850 ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__); 1851 return false; 1852 } 1853 1854 //Will benefit cases where a video has non-updating background. 1855 if((mDpy > HWC_DISPLAY_PRIMARY) and 1856 (mCurrentFrame.mdpCount > sMaxSecLayers)) { 1857 ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__); 1858 return false; 1859 } 1860 1861 // Init rotCount to number of rotate sessions used by other displays 1862 int rotCount = ctx->mRotMgr->getNumActiveSessions(); 1863 // Count the number of rotator sessions required for current display 1864 for (int index = 0; index < mCurrentFrame.layerCount; index++) { 1865 if(!mCurrentFrame.isFBComposed[index]) { 1866 hwc_layer_1_t* layer = &list->hwLayers[index]; 1867 private_handle_t *hnd = (private_handle_t *)layer->handle; 1868 if(has90Transform(layer) && isRotationDoable(ctx, hnd)) { 1869 rotCount++; 1870 } 1871 } 1872 } 1873 // if number of layers to rotate exceeds max rotator sessions, bail out. 1874 if(rotCount > RotMgr::MAX_ROT_SESS) { 1875 ALOGD_IF(isDebug(), "%s: Exceeds max rotator sessions %d", 1876 __FUNCTION__, mDpy); 1877 return false; 1878 } 1879 return true; 1880 } 1881 1882 bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx, 1883 hwc_display_contents_1_t* list) { 1884 1885 //A-family hw limitation: 1886 //If a layer need alpha scaling, MDP can not support. 1887 if(ctx->mMDP.version < qdutils::MDSS_V5) { 1888 for(int i = 0; i < mCurrentFrame.layerCount; ++i) { 1889 if(!mCurrentFrame.isFBComposed[i] && 1890 isAlphaScaled( &list->hwLayers[i])) { 1891 ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__); 1892 return false; 1893 } 1894 } 1895 } 1896 1897 // On 8x26 & 8974 hw, we have a limitation of downscaling+blending. 1898 //If multiple layers requires downscaling and also they are overlapping 1899 //fall back to GPU since MDSS can not handle it. 1900 if(qdutils::MDPVersion::getInstance().is8x74v2() || 1901 qdutils::MDPVersion::getInstance().is8x26()) { 1902 for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) { 1903 hwc_layer_1_t* botLayer = &list->hwLayers[i]; 1904 if(!mCurrentFrame.isFBComposed[i] && 1905 isDownscaleRequired(botLayer)) { 1906 //if layer-i is marked for MDP and needs downscaling 1907 //check if any MDP layer on top of i & overlaps with layer-i 1908 for(int j = i+1; j < mCurrentFrame.layerCount; ++j) { 1909 hwc_layer_1_t* topLayer = &list->hwLayers[j]; 1910 if(!mCurrentFrame.isFBComposed[j] && 1911 isDownscaleRequired(topLayer)) { 1912 hwc_rect_t r = getIntersection(botLayer->displayFrame, 1913 topLayer->displayFrame); 1914 if(isValidRect(r)) 1915 return false; 1916 } 1917 } 1918 } 1919 } 1920 } 1921 return true; 1922 } 1923 1924 // Checks only if videos or single layer(RGB) is updating 1925 // which is used for setting dynamic fps or perf hint for single 1926 // layer video playback 1927 bool MDPComp::onlyVideosUpdating(hwc_context_t *ctx, 1928 hwc_display_contents_1_t* list) { 1929 bool support = false; 1930 FrameInfo frame; 1931 frame.reset(mCurrentFrame.layerCount); 1932 memset(&frame.drop, 0, sizeof(frame.drop)); 1933 frame.dropCount = 0; 1934 ALOGD_IF(isDebug(), "%s: Update Cache and YUVInfo", __FUNCTION__); 1935 updateLayerCache(ctx, list, frame); 1936 updateYUV(ctx, list, false /*secure only*/, frame); 1937 // There are only updating YUV layers or there is single RGB 1938 // Layer(Youtube) 1939 if((ctx->listStats[mDpy].yuvCount == frame.mdpCount) || 1940 (frame.layerCount == 1)) { 1941 support = true; 1942 } 1943 return support; 1944 } 1945 1946 void MDPComp::setDynRefreshRate(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 1947 //For primary display, set the dynamic refreshrate 1948 if(!mDpy && qdutils::MDPVersion::getInstance().isDynFpsSupported() && 1949 ctx->mUseMetaDataRefreshRate) { 1950 uint32_t refreshRate = ctx->dpyAttr[mDpy].refreshRate; 1951 MDPVersion& mdpHw = MDPVersion::getInstance(); 1952 if(sIdleFallBack) { 1953 //Set minimum panel refresh rate during idle timeout 1954 refreshRate = mdpHw.getMinFpsSupported(); 1955 } else if(onlyVideosUpdating(ctx, list)) { 1956 //Set the new fresh rate, if there is only one updating YUV layer 1957 //or there is one single RGB layer with this request 1958 refreshRate = ctx->listStats[mDpy].refreshRateRequest; 1959 } 1960 setRefreshRate(ctx, mDpy, refreshRate); 1961 } 1962 } 1963 1964 int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 1965 int ret = 0; 1966 char property[PROPERTY_VALUE_MAX]; 1967 1968 if(!ctx || !list) { 1969 ALOGE("%s: Invalid context or list",__FUNCTION__); 1970 mCachedFrame.reset(); 1971 return -1; 1972 } 1973 1974 const int numLayers = ctx->listStats[mDpy].numAppLayers; 1975 if(mDpy == HWC_DISPLAY_PRIMARY) { 1976 sSimulationFlags = 0; 1977 if(property_get("debug.hwc.simulate", property, NULL) > 0) { 1978 int currentFlags = atoi(property); 1979 if(currentFlags != sSimulationFlags) { 1980 sSimulationFlags = currentFlags; 1981 ALOGI("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__, 1982 sSimulationFlags, sSimulationFlags); 1983 } 1984 } 1985 } 1986 // reset PTOR 1987 if(!mDpy) 1988 memset(&(ctx->mPtorInfo), 0, sizeof(ctx->mPtorInfo)); 1989 1990 //reset old data 1991 mCurrentFrame.reset(numLayers); 1992 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop)); 1993 mCurrentFrame.dropCount = 0; 1994 1995 //Do not cache the information for next draw cycle. 1996 if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) { 1997 ALOGI("%s: Unsupported layer count for mdp composition", 1998 __FUNCTION__); 1999 mCachedFrame.reset(); 2000 #ifdef DYNAMIC_FPS 2001 setDynRefreshRate(ctx, list); 2002 #endif 2003 return -1; 2004 } 2005 2006 // Detect the start of animation and fall back to GPU only once to cache 2007 // all the layers in FB and display FB content untill animation completes. 2008 if(ctx->listStats[mDpy].isDisplayAnimating) { 2009 mCurrentFrame.needsRedraw = false; 2010 if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) { 2011 mCurrentFrame.needsRedraw = true; 2012 ctx->mAnimationState[mDpy] = ANIMATION_STARTED; 2013 } 2014 setMDPCompLayerFlags(ctx, list); 2015 mCachedFrame.updateCounts(mCurrentFrame); 2016 #ifdef DYNAMIC_FPS 2017 setDynRefreshRate(ctx, list); 2018 #endif 2019 ret = -1; 2020 return ret; 2021 } else { 2022 ctx->mAnimationState[mDpy] = ANIMATION_STOPPED; 2023 } 2024 2025 //Hard conditions, if not met, cannot do MDP comp 2026 if(isFrameDoable(ctx)) { 2027 generateROI(ctx, list); 2028 // if AIV Video mode is enabled, drop all non AIV layers from the 2029 // external display list. 2030 if(ctx->listStats[mDpy].mAIVVideoMode) { 2031 dropNonAIVLayers(ctx, list); 2032 } 2033 2034 // if tryFullFrame fails, try to push all video and secure RGB layers 2035 // to MDP for composition. 2036 mModeOn = tryFullFrame(ctx, list) || tryMDPOnlyLayers(ctx, list) || 2037 tryVideoOnly(ctx, list); 2038 if(mModeOn) { 2039 setMDPCompLayerFlags(ctx, list); 2040 } else { 2041 resetROI(ctx, mDpy); 2042 reset(ctx); 2043 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop)); 2044 mCurrentFrame.dropCount = 0; 2045 ret = -1; 2046 ALOGE_IF(sSimulationFlags && (mDpy == HWC_DISPLAY_PRIMARY), 2047 "MDP Composition Strategies Failed"); 2048 } 2049 } else { 2050 if ((ctx->mMDP.version == qdutils::MDP_V3_0_5) && ctx->mCopyBit[mDpy] && 2051 enablePartialUpdateForMDP3) { 2052 generateROI(ctx, list); 2053 for(int i = 0; i < ctx->listStats[mDpy].numAppLayers; i++) { 2054 ctx->copybitDrop[i] = mCurrentFrame.drop[i]; 2055 } 2056 } 2057 ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame", 2058 __FUNCTION__); 2059 ret = -1; 2060 } 2061 2062 if(isDebug()) { 2063 ALOGD("GEOMETRY change: %d", 2064 (list->flags & HWC_GEOMETRY_CHANGED)); 2065 android::String8 sDump(""); 2066 dump(sDump, ctx); 2067 ALOGD("%s",sDump.string()); 2068 } 2069 2070 #ifdef DYNAMIC_FPS 2071 setDynRefreshRate(ctx, list); 2072 #endif 2073 setPerfHint(ctx, list); 2074 2075 mCachedFrame.cacheAll(list); 2076 mCachedFrame.updateCounts(mCurrentFrame); 2077 return ret; 2078 } 2079 2080 bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) { 2081 2082 bool bRet = true; 2083 int mdpIndex = mCurrentFrame.layerToMDP[index]; 2084 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 2085 info.pipeInfo = new MdpYUVPipeInfo; 2086 info.rot = NULL; 2087 MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo; 2088 2089 pipe_info.lIndex = ovutils::OV_INVALID; 2090 pipe_info.rIndex = ovutils::OV_INVALID; 2091 2092 Overlay::PipeSpecs pipeSpecs; 2093 pipeSpecs.formatClass = Overlay::FORMAT_YUV; 2094 pipeSpecs.needsScaling = true; 2095 pipeSpecs.dpy = mDpy; 2096 pipeSpecs.fb = false; 2097 2098 pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs); 2099 if(pipe_info.lIndex == ovutils::OV_INVALID){ 2100 bRet = false; 2101 ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed", 2102 __FUNCTION__); 2103 } 2104 pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs); 2105 if(pipe_info.rIndex == ovutils::OV_INVALID){ 2106 bRet = false; 2107 ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed", 2108 __FUNCTION__); 2109 } 2110 return bRet; 2111 } 2112 2113 int MDPComp::drawOverlap(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 2114 int fd = -1; 2115 if (ctx->mPtorInfo.isActive()) { 2116 fd = ctx->mCopyBit[mDpy]->drawOverlap(ctx, list); 2117 if (fd < 0) { 2118 ALOGD_IF(isDebug(),"%s: failed", __FUNCTION__); 2119 } 2120 } 2121 return fd; 2122 } 2123 //=============MDPCompNonSplit================================================== 2124 2125 void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx, 2126 hwc_display_contents_1_t* list) { 2127 //If 4k2k Yuv layer split is possible, and if 2128 //fbz is above 4k2k layer, increment fb zorder by 1 2129 //as we split 4k2k layer and increment zorder for right half 2130 //of the layer 2131 if(!ctx) 2132 return; 2133 if(mCurrentFrame.fbZ >= 0) { 2134 for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; 2135 index++) { 2136 if(!mCurrentFrame.isFBComposed[index]) { 2137 if(mdpNextZOrder == mCurrentFrame.fbZ) { 2138 mdpNextZOrder++; 2139 } 2140 mdpNextZOrder++; 2141 hwc_layer_1_t* layer = &list->hwLayers[index]; 2142 private_handle_t *hnd = (private_handle_t *)layer->handle; 2143 if(isYUVSplitNeeded(hnd)) { 2144 if(mdpNextZOrder <= mCurrentFrame.fbZ) 2145 mCurrentFrame.fbZ += 1; 2146 mdpNextZOrder++; 2147 //As we split 4kx2k yuv layer and program to 2 VG pipes 2148 //(if available) increase mdpcount by 1. 2149 mCurrentFrame.mdpCount++; 2150 } 2151 } 2152 } 2153 } 2154 } 2155 2156 /* 2157 * Configures pipe(s) for MDP composition 2158 */ 2159 int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 2160 PipeLayerPair& PipeLayerPair) { 2161 MdpPipeInfoNonSplit& mdp_info = 2162 *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo)); 2163 eMdpFlags mdpFlags = ovutils::OV_MDP_FLAGS_NONE; 2164 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 2165 eDest dest = mdp_info.index; 2166 2167 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d", 2168 __FUNCTION__, layer, zOrder, dest); 2169 2170 return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, dest, 2171 &PipeLayerPair.rot); 2172 } 2173 2174 bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx, 2175 hwc_display_contents_1_t* list) { 2176 for(int index = 0; index < mCurrentFrame.layerCount; index++) { 2177 2178 if(mCurrentFrame.isFBComposed[index]) continue; 2179 2180 hwc_layer_1_t* layer = &list->hwLayers[index]; 2181 private_handle_t *hnd = (private_handle_t *)layer->handle; 2182 if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){ 2183 if(allocSplitVGPipesfor4k2k(ctx, index)){ 2184 continue; 2185 } 2186 } 2187 2188 int mdpIndex = mCurrentFrame.layerToMDP[index]; 2189 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 2190 info.pipeInfo = new MdpPipeInfoNonSplit; 2191 info.rot = NULL; 2192 MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo; 2193 2194 Overlay::PipeSpecs pipeSpecs; 2195 pipeSpecs.formatClass = isYuvBuffer(hnd) ? 2196 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB; 2197 pipeSpecs.needsScaling = qhwc::needsScaling(layer) or 2198 (qdutils::MDPVersion::getInstance().is8x26() and 2199 ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024); 2200 pipeSpecs.dpy = mDpy; 2201 pipeSpecs.fb = false; 2202 pipeSpecs.numActiveDisplays = ctx->numActiveDisplays; 2203 2204 pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs); 2205 2206 if(pipe_info.index == ovutils::OV_INVALID) { 2207 ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__); 2208 return false; 2209 } 2210 } 2211 return true; 2212 } 2213 2214 int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer, 2215 PipeLayerPair& PipeLayerPair) { 2216 MdpYUVPipeInfo& mdp_info = 2217 *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo)); 2218 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 2219 eMdpFlags mdpFlagsL = ovutils::OV_MDP_FLAGS_NONE; 2220 eDest lDest = mdp_info.lIndex; 2221 eDest rDest = mdp_info.rIndex; 2222 2223 return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, 2224 lDest, rDest, &PipeLayerPair.rot); 2225 } 2226 2227 bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 2228 2229 if(!isEnabled() or !mModeOn) { 2230 ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__); 2231 return true; 2232 } 2233 2234 // Set the Handle timeout to true for MDP or MIXED composition. 2235 if(sIdleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) { 2236 sHandleTimeout = true; 2237 } 2238 2239 overlay::Overlay& ov = *ctx->mOverlay; 2240 LayerProp *layerProp = ctx->layerProp[mDpy]; 2241 2242 int numHwLayers = ctx->listStats[mDpy].numAppLayers; 2243 for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ ) 2244 { 2245 if(mCurrentFrame.isFBComposed[i]) continue; 2246 2247 hwc_layer_1_t *layer = &list->hwLayers[i]; 2248 private_handle_t *hnd = (private_handle_t *)layer->handle; 2249 if(!hnd) { 2250 if (!(layer->flags & HWC_COLOR_FILL)) { 2251 ALOGE("%s handle null", __FUNCTION__); 2252 return false; 2253 } 2254 // No PLAY for Color layer 2255 layerProp[i].mFlags &= ~HWC_MDPCOMP; 2256 continue; 2257 } 2258 2259 int mdpIndex = mCurrentFrame.layerToMDP[i]; 2260 2261 if(isYUVSplitNeeded(hnd) && sEnableYUVsplit) 2262 { 2263 MdpYUVPipeInfo& pipe_info = 2264 *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 2265 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 2266 ovutils::eDest indexL = pipe_info.lIndex; 2267 ovutils::eDest indexR = pipe_info.rIndex; 2268 int fd = hnd->fd; 2269 uint32_t offset = (uint32_t)hnd->offset; 2270 if(rot) { 2271 rot->queueBuffer(fd, offset); 2272 fd = rot->getDstMemId(); 2273 offset = rot->getDstOffset(); 2274 } 2275 if(indexL != ovutils::OV_INVALID) { 2276 ovutils::eDest destL = (ovutils::eDest)indexL; 2277 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 2278 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 2279 if (!ov.queueBuffer(fd, offset, destL)) { 2280 ALOGE("%s: queueBuffer failed for display:%d", 2281 __FUNCTION__, mDpy); 2282 return false; 2283 } 2284 } 2285 2286 if(indexR != ovutils::OV_INVALID) { 2287 ovutils::eDest destR = (ovutils::eDest)indexR; 2288 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 2289 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 2290 if (!ov.queueBuffer(fd, offset, destR)) { 2291 ALOGE("%s: queueBuffer failed for display:%d", 2292 __FUNCTION__, mDpy); 2293 return false; 2294 } 2295 } 2296 } 2297 else{ 2298 MdpPipeInfoNonSplit& pipe_info = 2299 *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 2300 ovutils::eDest dest = pipe_info.index; 2301 if(dest == ovutils::OV_INVALID) { 2302 ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest); 2303 return false; 2304 } 2305 2306 if(!(layerProp[i].mFlags & HWC_MDPCOMP)) { 2307 continue; 2308 } 2309 2310 int fd = hnd->fd; 2311 uint32_t offset = (uint32_t)hnd->offset; 2312 int index = ctx->mPtorInfo.getPTORArrayIndex(i); 2313 if (!mDpy && (index != -1)) { 2314 hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer(); 2315 fd = hnd->fd; 2316 offset = 0; 2317 } 2318 2319 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 2320 using pipe: %d", __FUNCTION__, layer, 2321 hnd, dest ); 2322 2323 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 2324 if(rot) { 2325 if(!rot->queueBuffer(fd, offset)) 2326 return false; 2327 fd = rot->getDstMemId(); 2328 offset = rot->getDstOffset(); 2329 } 2330 2331 if (!ov.queueBuffer(fd, offset, dest)) { 2332 ALOGE("%s: queueBuffer failed for display:%d ", 2333 __FUNCTION__, mDpy); 2334 return false; 2335 } 2336 } 2337 2338 layerProp[i].mFlags &= ~HWC_MDPCOMP; 2339 } 2340 return true; 2341 } 2342 2343 //=============MDPCompSplit=================================================== 2344 2345 void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx, 2346 hwc_display_contents_1_t* list){ 2347 //if 4kx2k yuv layer is totally present in either in left half 2348 //or right half then try splitting the yuv layer to avoid decimation 2349 const int lSplit = getLeftSplit(ctx, mDpy); 2350 if(mCurrentFrame.fbZ >= 0) { 2351 for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; 2352 index++) { 2353 if(!mCurrentFrame.isFBComposed[index]) { 2354 if(mdpNextZOrder == mCurrentFrame.fbZ) { 2355 mdpNextZOrder++; 2356 } 2357 mdpNextZOrder++; 2358 hwc_layer_1_t* layer = &list->hwLayers[index]; 2359 private_handle_t *hnd = (private_handle_t *)layer->handle; 2360 if(isYUVSplitNeeded(hnd)) { 2361 hwc_rect_t dst = layer->displayFrame; 2362 if((dst.left > lSplit) || (dst.right < lSplit)) { 2363 mCurrentFrame.mdpCount += 1; 2364 } 2365 if(mdpNextZOrder <= mCurrentFrame.fbZ) 2366 mCurrentFrame.fbZ += 1; 2367 mdpNextZOrder++; 2368 } 2369 } 2370 } 2371 } 2372 } 2373 2374 bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer, 2375 MdpPipeInfoSplit& pipe_info) { 2376 2377 const int lSplit = getLeftSplit(ctx, mDpy); 2378 private_handle_t *hnd = (private_handle_t *)layer->handle; 2379 hwc_rect_t dst = layer->displayFrame; 2380 pipe_info.lIndex = ovutils::OV_INVALID; 2381 pipe_info.rIndex = ovutils::OV_INVALID; 2382 2383 Overlay::PipeSpecs pipeSpecs; 2384 pipeSpecs.formatClass = isYuvBuffer(hnd) ? 2385 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB; 2386 pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy); 2387 pipeSpecs.dpy = mDpy; 2388 pipeSpecs.mixer = Overlay::MIXER_LEFT; 2389 pipeSpecs.fb = false; 2390 2391 // Acquire pipe only for the updating half 2392 hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi; 2393 hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi; 2394 2395 if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) { 2396 pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs); 2397 if(pipe_info.lIndex == ovutils::OV_INVALID) 2398 return false; 2399 } 2400 2401 if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) { 2402 pipeSpecs.mixer = Overlay::MIXER_RIGHT; 2403 pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs); 2404 if(pipe_info.rIndex == ovutils::OV_INVALID) 2405 return false; 2406 } 2407 2408 return true; 2409 } 2410 2411 bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx, 2412 hwc_display_contents_1_t* list) { 2413 for(int index = 0 ; index < mCurrentFrame.layerCount; index++) { 2414 2415 if(mCurrentFrame.isFBComposed[index]) continue; 2416 2417 hwc_layer_1_t* layer = &list->hwLayers[index]; 2418 private_handle_t *hnd = (private_handle_t *)layer->handle; 2419 hwc_rect_t dst = layer->displayFrame; 2420 const int lSplit = getLeftSplit(ctx, mDpy); 2421 if(isYUVSplitNeeded(hnd) && sEnableYUVsplit){ 2422 if((dst.left > lSplit)||(dst.right < lSplit)){ 2423 if(allocSplitVGPipesfor4k2k(ctx, index)){ 2424 continue; 2425 } 2426 } 2427 } 2428 int mdpIndex = mCurrentFrame.layerToMDP[index]; 2429 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 2430 info.pipeInfo = new MdpPipeInfoSplit; 2431 info.rot = NULL; 2432 MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo; 2433 2434 if(!acquireMDPPipes(ctx, layer, pipe_info)) { 2435 ALOGD_IF(isDebug(), "%s: Unable to get pipe for type", 2436 __FUNCTION__); 2437 return false; 2438 } 2439 } 2440 return true; 2441 } 2442 2443 int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer, 2444 PipeLayerPair& PipeLayerPair) { 2445 const int lSplit = getLeftSplit(ctx, mDpy); 2446 hwc_rect_t dst = layer->displayFrame; 2447 if((dst.left > lSplit)||(dst.right < lSplit)){ 2448 MdpYUVPipeInfo& mdp_info = 2449 *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo)); 2450 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 2451 eMdpFlags mdpFlagsL = ovutils::OV_MDP_FLAGS_NONE; 2452 eDest lDest = mdp_info.lIndex; 2453 eDest rDest = mdp_info.rIndex; 2454 2455 return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, 2456 lDest, rDest, &PipeLayerPair.rot); 2457 } 2458 else{ 2459 return configure(ctx, layer, PipeLayerPair); 2460 } 2461 } 2462 2463 /* 2464 * Configures pipe(s) for MDP composition 2465 */ 2466 int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 2467 PipeLayerPair& PipeLayerPair) { 2468 MdpPipeInfoSplit& mdp_info = 2469 *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo)); 2470 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 2471 eMdpFlags mdpFlagsL = ovutils::OV_MDP_FLAGS_NONE; 2472 eDest lDest = mdp_info.lIndex; 2473 eDest rDest = mdp_info.rIndex; 2474 2475 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d" 2476 "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest); 2477 2478 return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, lDest, 2479 rDest, &PipeLayerPair.rot); 2480 } 2481 2482 bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 2483 2484 if(!isEnabled() or !mModeOn) { 2485 ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__); 2486 return true; 2487 } 2488 2489 // Set the Handle timeout to true for MDP or MIXED composition. 2490 if(sIdleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) { 2491 sHandleTimeout = true; 2492 } 2493 2494 overlay::Overlay& ov = *ctx->mOverlay; 2495 LayerProp *layerProp = ctx->layerProp[mDpy]; 2496 2497 int numHwLayers = ctx->listStats[mDpy].numAppLayers; 2498 for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ ) 2499 { 2500 if(mCurrentFrame.isFBComposed[i]) continue; 2501 2502 hwc_layer_1_t *layer = &list->hwLayers[i]; 2503 private_handle_t *hnd = (private_handle_t *)layer->handle; 2504 if(!hnd) { 2505 ALOGE("%s handle null", __FUNCTION__); 2506 return false; 2507 } 2508 2509 if(!(layerProp[i].mFlags & HWC_MDPCOMP)) { 2510 continue; 2511 } 2512 2513 int mdpIndex = mCurrentFrame.layerToMDP[i]; 2514 2515 if(isYUVSplitNeeded(hnd) && sEnableYUVsplit) 2516 { 2517 MdpYUVPipeInfo& pipe_info = 2518 *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 2519 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 2520 ovutils::eDest indexL = pipe_info.lIndex; 2521 ovutils::eDest indexR = pipe_info.rIndex; 2522 int fd = hnd->fd; 2523 uint32_t offset = (uint32_t)hnd->offset; 2524 if(rot) { 2525 rot->queueBuffer(fd, offset); 2526 fd = rot->getDstMemId(); 2527 offset = rot->getDstOffset(); 2528 } 2529 if(indexL != ovutils::OV_INVALID) { 2530 ovutils::eDest destL = (ovutils::eDest)indexL; 2531 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 2532 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 2533 if (!ov.queueBuffer(fd, offset, destL)) { 2534 ALOGE("%s: queueBuffer failed for display:%d", 2535 __FUNCTION__, mDpy); 2536 return false; 2537 } 2538 } 2539 2540 if(indexR != ovutils::OV_INVALID) { 2541 ovutils::eDest destR = (ovutils::eDest)indexR; 2542 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 2543 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 2544 if (!ov.queueBuffer(fd, offset, destR)) { 2545 ALOGE("%s: queueBuffer failed for display:%d", 2546 __FUNCTION__, mDpy); 2547 return false; 2548 } 2549 } 2550 } 2551 else{ 2552 MdpPipeInfoSplit& pipe_info = 2553 *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 2554 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 2555 2556 ovutils::eDest indexL = pipe_info.lIndex; 2557 ovutils::eDest indexR = pipe_info.rIndex; 2558 2559 int fd = hnd->fd; 2560 uint32_t offset = (uint32_t)hnd->offset; 2561 int index = ctx->mPtorInfo.getPTORArrayIndex(i); 2562 if (!mDpy && (index != -1)) { 2563 hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer(); 2564 fd = hnd->fd; 2565 offset = 0; 2566 } 2567 2568 if(ctx->mAD->draw(ctx, fd, offset)) { 2569 fd = ctx->mAD->getDstFd(); 2570 offset = ctx->mAD->getDstOffset(); 2571 } 2572 2573 if(rot) { 2574 rot->queueBuffer(fd, offset); 2575 fd = rot->getDstMemId(); 2576 offset = rot->getDstOffset(); 2577 } 2578 2579 //************* play left mixer ********** 2580 if(indexL != ovutils::OV_INVALID) { 2581 ovutils::eDest destL = (ovutils::eDest)indexL; 2582 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 2583 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 2584 if (!ov.queueBuffer(fd, offset, destL)) { 2585 ALOGE("%s: queueBuffer failed for left mixer", 2586 __FUNCTION__); 2587 return false; 2588 } 2589 } 2590 2591 //************* play right mixer ********** 2592 if(indexR != ovutils::OV_INVALID) { 2593 ovutils::eDest destR = (ovutils::eDest)indexR; 2594 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 2595 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 2596 if (!ov.queueBuffer(fd, offset, destR)) { 2597 ALOGE("%s: queueBuffer failed for right mixer", 2598 __FUNCTION__); 2599 return false; 2600 } 2601 } 2602 } 2603 2604 layerProp[i].mFlags &= ~HWC_MDPCOMP; 2605 } 2606 2607 return true; 2608 } 2609 2610 //================MDPCompSrcSplit============================================== 2611 bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer, 2612 MdpPipeInfoSplit& pipe_info) { 2613 private_handle_t *hnd = (private_handle_t *)layer->handle; 2614 hwc_rect_t dst = layer->displayFrame; 2615 hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf); 2616 pipe_info.lIndex = ovutils::OV_INVALID; 2617 pipe_info.rIndex = ovutils::OV_INVALID; 2618 2619 //If 2 pipes are staged on a single stage of a mixer, then the left pipe 2620 //should have a higher priority than the right one. Pipe priorities are 2621 //starting with VG0, VG1 ... , RGB0 ..., DMA1 2622 2623 Overlay::PipeSpecs pipeSpecs; 2624 pipeSpecs.formatClass = isYuvBuffer(hnd) ? 2625 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB; 2626 pipeSpecs.needsScaling = qhwc::needsScaling(layer); 2627 pipeSpecs.dpy = mDpy; 2628 pipeSpecs.fb = false; 2629 2630 //1 pipe by default for a layer 2631 pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs); 2632 if(pipe_info.lIndex == ovutils::OV_INVALID) { 2633 return false; 2634 } 2635 2636 /* Use 2 pipes IF 2637 a) Layer's crop width is > 2048 or 2638 b) Layer's dest width > 2048 or 2639 c) On primary, driver has indicated with caps to split always. This is 2640 based on an empirically derived value of panel height. Applied only 2641 if the layer's width is > mixer's width 2642 */ 2643 2644 MDPVersion& mdpHw = MDPVersion::getInstance(); 2645 bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and 2646 mdpHw.isSrcSplitAlways(); 2647 const uint32_t lSplit = getLeftSplit(ctx, mDpy); 2648 const uint32_t dstWidth = dst.right - dst.left; 2649 const uint32_t dstHeight = dst.bottom - dst.top; 2650 const uint32_t cropWidth = has90Transform(layer) ? crop.bottom - crop.top : 2651 crop.right - crop.left; 2652 const uint32_t cropHeight = has90Transform(layer) ? crop.right - crop.left : 2653 crop.bottom - crop.top; 2654 //Approximation to actual clock, ignoring the common factors in pipe and 2655 //mixer cases like line_time 2656 const uint32_t layerClock = getLayerClock(dstWidth, dstHeight, cropHeight); 2657 const uint32_t mixerClock = lSplit; 2658 2659 //TODO Even if a 4k video is going to be rot-downscaled to dimensions under 2660 //pipe line length, we are still using 2 pipes. This is fine just because 2661 //this is source split where destination doesn't matter. Evaluate later to 2662 //see if going through all the calcs to save a pipe is worth it 2663 if(dstWidth > mdpHw.getMaxPipeWidth() or 2664 cropWidth > mdpHw.getMaxPipeWidth() or 2665 (primarySplitAlways and 2666 (cropWidth > lSplit or layerClock > mixerClock))) { 2667 pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs); 2668 if(pipe_info.rIndex == ovutils::OV_INVALID) { 2669 return false; 2670 } 2671 2672 // Return values 2673 // 1 Left pipe is higher priority, do nothing. 2674 // 0 Pipes of same priority. 2675 //-1 Right pipe is of higher priority, needs swap. 2676 if(ctx->mOverlay->comparePipePriority(pipe_info.lIndex, 2677 pipe_info.rIndex) == -1) { 2678 qhwc::swap(pipe_info.lIndex, pipe_info.rIndex); 2679 } 2680 } 2681 2682 return true; 2683 } 2684 2685 int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 2686 PipeLayerPair& PipeLayerPair) { 2687 private_handle_t *hnd = (private_handle_t *)layer->handle; 2688 if(!hnd) { 2689 ALOGE("%s: layer handle is NULL", __FUNCTION__); 2690 return -1; 2691 } 2692 MetaData_t *metadata = (MetaData_t *)hnd->base_metadata; 2693 MdpPipeInfoSplit& mdp_info = 2694 *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo)); 2695 Rotator **rot = &PipeLayerPair.rot; 2696 eZorder z = static_cast<eZorder>(mdp_info.zOrder); 2697 eDest lDest = mdp_info.lIndex; 2698 eDest rDest = mdp_info.rIndex; 2699 hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf); 2700 hwc_rect_t dst = layer->displayFrame; 2701 int transform = layer->transform; 2702 eTransform orient = static_cast<eTransform>(transform); 2703 int rotFlags = ROT_FLAGS_NONE; 2704 uint32_t format = ovutils::getMdpFormat(hnd->format, hnd->flags); 2705 Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size); 2706 2707 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d" 2708 "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest); 2709 2710 // Handle R/B swap 2711 if (layer->flags & HWC_FORMAT_RB_SWAP) { 2712 if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888) 2713 whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888); 2714 else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888) 2715 whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888); 2716 } 2717 // update source crop and destination position of AIV video layer. 2718 if(ctx->listStats[mDpy].mAIVVideoMode && isYuvBuffer(hnd)) { 2719 updateCoordinates(ctx, crop, dst, mDpy); 2720 } 2721 /* Calculate the external display position based on MDP downscale, 2722 ActionSafe, and extorientation features. */ 2723 calcExtDisplayPosition(ctx, hnd, mDpy, crop, dst, transform, orient); 2724 2725 int downscale = getRotDownscale(ctx, layer); 2726 eMdpFlags mdpFlags = ovutils::OV_MDP_FLAGS_NONE; 2727 setMdpFlags(ctx, layer, mdpFlags, downscale, transform); 2728 2729 if(lDest != OV_INVALID && rDest != OV_INVALID) { 2730 //Enable overfetch 2731 setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE); 2732 } 2733 2734 if((has90Transform(layer) or downscale) and isRotationDoable(ctx, hnd)) { 2735 (*rot) = ctx->mRotMgr->getNext(); 2736 if((*rot) == NULL) return -1; 2737 ctx->mLayerRotMap[mDpy]->add(layer, *rot); 2738 //If the video is using a single pipe, enable BWC 2739 if(rDest == OV_INVALID) { 2740 BwcPM::setBwc(ctx, mDpy, hnd, crop, dst, transform, downscale, 2741 mdpFlags); 2742 } 2743 //Configure rotator for pre-rotation 2744 if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) { 2745 ALOGE("%s: configRotator failed!", __FUNCTION__); 2746 return -1; 2747 } 2748 updateSource(orient, whf, crop, *rot); 2749 rotFlags |= ovutils::ROT_PREROTATED; 2750 } 2751 2752 //If 2 pipes being used, divide layer into half, crop and dst 2753 hwc_rect_t cropL = crop; 2754 hwc_rect_t cropR = crop; 2755 hwc_rect_t dstL = dst; 2756 hwc_rect_t dstR = dst; 2757 if(lDest != OV_INVALID && rDest != OV_INVALID) { 2758 cropL.right = (crop.right + crop.left) / 2; 2759 cropR.left = cropL.right; 2760 sanitizeSourceCrop(cropL, cropR, hnd); 2761 2762 bool cropSwap = false; 2763 //Swap crops on H flip since 2 pipes are being used 2764 if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) { 2765 hwc_rect_t tmp = cropL; 2766 cropL = cropR; 2767 cropR = tmp; 2768 cropSwap = true; 2769 } 2770 2771 //cropSwap trick: If the src and dst widths are both odd, let us say 2772 //2507, then splitting both into half would cause left width to be 1253 2773 //and right 1254. If crop is swapped because of H flip, this will cause 2774 //left crop width to be 1254, whereas left dst width remains 1253, thus 2775 //inducing a scaling that is unaccounted for. To overcome that we add 1 2776 //to the dst width if there is a cropSwap. So if the original width was 2777 //2507, the left dst width will be 1254. Even if the original width was 2778 //even for ex: 2508, the left dst width will still remain 1254. 2779 dstL.right = (dst.right + dst.left + cropSwap) / 2; 2780 dstR.left = dstL.right; 2781 } 2782 2783 //For the mdp, since either we are pre-rotating or MDP does flips 2784 orient = OVERLAY_TRANSFORM_0; 2785 transform = 0; 2786 2787 //configure left pipe 2788 if(lDest != OV_INVALID) { 2789 PipeArgs pargL(mdpFlags, whf, z, 2790 static_cast<eRotFlags>(rotFlags), layer->planeAlpha, 2791 (ovutils::eBlending) getBlending(layer->blending)); 2792 2793 if(configMdp(ctx->mOverlay, pargL, orient, 2794 cropL, dstL, metadata, lDest) < 0) { 2795 ALOGE("%s: commit failed for left mixer config", __FUNCTION__); 2796 return -1; 2797 } 2798 } 2799 2800 //configure right pipe 2801 if(rDest != OV_INVALID) { 2802 PipeArgs pargR(mdpFlags, whf, z, 2803 static_cast<eRotFlags>(rotFlags), 2804 layer->planeAlpha, 2805 (ovutils::eBlending) getBlending(layer->blending)); 2806 if(configMdp(ctx->mOverlay, pargR, orient, 2807 cropR, dstR, metadata, rDest) < 0) { 2808 ALOGE("%s: commit failed for right mixer config", __FUNCTION__); 2809 return -1; 2810 } 2811 } 2812 2813 return 0; 2814 } 2815 2816 bool MDPComp::getPartialUpdatePref(hwc_context_t *ctx) { 2817 Locker::Autolock _l(ctx->mDrawLock); 2818 const int fbNum = Overlay::getFbForDpy(Overlay::DPY_PRIMARY); 2819 char path[MAX_SYSFS_FILE_PATH]; 2820 snprintf (path, sizeof(path), "sys/class/graphics/fb%d/dyn_pu", fbNum); 2821 int fd = open(path, O_RDONLY); 2822 if(fd < 0) { 2823 ALOGE("%s: Failed to open sysfd node: %s", __FUNCTION__, path); 2824 return -1; 2825 } 2826 char value[4]; 2827 ssize_t size_read = read(fd, value, sizeof(value)-1); 2828 if(size_read <= 0) { 2829 ALOGE("%s: Failed to read sysfd node: %s", __FUNCTION__, path); 2830 close(fd); 2831 return -1; 2832 } 2833 close(fd); 2834 value[size_read] = '\0'; 2835 return atoi(value); 2836 } 2837 2838 int MDPComp::setPartialUpdatePref(hwc_context_t *ctx, bool enable) { 2839 Locker::Autolock _l(ctx->mDrawLock); 2840 const int fbNum = Overlay::getFbForDpy(Overlay::DPY_PRIMARY); 2841 char path[MAX_SYSFS_FILE_PATH]; 2842 snprintf (path, sizeof(path), "sys/class/graphics/fb%d/dyn_pu", fbNum); 2843 int fd = open(path, O_WRONLY); 2844 if(fd < 0) { 2845 ALOGE("%s: Failed to open sysfd node: %s", __FUNCTION__, path); 2846 return -1; 2847 } 2848 char value[4]; 2849 snprintf(value, sizeof(value), "%d", (int)enable); 2850 ssize_t ret = write(fd, value, strlen(value)); 2851 if(ret <= 0) { 2852 ALOGE("%s: Failed to write to sysfd nodes: %s", __FUNCTION__, path); 2853 close(fd); 2854 return -1; 2855 } 2856 close(fd); 2857 sIsPartialUpdateActive = enable; 2858 return 0; 2859 } 2860 2861 bool MDPComp::loadPerfLib() { 2862 char perfLibPath[PROPERTY_VALUE_MAX] = {0}; 2863 bool success = false; 2864 if((property_get("ro.vendor.extension_library", perfLibPath, NULL) <= 0)) { 2865 ALOGE("vendor library not set in ro.vendor.extension_library"); 2866 return false; 2867 } 2868 2869 sLibPerfHint = dlopen(perfLibPath, RTLD_NOW); 2870 if(sLibPerfHint) { 2871 *(void **)&sPerfLockAcquire = dlsym(sLibPerfHint, "perf_lock_acq"); 2872 *(void **)&sPerfLockRelease = dlsym(sLibPerfHint, "perf_lock_rel"); 2873 if (!sPerfLockAcquire || !sPerfLockRelease) { 2874 ALOGE("Failed to load symbols for perfLock"); 2875 dlclose(sLibPerfHint); 2876 sLibPerfHint = NULL; 2877 return false; 2878 } 2879 success = true; 2880 ALOGI("Successfully Loaded perf hint API's"); 2881 } else { 2882 ALOGE("Failed to open %s : %s", perfLibPath, dlerror()); 2883 } 2884 return success; 2885 } 2886 2887 void MDPComp::setPerfHint(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 2888 if ((sPerfHintWindow < 0) || mDpy || !sLibPerfHint) { 2889 return; 2890 } 2891 static int count = sPerfHintWindow; 2892 static int perflockFlag = 0; 2893 2894 /* Send hint to mpctl when single layer is updated 2895 * for a successful number of windows. Hint release 2896 * happens immediately upon multiple layer update. 2897 */ 2898 if (onlyVideosUpdating(ctx, list)) { 2899 if(count) { 2900 count--; 2901 } 2902 } else { 2903 if (perflockFlag) { 2904 perflockFlag = 0; 2905 sPerfLockRelease(sPerfLockHandle); 2906 } 2907 count = sPerfHintWindow; 2908 } 2909 if (count == 0 && !perflockFlag) { 2910 int perfHint = 0x4501; // 45-display layer hint, 01-Enable 2911 sPerfLockHandle = sPerfLockAcquire(0 /*handle*/, 0/*duration*/, 2912 &perfHint, sizeof(perfHint)/sizeof(int)); 2913 if(sPerfLockHandle < 0) { 2914 ALOGE("Perf Lock Acquire Failed"); 2915 } else { 2916 perflockFlag = 1; 2917 } 2918 } 2919 } 2920 2921 }; //namespace 2922 2923