1 /* 2 * Copyright (C) 2012-2014, The Linux Foundation. All rights reserved. 3 * Not a Contribution, Apache license notifications and license are retained 4 * for attribution purposes only. 5 * 6 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * you may not use this file except in compliance with the License. 8 * You may obtain a copy of the License at 9 * 10 * http://www.apache.org/licenses/LICENSE-2.0 11 * 12 * Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an "AS IS" BASIS, 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 * See the License for the specific language governing permissions and 16 * limitations under the License. 17 */ 18 19 #include <math.h> 20 #include "hwc_mdpcomp.h" 21 #include <sys/ioctl.h> 22 #include "external.h" 23 #include "virtual.h" 24 #include "qdMetaData.h" 25 #include "mdp_version.h" 26 #include "hwc_fbupdate.h" 27 #include "hwc_ad.h" 28 #include <overlayRotator.h> 29 30 using namespace overlay; 31 using namespace qdutils; 32 using namespace overlay::utils; 33 namespace ovutils = overlay::utils; 34 35 namespace qhwc { 36 37 //==============MDPComp======================================================== 38 39 IdleInvalidator *MDPComp::idleInvalidator = NULL; 40 bool MDPComp::sIdleFallBack = false; 41 bool MDPComp::sHandleTimeout = false; 42 bool MDPComp::sDebugLogs = false; 43 bool MDPComp::sEnabled = false; 44 bool MDPComp::sEnableMixedMode = true; 45 int MDPComp::sSimulationFlags = 0; 46 int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER; 47 bool MDPComp::sEnable4k2kYUVSplit = false; 48 bool MDPComp::sSrcSplitEnabled = false; 49 MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) { 50 if(qdutils::MDPVersion::getInstance().isSrcSplit()) { 51 sSrcSplitEnabled = true; 52 return new MDPCompSrcSplit(dpy); 53 } else if(isDisplaySplit(ctx, dpy)) { 54 return new MDPCompSplit(dpy); 55 } 56 return new MDPCompNonSplit(dpy); 57 } 58 59 MDPComp::MDPComp(int dpy):mDpy(dpy){}; 60 61 void MDPComp::dump(android::String8& buf, hwc_context_t *ctx) 62 { 63 if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS) 64 return; 65 66 dumpsys_log(buf,"HWC Map for Dpy: %s \n", 67 (mDpy == 0) ? "\"PRIMARY\"" : 68 (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\""); 69 dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d " 70 "fbCount:%2d \n", mCurrentFrame.layerCount, 71 mCurrentFrame.mdpCount, mCurrentFrame.fbCount); 72 dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n", 73 (mCurrentFrame.needsRedraw? "YES" : "NO"), 74 mCurrentFrame.mdpCount, sMaxPipesPerMixer); 75 if(isDisplaySplit(ctx, mDpy)) { 76 dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] " 77 "Right: [%d, %d, %d, %d] \n", 78 ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top, 79 ctx->listStats[mDpy].lRoi.right, 80 ctx->listStats[mDpy].lRoi.bottom, 81 ctx->listStats[mDpy].rRoi.left,ctx->listStats[mDpy].rRoi.top, 82 ctx->listStats[mDpy].rRoi.right, 83 ctx->listStats[mDpy].rRoi.bottom); 84 } else { 85 dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n", 86 ctx->listStats[mDpy].lRoi.left,ctx->listStats[mDpy].lRoi.top, 87 ctx->listStats[mDpy].lRoi.right, 88 ctx->listStats[mDpy].lRoi.bottom); 89 } 90 dumpsys_log(buf," --------------------------------------------- \n"); 91 dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype | Z \n"); 92 dumpsys_log(buf," --------------------------------------------- \n"); 93 for(int index = 0; index < mCurrentFrame.layerCount; index++ ) 94 dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n", 95 index, 96 (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"), 97 mCurrentFrame.layerToMDP[index], 98 (mCurrentFrame.isFBComposed[index] ? 99 (mCurrentFrame.drop[index] ? "DROP" : 100 (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"), 101 (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ : 102 mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder)); 103 dumpsys_log(buf,"\n"); 104 } 105 106 bool MDPComp::init(hwc_context_t *ctx) { 107 108 if(!ctx) { 109 ALOGE("%s: Invalid hwc context!!",__FUNCTION__); 110 return false; 111 } 112 113 char property[PROPERTY_VALUE_MAX]; 114 115 sEnabled = false; 116 if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) && 117 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) || 118 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) { 119 sEnabled = true; 120 } 121 122 sEnableMixedMode = true; 123 if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) && 124 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) || 125 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) { 126 sEnableMixedMode = false; 127 } 128 129 if(property_get("debug.mdpcomp.logs", property, NULL) > 0) { 130 if(atoi(property) != 0) 131 sDebugLogs = true; 132 } 133 134 sMaxPipesPerMixer = MAX_PIPES_PER_MIXER; 135 if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) { 136 int val = atoi(property); 137 if(val >= 0) 138 sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER); 139 } 140 141 if(ctx->mMDP.panel != MIPI_CMD_PANEL) { 142 // Idle invalidation is not necessary on command mode panels 143 long idle_timeout = DEFAULT_IDLE_TIME; 144 if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) { 145 if(atoi(property) != 0) 146 idle_timeout = atoi(property); 147 } 148 149 //create Idle Invalidator only when not disabled through property 150 if(idle_timeout != -1) 151 idleInvalidator = IdleInvalidator::getInstance(); 152 153 if(idleInvalidator == NULL) { 154 ALOGE("%s: failed to instantiate idleInvalidator object", 155 __FUNCTION__); 156 } else { 157 idleInvalidator->init(timeout_handler, ctx, 158 (unsigned int)idle_timeout); 159 } 160 } 161 162 if(!qdutils::MDPVersion::getInstance().isSrcSplit() && 163 property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 && 164 (!strncmp(property, "1", PROPERTY_VALUE_MAX) || 165 !strncasecmp(property,"true", PROPERTY_VALUE_MAX))) { 166 sEnable4k2kYUVSplit = true; 167 } 168 return true; 169 } 170 171 void MDPComp::reset(hwc_context_t *ctx) { 172 const int numLayers = ctx->listStats[mDpy].numAppLayers; 173 mCurrentFrame.reset(numLayers); 174 ctx->mOverlay->clear(mDpy); 175 ctx->mLayerRotMap[mDpy]->clear(); 176 } 177 178 void MDPComp::timeout_handler(void *udata) { 179 struct hwc_context_t* ctx = (struct hwc_context_t*)(udata); 180 181 if(!ctx) { 182 ALOGE("%s: received empty data in timer callback", __FUNCTION__); 183 return; 184 } 185 Locker::Autolock _l(ctx->mDrawLock); 186 // Handle timeout event only if the previous composition is MDP or MIXED. 187 if(!sHandleTimeout) { 188 ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__); 189 return; 190 } 191 if(!ctx->proc) { 192 ALOGE("%s: HWC proc not registered", __FUNCTION__); 193 return; 194 } 195 sIdleFallBack = true; 196 /* Trigger SF to redraw the current frame */ 197 ctx->proc->invalidate(ctx->proc); 198 } 199 200 void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx, 201 hwc_display_contents_1_t* list) { 202 LayerProp *layerProp = ctx->layerProp[mDpy]; 203 204 for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) { 205 hwc_layer_1_t* layer = &(list->hwLayers[index]); 206 if(!mCurrentFrame.isFBComposed[index]) { 207 layerProp[index].mFlags |= HWC_MDPCOMP; 208 layer->compositionType = HWC_OVERLAY; 209 layer->hints |= HWC_HINT_CLEAR_FB; 210 } else { 211 /* Drop the layer when its already present in FB OR when it lies 212 * outside frame's ROI */ 213 if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) { 214 layer->compositionType = HWC_OVERLAY; 215 } 216 } 217 } 218 } 219 220 void MDPComp::setRedraw(hwc_context_t *ctx, 221 hwc_display_contents_1_t* list) { 222 mCurrentFrame.needsRedraw = false; 223 if(!mCachedFrame.isSameFrame(mCurrentFrame, list) || 224 (list->flags & HWC_GEOMETRY_CHANGED) || 225 isSkipPresent(ctx, mDpy)) { 226 mCurrentFrame.needsRedraw = true; 227 } 228 } 229 230 MDPComp::FrameInfo::FrameInfo() { 231 memset(&mdpToLayer, 0, sizeof(mdpToLayer)); 232 reset(0); 233 } 234 235 void MDPComp::FrameInfo::reset(const int& numLayers) { 236 for(int i = 0 ; i < MAX_PIPES_PER_MIXER; i++ ) { 237 if(mdpToLayer[i].pipeInfo) { 238 delete mdpToLayer[i].pipeInfo; 239 mdpToLayer[i].pipeInfo = NULL; 240 //We dont own the rotator 241 mdpToLayer[i].rot = NULL; 242 } 243 } 244 245 memset(&mdpToLayer, 0, sizeof(mdpToLayer)); 246 memset(&layerToMDP, -1, sizeof(layerToMDP)); 247 memset(&isFBComposed, 1, sizeof(isFBComposed)); 248 249 layerCount = numLayers; 250 fbCount = numLayers; 251 mdpCount = 0; 252 needsRedraw = true; 253 fbZ = -1; 254 } 255 256 void MDPComp::FrameInfo::map() { 257 // populate layer and MDP maps 258 int mdpIdx = 0; 259 for(int idx = 0; idx < layerCount; idx++) { 260 if(!isFBComposed[idx]) { 261 mdpToLayer[mdpIdx].listIndex = idx; 262 layerToMDP[idx] = mdpIdx++; 263 } 264 } 265 } 266 267 MDPComp::LayerCache::LayerCache() { 268 reset(); 269 } 270 271 void MDPComp::LayerCache::reset() { 272 memset(&hnd, 0, sizeof(hnd)); 273 memset(&isFBComposed, true, sizeof(isFBComposed)); 274 memset(&drop, false, sizeof(drop)); 275 layerCount = 0; 276 } 277 278 void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) { 279 const int numAppLayers = (int)list->numHwLayers - 1; 280 for(int i = 0; i < numAppLayers; i++) { 281 hnd[i] = list->hwLayers[i].handle; 282 } 283 } 284 285 void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) { 286 layerCount = curFrame.layerCount; 287 memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed)); 288 memcpy(&drop, &curFrame.drop, sizeof(drop)); 289 } 290 291 bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame, 292 hwc_display_contents_1_t* list) { 293 if(layerCount != curFrame.layerCount) 294 return false; 295 for(int i = 0; i < curFrame.layerCount; i++) { 296 if((curFrame.isFBComposed[i] != isFBComposed[i]) || 297 (curFrame.drop[i] != drop[i])) { 298 return false; 299 } 300 if(curFrame.isFBComposed[i] && 301 (hnd[i] != list->hwLayers[i].handle)){ 302 return false; 303 } 304 } 305 return true; 306 } 307 308 bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) { 309 private_handle_t *hnd = (private_handle_t *)layer->handle; 310 if((not isYuvBuffer(hnd) and has90Transform(layer)) or 311 (not isValidDimension(ctx,layer)) 312 //More conditions here, SKIP, sRGB+Blend etc 313 ) { 314 return false; 315 } 316 return true; 317 } 318 319 bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) { 320 private_handle_t *hnd = (private_handle_t *)layer->handle; 321 322 if(!hnd) { 323 if (layer->flags & HWC_COLOR_FILL) { 324 // Color layer 325 return true; 326 } 327 ALOGE("%s: layer handle is NULL", __FUNCTION__); 328 return false; 329 } 330 331 //XXX: Investigate doing this with pixel phase on MDSS 332 if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf)) 333 return false; 334 335 hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf); 336 hwc_rect_t dst = layer->displayFrame; 337 int crop_w = crop.right - crop.left; 338 int crop_h = crop.bottom - crop.top; 339 int dst_w = dst.right - dst.left; 340 int dst_h = dst.bottom - dst.top; 341 float w_scale = ((float)crop_w / (float)dst_w); 342 float h_scale = ((float)crop_h / (float)dst_h); 343 344 /* Workaround for MDP HW limitation in DSI command mode panels where 345 * FPS will not go beyond 30 if buffers on RGB pipes are of width or height 346 * less than 5 pixels 347 * There also is a HW limilation in MDP, minimum block size is 2x2 348 * Fallback to GPU if height is less than 2. 349 */ 350 if((crop_w < 5)||(crop_h < 5)) 351 return false; 352 353 if((w_scale > 1.0f) || (h_scale > 1.0f)) { 354 const uint32_t maxMDPDownscale = 355 qdutils::MDPVersion::getInstance().getMaxMDPDownscale(); 356 const float w_dscale = w_scale; 357 const float h_dscale = h_scale; 358 359 if(ctx->mMDP.version >= qdutils::MDSS_V5) { 360 361 if(!qdutils::MDPVersion::getInstance().supportsDecimation()) { 362 /* On targets that doesnt support Decimation (eg.,8x26) 363 * maximum downscale support is overlay pipe downscale. 364 */ 365 if(crop_w > MAX_DISPLAY_DIM || w_dscale > maxMDPDownscale || 366 h_dscale > maxMDPDownscale) 367 return false; 368 } else { 369 // Decimation on macrotile format layers is not supported. 370 if(isTileRendered(hnd)) { 371 /* MDP can read maximum MAX_DISPLAY_DIM width. 372 * Bail out if 373 * 1. Src crop > MAX_DISPLAY_DIM on nonsplit MDPComp 374 * 2. exceeds maximum downscale limit 375 */ 376 if(((crop_w > MAX_DISPLAY_DIM) && !sSrcSplitEnabled) || 377 w_dscale > maxMDPDownscale || 378 h_dscale > maxMDPDownscale) { 379 return false; 380 } 381 } else if(w_dscale > 64 || h_dscale > 64) 382 return false; 383 } 384 } else { //A-family 385 if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale) 386 return false; 387 } 388 } 389 390 if((w_scale < 1.0f) || (h_scale < 1.0f)) { 391 const uint32_t upscale = 392 qdutils::MDPVersion::getInstance().getMaxMDPUpscale(); 393 const float w_uscale = 1.0f / w_scale; 394 const float h_uscale = 1.0f / h_scale; 395 396 if(w_uscale > upscale || h_uscale > upscale) 397 return false; 398 } 399 400 return true; 401 } 402 403 bool MDPComp::isFrameDoable(hwc_context_t *ctx) { 404 bool ret = true; 405 406 if(!isEnabled()) { 407 ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__); 408 ret = false; 409 } else if(qdutils::MDPVersion::getInstance().is8x26() && 410 ctx->mVideoTransFlag && 411 isSecondaryConnected(ctx)) { 412 //1 Padding round to shift pipes across mixers 413 ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round", 414 __FUNCTION__); 415 ret = false; 416 } else if(isSecondaryConfiguring(ctx)) { 417 ALOGD_IF( isDebug(),"%s: External Display connection is pending", 418 __FUNCTION__); 419 ret = false; 420 } else if(ctx->isPaddingRound) { 421 ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d", 422 __FUNCTION__,mDpy); 423 ret = false; 424 } 425 return ret; 426 } 427 428 void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) { 429 hwc_rect_t roi = ctx->listStats[mDpy].lRoi; 430 fbRect = getIntersection(fbRect, roi); 431 } 432 433 /* 1) Identify layers that are not visible or lying outside the updating ROI and 434 * drop them from composition. 435 * 2) If we have a scaling layer which needs cropping against generated 436 * ROI, reset ROI to full resolution. */ 437 bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx, 438 hwc_display_contents_1_t* list) { 439 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 440 hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi; 441 442 for(int i = numAppLayers - 1; i >= 0; i--){ 443 if(!isValidRect(visibleRect)) { 444 mCurrentFrame.drop[i] = true; 445 mCurrentFrame.dropCount++; 446 continue; 447 } 448 449 const hwc_layer_1_t* layer = &list->hwLayers[i]; 450 hwc_rect_t dstRect = layer->displayFrame; 451 hwc_rect_t res = getIntersection(visibleRect, dstRect); 452 453 if(!isValidRect(res)) { 454 mCurrentFrame.drop[i] = true; 455 mCurrentFrame.dropCount++; 456 } else { 457 /* Reset frame ROI when any layer which needs scaling also needs ROI 458 * cropping */ 459 if(!isSameRect(res, dstRect) && needsScaling (layer)) { 460 ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__); 461 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop)); 462 mCurrentFrame.dropCount = 0; 463 return false; 464 } 465 466 /* deduct any opaque region from visibleRect */ 467 if (layer->blending == HWC_BLENDING_NONE) 468 visibleRect = deductRect(visibleRect, res); 469 } 470 } 471 return true; 472 } 473 474 /* Calculate ROI for the frame by accounting all the layer's dispalyFrame which 475 * are updating. If DirtyRegion is applicable, calculate it by accounting all 476 * the changing layer's dirtyRegion. */ 477 void MDPCompNonSplit::generateROI(hwc_context_t *ctx, 478 hwc_display_contents_1_t* list) { 479 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 480 if(!canPartialUpdate(ctx, list)) 481 return; 482 483 struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0}; 484 hwc_rect fullFrame = (struct hwc_rect) {0, 0,(int)ctx->dpyAttr[mDpy].xres, 485 (int)ctx->dpyAttr[mDpy].yres}; 486 487 for(int index = 0; index < numAppLayers; index++ ) { 488 hwc_layer_1_t* layer = &list->hwLayers[index]; 489 if ((mCachedFrame.hnd[index] != layer->handle) || 490 isYuvBuffer((private_handle_t *)layer->handle)) { 491 hwc_rect_t dst = layer->displayFrame; 492 hwc_rect_t updatingRect = dst; 493 494 #ifdef QCOM_BSP 495 if(!needsScaling(layer) && !layer->transform) 496 { 497 hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf); 498 int x_off = dst.left - src.left; 499 int y_off = dst.top - src.top; 500 updatingRect = moveRect(layer->dirtyRect, x_off, y_off); 501 } 502 #endif 503 504 roi = getUnion(roi, updatingRect); 505 } 506 } 507 508 /* No layer is updating. Still SF wants a refresh.*/ 509 if(!isValidRect(roi)) 510 return; 511 512 // Align ROI coordinates to panel restrictions 513 roi = getSanitizeROI(roi, fullFrame); 514 515 ctx->listStats[mDpy].lRoi = roi; 516 if(!validateAndApplyROI(ctx, list)) 517 resetROI(ctx, mDpy); 518 519 ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__, 520 ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top, 521 ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom); 522 } 523 524 void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) { 525 hwc_rect l_roi = ctx->listStats[mDpy].lRoi; 526 hwc_rect r_roi = ctx->listStats[mDpy].rRoi; 527 528 hwc_rect_t l_fbRect = getIntersection(fbRect, l_roi); 529 hwc_rect_t r_fbRect = getIntersection(fbRect, r_roi); 530 fbRect = getUnion(l_fbRect, r_fbRect); 531 } 532 /* 1) Identify layers that are not visible or lying outside BOTH the updating 533 * ROI's and drop them from composition. If a layer is spanning across both 534 * the halves of the screen but needed by only ROI, the non-contributing 535 * half will not be programmed for MDP. 536 * 2) If we have a scaling layer which needs cropping against generated 537 * ROI, reset ROI to full resolution. */ 538 bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx, 539 hwc_display_contents_1_t* list) { 540 541 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 542 543 hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi; 544 hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi; 545 546 for(int i = numAppLayers - 1; i >= 0; i--){ 547 if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR)) 548 { 549 mCurrentFrame.drop[i] = true; 550 mCurrentFrame.dropCount++; 551 continue; 552 } 553 554 const hwc_layer_1_t* layer = &list->hwLayers[i]; 555 hwc_rect_t dstRect = layer->displayFrame; 556 557 hwc_rect_t l_res = getIntersection(visibleRectL, dstRect); 558 hwc_rect_t r_res = getIntersection(visibleRectR, dstRect); 559 hwc_rect_t res = getUnion(l_res, r_res); 560 561 if(!isValidRect(l_res) && !isValidRect(r_res)) { 562 mCurrentFrame.drop[i] = true; 563 mCurrentFrame.dropCount++; 564 } else { 565 /* Reset frame ROI when any layer which needs scaling also needs ROI 566 * cropping */ 567 if(!isSameRect(res, dstRect) && needsScaling (layer)) { 568 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop)); 569 mCurrentFrame.dropCount = 0; 570 return false; 571 } 572 573 if (layer->blending == HWC_BLENDING_NONE) { 574 visibleRectL = deductRect(visibleRectL, l_res); 575 visibleRectR = deductRect(visibleRectR, r_res); 576 } 577 } 578 } 579 return true; 580 } 581 /* Calculate ROI for the frame by accounting all the layer's dispalyFrame which 582 * are updating. If DirtyRegion is applicable, calculate it by accounting all 583 * the changing layer's dirtyRegion. */ 584 void MDPCompSplit::generateROI(hwc_context_t *ctx, 585 hwc_display_contents_1_t* list) { 586 if(!canPartialUpdate(ctx, list)) 587 return; 588 589 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 590 int lSplit = getLeftSplit(ctx, mDpy); 591 592 int hw_h = (int)ctx->dpyAttr[mDpy].yres; 593 int hw_w = (int)ctx->dpyAttr[mDpy].xres; 594 595 struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h}; 596 struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h}; 597 598 struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0}; 599 struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0}; 600 601 for(int index = 0; index < numAppLayers; index++ ) { 602 hwc_layer_1_t* layer = &list->hwLayers[index]; 603 if ((mCachedFrame.hnd[index] != layer->handle) || 604 isYuvBuffer((private_handle_t *)layer->handle)) { 605 hwc_rect_t dst = layer->displayFrame; 606 hwc_rect_t updatingRect = dst; 607 608 #ifdef QCOM_BSP 609 if(!needsScaling(layer) && !layer->transform) 610 { 611 hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf); 612 int x_off = dst.left - src.left; 613 int y_off = dst.top - src.top; 614 updatingRect = moveRect(layer->dirtyRect, x_off, y_off); 615 } 616 #endif 617 618 hwc_rect_t l_dst = getIntersection(l_frame, updatingRect); 619 if(isValidRect(l_dst)) 620 l_roi = getUnion(l_roi, l_dst); 621 622 hwc_rect_t r_dst = getIntersection(r_frame, updatingRect); 623 if(isValidRect(r_dst)) 624 r_roi = getUnion(r_roi, r_dst); 625 } 626 } 627 628 /* For panels that cannot accept commands in both the interfaces, we cannot 629 * send two ROI's (for each half). We merge them into single ROI and split 630 * them across lSplit for MDP mixer use. The ROI's will be merged again 631 * finally before udpating the panel in the driver. */ 632 if(qdutils::MDPVersion::getInstance().needsROIMerge()) { 633 hwc_rect_t temp_roi = getUnion(l_roi, r_roi); 634 l_roi = getIntersection(temp_roi, l_frame); 635 r_roi = getIntersection(temp_roi, r_frame); 636 } 637 638 /* No layer is updating. Still SF wants a refresh. */ 639 if(!isValidRect(l_roi) && !isValidRect(r_roi)) 640 return; 641 642 l_roi = getSanitizeROI(l_roi, l_frame); 643 r_roi = getSanitizeROI(r_roi, r_frame); 644 645 ctx->listStats[mDpy].lRoi = l_roi; 646 ctx->listStats[mDpy].rRoi = r_roi; 647 648 if(!validateAndApplyROI(ctx, list)) 649 resetROI(ctx, mDpy); 650 651 ALOGD_IF(isDebug(),"%s: generated L_ROI: [%d, %d, %d, %d]" 652 "R_ROI: [%d, %d, %d, %d]", __FUNCTION__, 653 ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top, 654 ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom, 655 ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top, 656 ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom); 657 } 658 659 /* Checks for conditions where all the layers marked for MDP comp cannot be 660 * bypassed. On such conditions we try to bypass atleast YUV layers */ 661 bool MDPComp::tryFullFrame(hwc_context_t *ctx, 662 hwc_display_contents_1_t* list){ 663 664 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 665 int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres; 666 667 if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) { 668 ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy); 669 return false; 670 } 671 672 if(isSkipPresent(ctx, mDpy)) { 673 ALOGD_IF(isDebug(),"%s: SKIP present: %d", 674 __FUNCTION__, 675 isSkipPresent(ctx, mDpy)); 676 return false; 677 } 678 679 if(mDpy > HWC_DISPLAY_PRIMARY && (priDispW > MAX_DISPLAY_DIM) && 680 (ctx->dpyAttr[mDpy].xres < MAX_DISPLAY_DIM)) { 681 // Disable MDP comp on Secondary when the primary is highres panel and 682 // the secondary is a normal 1080p, because, MDP comp on secondary under 683 // in such usecase, decimation gets used for downscale and there will be 684 // a quality mismatch when there will be a fallback to GPU comp 685 ALOGD_IF(isDebug(), "%s: Disable MDP Compositon for Secondary Disp", 686 __FUNCTION__); 687 return false; 688 } 689 690 // check for action safe flag and downscale mode which requires scaling. 691 if(ctx->dpyAttr[mDpy].mActionSafePresent 692 || ctx->dpyAttr[mDpy].mDownScaleMode) { 693 ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__); 694 return false; 695 } 696 697 for(int i = 0; i < numAppLayers; ++i) { 698 hwc_layer_1_t* layer = &list->hwLayers[i]; 699 private_handle_t *hnd = (private_handle_t *)layer->handle; 700 701 if(isYuvBuffer(hnd) && has90Transform(layer)) { 702 if(!canUseRotator(ctx, mDpy)) { 703 ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d", 704 __FUNCTION__, mDpy); 705 return false; 706 } 707 } 708 709 //For 8x26 with panel width>1k, if RGB layer needs HFLIP fail mdp comp 710 // may not need it if Gfx pre-rotation can handle all flips & rotations 711 if(qdutils::MDPVersion::getInstance().is8x26() && 712 (ctx->dpyAttr[mDpy].xres > 1024) && 713 (layer->transform & HWC_TRANSFORM_FLIP_H) && 714 (!isYuvBuffer(hnd))) 715 return false; 716 } 717 718 if(ctx->mAD->isDoable()) { 719 return false; 720 } 721 722 //If all above hard conditions are met we can do full or partial MDP comp. 723 bool ret = false; 724 if(fullMDPComp(ctx, list)) { 725 ret = true; 726 } else if(partialMDPComp(ctx, list)) { 727 ret = true; 728 } 729 730 return ret; 731 } 732 733 bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 734 735 if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP) 736 return false; 737 738 //Will benefit presentation / secondary-only layer. 739 if((mDpy > HWC_DISPLAY_PRIMARY) && 740 (list->numHwLayers - 1) > MAX_SEC_LAYERS) { 741 ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__); 742 return false; 743 } 744 745 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 746 for(int i = 0; i < numAppLayers; i++) { 747 hwc_layer_1_t* layer = &list->hwLayers[i]; 748 if(not mCurrentFrame.drop[i] and 749 not isSupportedForMDPComp(ctx, layer)) { 750 ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__); 751 return false; 752 } 753 754 //For 8x26, if there is only one layer which needs scale for secondary 755 //while no scale for primary display, DMA pipe is occupied by primary. 756 //If need to fall back to GLES composition, virtual display lacks DMA 757 //pipe and error is reported. 758 if(qdutils::MDPVersion::getInstance().is8x26() && 759 mDpy >= HWC_DISPLAY_EXTERNAL && 760 qhwc::needsScaling(layer)) 761 return false; 762 } 763 764 mCurrentFrame.fbCount = 0; 765 memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop, 766 sizeof(mCurrentFrame.isFBComposed)); 767 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount - 768 mCurrentFrame.dropCount; 769 770 if(sEnable4k2kYUVSplit){ 771 adjustForSourceSplit(ctx, list); 772 } 773 774 if(!postHeuristicsHandling(ctx, list)) { 775 ALOGD_IF(isDebug(), "post heuristic handling failed"); 776 reset(ctx); 777 return false; 778 } 779 ALOGD_IF(sSimulationFlags,"%s: FULL_MDP_COMP SUCCEEDED", 780 __FUNCTION__); 781 return true; 782 } 783 784 bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) 785 { 786 if(!sEnableMixedMode) { 787 //Mixed mode is disabled. No need to even try caching. 788 return false; 789 } 790 791 bool ret = false; 792 if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first 793 ret = loadBasedComp(ctx, list) or 794 cacheBasedComp(ctx, list); 795 } else { 796 ret = cacheBasedComp(ctx, list) or 797 loadBasedComp(ctx, list); 798 } 799 800 return ret; 801 } 802 803 bool MDPComp::cacheBasedComp(hwc_context_t *ctx, 804 hwc_display_contents_1_t* list) { 805 if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP) 806 return false; 807 808 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 809 mCurrentFrame.reset(numAppLayers); 810 updateLayerCache(ctx, list); 811 812 //If an MDP marked layer is unsupported cannot do partial MDP Comp 813 for(int i = 0; i < numAppLayers; i++) { 814 if(!mCurrentFrame.isFBComposed[i]) { 815 hwc_layer_1_t* layer = &list->hwLayers[i]; 816 if(not isSupportedForMDPComp(ctx, layer)) { 817 ALOGD_IF(isDebug(), "%s: Unsupported layer in list", 818 __FUNCTION__); 819 reset(ctx); 820 return false; 821 } 822 } 823 } 824 825 updateYUV(ctx, list, false /*secure only*/); 826 bool ret = markLayersForCaching(ctx, list); //sets up fbZ also 827 if(!ret) { 828 ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy); 829 reset(ctx); 830 return false; 831 } 832 833 int mdpCount = mCurrentFrame.mdpCount; 834 835 if(sEnable4k2kYUVSplit){ 836 adjustForSourceSplit(ctx, list); 837 } 838 839 //Will benefit cases where a video has non-updating background. 840 if((mDpy > HWC_DISPLAY_PRIMARY) and 841 (mdpCount > MAX_SEC_LAYERS)) { 842 ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__); 843 reset(ctx); 844 return false; 845 } 846 847 if(!postHeuristicsHandling(ctx, list)) { 848 ALOGD_IF(isDebug(), "post heuristic handling failed"); 849 reset(ctx); 850 return false; 851 } 852 ALOGD_IF(sSimulationFlags,"%s: CACHE_MDP_COMP SUCCEEDED", 853 __FUNCTION__); 854 855 return true; 856 } 857 858 bool MDPComp::loadBasedComp(hwc_context_t *ctx, 859 hwc_display_contents_1_t* list) { 860 if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP) 861 return false; 862 863 if(not isLoadBasedCompDoable(ctx)) { 864 return false; 865 } 866 867 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 868 const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount; 869 const int stagesForMDP = min(sMaxPipesPerMixer, 870 ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT)); 871 872 int mdpBatchSize = stagesForMDP - 1; //1 stage for FB 873 int fbBatchSize = numNonDroppedLayers - mdpBatchSize; 874 int lastMDPSupportedIndex = numAppLayers; 875 int dropCount = 0; 876 877 //Find the minimum MDP batch size 878 for(int i = 0; i < numAppLayers;i++) { 879 if(mCurrentFrame.drop[i]) { 880 dropCount++; 881 continue; 882 } 883 hwc_layer_1_t* layer = &list->hwLayers[i]; 884 if(not isSupportedForMDPComp(ctx, layer)) { 885 lastMDPSupportedIndex = i; 886 mdpBatchSize = min(i - dropCount, stagesForMDP - 1); 887 fbBatchSize = numNonDroppedLayers - mdpBatchSize; 888 break; 889 } 890 } 891 892 ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d " 893 "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize, 894 mCurrentFrame.dropCount); 895 896 //Start at a point where the fb batch should at least have 2 layers, for 897 //this mode to be justified. 898 while(fbBatchSize < 2) { 899 ++fbBatchSize; 900 --mdpBatchSize; 901 } 902 903 //If there are no layers for MDP, this mode doesnt make sense. 904 if(mdpBatchSize < 1) { 905 ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch", 906 __FUNCTION__); 907 return false; 908 } 909 910 mCurrentFrame.reset(numAppLayers); 911 912 //Try with successively smaller mdp batch sizes until we succeed or reach 1 913 while(mdpBatchSize > 0) { 914 //Mark layers for MDP comp 915 int mdpBatchLeft = mdpBatchSize; 916 for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) { 917 if(mCurrentFrame.drop[i]) { 918 continue; 919 } 920 mCurrentFrame.isFBComposed[i] = false; 921 --mdpBatchLeft; 922 } 923 924 mCurrentFrame.fbZ = mdpBatchSize; 925 mCurrentFrame.fbCount = fbBatchSize; 926 mCurrentFrame.mdpCount = mdpBatchSize; 927 928 ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d", 929 __FUNCTION__, mdpBatchSize, fbBatchSize, 930 mCurrentFrame.dropCount); 931 932 if(postHeuristicsHandling(ctx, list)) { 933 ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded", 934 __FUNCTION__); 935 ALOGD_IF(sSimulationFlags,"%s: LOAD_MDP_COMP SUCCEEDED", 936 __FUNCTION__); 937 return true; 938 } 939 940 reset(ctx); 941 --mdpBatchSize; 942 ++fbBatchSize; 943 } 944 945 return false; 946 } 947 948 bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) { 949 if(mDpy or isSecurePresent(ctx, mDpy) or 950 isYuvPresent(ctx, mDpy)) { 951 return false; 952 } 953 return true; 954 } 955 956 bool MDPComp::canPartialUpdate(hwc_context_t *ctx, 957 hwc_display_contents_1_t* list){ 958 if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() || 959 isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) || 960 mDpy ) { 961 return false; 962 } 963 return true; 964 } 965 966 bool MDPComp::tryVideoOnly(hwc_context_t *ctx, 967 hwc_display_contents_1_t* list) { 968 const bool secureOnly = true; 969 return videoOnlyComp(ctx, list, not secureOnly) or 970 videoOnlyComp(ctx, list, secureOnly); 971 } 972 973 bool MDPComp::videoOnlyComp(hwc_context_t *ctx, 974 hwc_display_contents_1_t* list, bool secureOnly) { 975 if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY) 976 return false; 977 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 978 979 mCurrentFrame.reset(numAppLayers); 980 mCurrentFrame.fbCount -= mCurrentFrame.dropCount; 981 updateYUV(ctx, list, secureOnly); 982 int mdpCount = mCurrentFrame.mdpCount; 983 984 if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) { 985 reset(ctx); 986 return false; 987 } 988 989 /* Bail out if we are processing only secured video layers 990 * and we dont have any */ 991 if(!isSecurePresent(ctx, mDpy) && secureOnly){ 992 reset(ctx); 993 return false; 994 } 995 996 if(mCurrentFrame.fbCount) 997 mCurrentFrame.fbZ = mCurrentFrame.mdpCount; 998 999 if(sEnable4k2kYUVSplit){ 1000 adjustForSourceSplit(ctx, list); 1001 } 1002 1003 if(!postHeuristicsHandling(ctx, list)) { 1004 ALOGD_IF(isDebug(), "post heuristic handling failed"); 1005 reset(ctx); 1006 return false; 1007 } 1008 1009 ALOGD_IF(sSimulationFlags,"%s: VIDEO_ONLY_COMP SUCCEEDED", 1010 __FUNCTION__); 1011 return true; 1012 } 1013 1014 /* Checks for conditions where YUV layers cannot be bypassed */ 1015 bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) { 1016 if(isSkipLayer(layer)) { 1017 ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy); 1018 return false; 1019 } 1020 1021 if(layer->transform & HWC_TRANSFORM_ROT_90 && !canUseRotator(ctx,mDpy)) { 1022 ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__); 1023 return false; 1024 } 1025 1026 if(isSecuring(ctx, layer)) { 1027 ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__); 1028 return false; 1029 } 1030 1031 if(!isValidDimension(ctx, layer)) { 1032 ALOGD_IF(isDebug(), "%s: Buffer is of invalid width", 1033 __FUNCTION__); 1034 return false; 1035 } 1036 1037 if(layer->planeAlpha < 0xFF) { 1038 ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\ 1039 in video only mode", 1040 __FUNCTION__); 1041 return false; 1042 } 1043 1044 return true; 1045 } 1046 1047 /* starts at fromIndex and check for each layer to find 1048 * if it it has overlapping with any Updating layer above it in zorder 1049 * till the end of the batch. returns true if it finds any intersection */ 1050 bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list, 1051 int fromIndex, int toIndex) { 1052 for(int i = fromIndex; i < toIndex; i++) { 1053 if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) { 1054 if(intersectingUpdatingLayers(list, i+1, toIndex, i)) { 1055 return false; 1056 } 1057 } 1058 } 1059 return true; 1060 } 1061 1062 /* Checks if given layer at targetLayerIndex has any 1063 * intersection with all the updating layers in beween 1064 * fromIndex and toIndex. Returns true if it finds intersectiion */ 1065 bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list, 1066 int fromIndex, int toIndex, int targetLayerIndex) { 1067 for(int i = fromIndex; i <= toIndex; i++) { 1068 if(!mCurrentFrame.isFBComposed[i]) { 1069 if(areLayersIntersecting(&list->hwLayers[i], 1070 &list->hwLayers[targetLayerIndex])) { 1071 return true; 1072 } 1073 } 1074 } 1075 return false; 1076 } 1077 1078 int MDPComp::getBatch(hwc_display_contents_1_t* list, 1079 int& maxBatchStart, int& maxBatchEnd, 1080 int& maxBatchCount) { 1081 int i = 0; 1082 int fbZOrder =-1; 1083 int droppedLayerCt = 0; 1084 while (i < mCurrentFrame.layerCount) { 1085 int batchCount = 0; 1086 int batchStart = i; 1087 int batchEnd = i; 1088 /* Adjust batch Z order with the dropped layers so far */ 1089 int fbZ = batchStart - droppedLayerCt; 1090 int firstZReverseIndex = -1; 1091 int updatingLayersAbove = 0;//Updating layer count in middle of batch 1092 while(i < mCurrentFrame.layerCount) { 1093 if(!mCurrentFrame.isFBComposed[i]) { 1094 if(!batchCount) { 1095 i++; 1096 break; 1097 } 1098 updatingLayersAbove++; 1099 i++; 1100 continue; 1101 } else { 1102 if(mCurrentFrame.drop[i]) { 1103 i++; 1104 droppedLayerCt++; 1105 continue; 1106 } else if(updatingLayersAbove <= 0) { 1107 batchCount++; 1108 batchEnd = i; 1109 i++; 1110 continue; 1111 } else { //Layer is FBComposed, not a drop & updatingLayer > 0 1112 1113 // We have a valid updating layer already. If layer-i not 1114 // have overlapping with all updating layers in between 1115 // batch-start and i, then we can add layer i to batch. 1116 if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) { 1117 batchCount++; 1118 batchEnd = i; 1119 i++; 1120 continue; 1121 } else if(canPushBatchToTop(list, batchStart, i)) { 1122 //If All the non-updating layers with in this batch 1123 //does not have intersection with the updating layers 1124 //above in z-order, then we can safely move the batch to 1125 //higher z-order. Increment fbZ as it is moving up. 1126 if( firstZReverseIndex < 0) { 1127 firstZReverseIndex = i; 1128 } 1129 batchCount++; 1130 batchEnd = i; 1131 fbZ += updatingLayersAbove; 1132 i++; 1133 updatingLayersAbove = 0; 1134 continue; 1135 } else { 1136 //both failed.start the loop again from here. 1137 if(firstZReverseIndex >= 0) { 1138 i = firstZReverseIndex; 1139 } 1140 break; 1141 } 1142 } 1143 } 1144 } 1145 if(batchCount > maxBatchCount) { 1146 maxBatchCount = batchCount; 1147 maxBatchStart = batchStart; 1148 maxBatchEnd = batchEnd; 1149 fbZOrder = fbZ; 1150 } 1151 } 1152 return fbZOrder; 1153 } 1154 1155 bool MDPComp::markLayersForCaching(hwc_context_t* ctx, 1156 hwc_display_contents_1_t* list) { 1157 /* Idea is to keep as many non-updating(cached) layers in FB and 1158 * send rest of them through MDP. This is done in 2 steps. 1159 * 1. Find the maximum contiguous batch of non-updating layers. 1160 * 2. See if we can improve this batch size for caching by adding 1161 * opaque layers around the batch, if they don't have 1162 * any overlapping with the updating layers in between. 1163 * NEVER mark an updating layer for caching. 1164 * But cached ones can be marked for MDP */ 1165 1166 int maxBatchStart = -1; 1167 int maxBatchEnd = -1; 1168 int maxBatchCount = 0; 1169 int fbZ = -1; 1170 1171 /* Nothing is cached. No batching needed */ 1172 if(mCurrentFrame.fbCount == 0) { 1173 return true; 1174 } 1175 1176 /* No MDP comp layers, try to use other comp modes */ 1177 if(mCurrentFrame.mdpCount == 0) { 1178 return false; 1179 } 1180 1181 fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount); 1182 1183 /* reset rest of the layers lying inside ROI for MDP comp */ 1184 for(int i = 0; i < mCurrentFrame.layerCount; i++) { 1185 hwc_layer_1_t* layer = &list->hwLayers[i]; 1186 if((i < maxBatchStart || i > maxBatchEnd) && 1187 mCurrentFrame.isFBComposed[i]){ 1188 if(!mCurrentFrame.drop[i]){ 1189 //If an unsupported layer is being attempted to 1190 //be pulled out we should fail 1191 if(not isSupportedForMDPComp(ctx, layer)) { 1192 return false; 1193 } 1194 mCurrentFrame.isFBComposed[i] = false; 1195 } 1196 } 1197 } 1198 1199 // update the frame data 1200 mCurrentFrame.fbZ = fbZ; 1201 mCurrentFrame.fbCount = maxBatchCount; 1202 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 1203 mCurrentFrame.fbCount - mCurrentFrame.dropCount; 1204 1205 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, 1206 mCurrentFrame.fbCount); 1207 1208 return true; 1209 } 1210 1211 void MDPComp::updateLayerCache(hwc_context_t* ctx, 1212 hwc_display_contents_1_t* list) { 1213 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 1214 int fbCount = 0; 1215 1216 for(int i = 0; i < numAppLayers; i++) { 1217 if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) { 1218 if(!mCurrentFrame.drop[i]) 1219 fbCount++; 1220 mCurrentFrame.isFBComposed[i] = true; 1221 } else { 1222 mCurrentFrame.isFBComposed[i] = false; 1223 } 1224 } 1225 1226 mCurrentFrame.fbCount = fbCount; 1227 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount 1228 - mCurrentFrame.dropCount; 1229 1230 ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d" 1231 ,__FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount, 1232 mCurrentFrame.dropCount); 1233 } 1234 1235 void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list, 1236 bool secureOnly) { 1237 int nYuvCount = ctx->listStats[mDpy].yuvCount; 1238 for(int index = 0;index < nYuvCount; index++){ 1239 int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index]; 1240 hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex]; 1241 1242 if(!isYUVDoable(ctx, layer)) { 1243 if(!mCurrentFrame.isFBComposed[nYuvIndex]) { 1244 mCurrentFrame.isFBComposed[nYuvIndex] = true; 1245 mCurrentFrame.fbCount++; 1246 } 1247 } else { 1248 if(mCurrentFrame.isFBComposed[nYuvIndex]) { 1249 private_handle_t *hnd = (private_handle_t *)layer->handle; 1250 if(!secureOnly || isSecureBuffer(hnd)) { 1251 mCurrentFrame.isFBComposed[nYuvIndex] = false; 1252 mCurrentFrame.fbCount--; 1253 } 1254 } 1255 } 1256 } 1257 1258 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 1259 mCurrentFrame.fbCount - mCurrentFrame.dropCount; 1260 ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__, 1261 mCurrentFrame.fbCount); 1262 } 1263 1264 hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx, 1265 hwc_display_contents_1_t* list){ 1266 hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0}; 1267 1268 /* Update only the region of FB needed for composition */ 1269 for(int i = 0; i < mCurrentFrame.layerCount; i++ ) { 1270 if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) { 1271 hwc_layer_1_t* layer = &list->hwLayers[i]; 1272 hwc_rect_t dst = layer->displayFrame; 1273 fbRect = getUnion(fbRect, dst); 1274 } 1275 } 1276 trimAgainstROI(ctx, fbRect); 1277 return fbRect; 1278 } 1279 1280 bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx, 1281 hwc_display_contents_1_t* list) { 1282 1283 //Capability checks 1284 if(!resourceCheck()) { 1285 ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__); 1286 return false; 1287 } 1288 1289 //Limitations checks 1290 if(!hwLimitationsCheck(ctx, list)) { 1291 ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__); 1292 return false; 1293 } 1294 1295 //Configure framebuffer first if applicable 1296 if(mCurrentFrame.fbZ >= 0) { 1297 hwc_rect_t fbRect = getUpdatingFBRect(ctx, list); 1298 if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ)) 1299 { 1300 ALOGD_IF(isDebug(), "%s configure framebuffer failed", 1301 __FUNCTION__); 1302 return false; 1303 } 1304 } 1305 1306 mCurrentFrame.map(); 1307 1308 if(!allocLayerPipes(ctx, list)) { 1309 ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__); 1310 return false; 1311 } 1312 1313 for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; 1314 index++) { 1315 if(!mCurrentFrame.isFBComposed[index]) { 1316 int mdpIndex = mCurrentFrame.layerToMDP[index]; 1317 hwc_layer_1_t* layer = &list->hwLayers[index]; 1318 1319 //Leave fbZ for framebuffer. CACHE/GLES layers go here. 1320 if(mdpNextZOrder == mCurrentFrame.fbZ) { 1321 mdpNextZOrder++; 1322 } 1323 MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1324 cur_pipe->zOrder = mdpNextZOrder++; 1325 1326 private_handle_t *hnd = (private_handle_t *)layer->handle; 1327 if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){ 1328 if(configure4k2kYuv(ctx, layer, 1329 mCurrentFrame.mdpToLayer[mdpIndex]) 1330 != 0 ){ 1331 ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \ 1332 for layer %d",__FUNCTION__, index); 1333 return false; 1334 } 1335 else{ 1336 mdpNextZOrder++; 1337 } 1338 continue; 1339 } 1340 if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){ 1341 ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \ 1342 layer %d",__FUNCTION__, index); 1343 return false; 1344 } 1345 } 1346 } 1347 1348 if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) { 1349 ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d" 1350 ,__FUNCTION__, mDpy); 1351 return false; 1352 } 1353 1354 setRedraw(ctx, list); 1355 return true; 1356 } 1357 1358 bool MDPComp::resourceCheck() { 1359 const bool fbUsed = mCurrentFrame.fbCount; 1360 if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) { 1361 ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__); 1362 return false; 1363 } 1364 return true; 1365 } 1366 1367 bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx, 1368 hwc_display_contents_1_t* list) { 1369 1370 //A-family hw limitation: 1371 //If a layer need alpha scaling, MDP can not support. 1372 if(ctx->mMDP.version < qdutils::MDSS_V5) { 1373 for(int i = 0; i < mCurrentFrame.layerCount; ++i) { 1374 if(!mCurrentFrame.isFBComposed[i] && 1375 isAlphaScaled( &list->hwLayers[i])) { 1376 ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__); 1377 return false; 1378 } 1379 } 1380 } 1381 1382 // On 8x26 & 8974 hw, we have a limitation of downscaling+blending. 1383 //If multiple layers requires downscaling and also they are overlapping 1384 //fall back to GPU since MDSS can not handle it. 1385 if(qdutils::MDPVersion::getInstance().is8x74v2() || 1386 qdutils::MDPVersion::getInstance().is8x26()) { 1387 for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) { 1388 hwc_layer_1_t* botLayer = &list->hwLayers[i]; 1389 if(!mCurrentFrame.isFBComposed[i] && 1390 isDownscaleRequired(botLayer)) { 1391 //if layer-i is marked for MDP and needs downscaling 1392 //check if any MDP layer on top of i & overlaps with layer-i 1393 for(int j = i+1; j < mCurrentFrame.layerCount; ++j) { 1394 hwc_layer_1_t* topLayer = &list->hwLayers[j]; 1395 if(!mCurrentFrame.isFBComposed[j] && 1396 isDownscaleRequired(topLayer)) { 1397 hwc_rect_t r = getIntersection(botLayer->displayFrame, 1398 topLayer->displayFrame); 1399 if(isValidRect(r)) 1400 return false; 1401 } 1402 } 1403 } 1404 } 1405 } 1406 return true; 1407 } 1408 1409 int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 1410 int ret = 0; 1411 const int numLayers = ctx->listStats[mDpy].numAppLayers; 1412 char property[PROPERTY_VALUE_MAX]; 1413 1414 if(property_get("debug.hwc.simulate", property, NULL) > 0) { 1415 int currentFlags = atoi(property); 1416 if(currentFlags != sSimulationFlags) { 1417 sSimulationFlags = currentFlags; 1418 ALOGE("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__, 1419 sSimulationFlags, sSimulationFlags); 1420 } 1421 } 1422 1423 //Do not cache the information for next draw cycle. 1424 if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) { 1425 ALOGI("%s: Unsupported layer count for mdp composition", 1426 __FUNCTION__); 1427 mCachedFrame.reset(); 1428 return -1; 1429 } 1430 1431 //reset old data 1432 mCurrentFrame.reset(numLayers); 1433 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop)); 1434 mCurrentFrame.dropCount = 0; 1435 1436 // Detect the start of animation and fall back to GPU only once to cache 1437 // all the layers in FB and display FB content untill animation completes. 1438 if(ctx->listStats[mDpy].isDisplayAnimating) { 1439 mCurrentFrame.needsRedraw = false; 1440 if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) { 1441 mCurrentFrame.needsRedraw = true; 1442 ctx->mAnimationState[mDpy] = ANIMATION_STARTED; 1443 } 1444 setMDPCompLayerFlags(ctx, list); 1445 mCachedFrame.updateCounts(mCurrentFrame); 1446 ret = -1; 1447 return ret; 1448 } else { 1449 ctx->mAnimationState[mDpy] = ANIMATION_STOPPED; 1450 } 1451 1452 //Hard conditions, if not met, cannot do MDP comp 1453 if(isFrameDoable(ctx)) { 1454 generateROI(ctx, list); 1455 1456 if(tryFullFrame(ctx, list) || tryVideoOnly(ctx, list)) { 1457 setMDPCompLayerFlags(ctx, list); 1458 } else { 1459 resetROI(ctx, mDpy); 1460 reset(ctx); 1461 memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop)); 1462 mCurrentFrame.dropCount = 0; 1463 ret = -1; 1464 } 1465 } else { 1466 ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame", 1467 __FUNCTION__); 1468 ret = -1; 1469 } 1470 1471 if(isDebug()) { 1472 ALOGD("GEOMETRY change: %d", 1473 (list->flags & HWC_GEOMETRY_CHANGED)); 1474 android::String8 sDump(""); 1475 dump(sDump, ctx); 1476 ALOGD("%s",sDump.string()); 1477 } 1478 1479 mCachedFrame.cacheAll(list); 1480 mCachedFrame.updateCounts(mCurrentFrame); 1481 return ret; 1482 } 1483 1484 bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) { 1485 1486 bool bRet = true; 1487 int mdpIndex = mCurrentFrame.layerToMDP[index]; 1488 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 1489 info.pipeInfo = new MdpYUVPipeInfo; 1490 info.rot = NULL; 1491 MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo; 1492 1493 pipe_info.lIndex = ovutils::OV_INVALID; 1494 pipe_info.rIndex = ovutils::OV_INVALID; 1495 1496 Overlay::PipeSpecs pipeSpecs; 1497 pipeSpecs.formatClass = Overlay::FORMAT_YUV; 1498 pipeSpecs.needsScaling = true; 1499 pipeSpecs.dpy = mDpy; 1500 pipeSpecs.fb = false; 1501 1502 pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs); 1503 if(pipe_info.lIndex == ovutils::OV_INVALID){ 1504 bRet = false; 1505 ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed", 1506 __FUNCTION__); 1507 } 1508 pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs); 1509 if(pipe_info.rIndex == ovutils::OV_INVALID){ 1510 bRet = false; 1511 ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed", 1512 __FUNCTION__); 1513 } 1514 return bRet; 1515 } 1516 //=============MDPCompNonSplit================================================== 1517 1518 void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx, 1519 hwc_display_contents_1_t* list) { 1520 //If 4k2k Yuv layer split is possible, and if 1521 //fbz is above 4k2k layer, increment fb zorder by 1 1522 //as we split 4k2k layer and increment zorder for right half 1523 //of the layer 1524 if(mCurrentFrame.fbZ >= 0) { 1525 for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; 1526 index++) { 1527 if(!mCurrentFrame.isFBComposed[index]) { 1528 if(mdpNextZOrder == mCurrentFrame.fbZ) { 1529 mdpNextZOrder++; 1530 } 1531 mdpNextZOrder++; 1532 hwc_layer_1_t* layer = &list->hwLayers[index]; 1533 private_handle_t *hnd = (private_handle_t *)layer->handle; 1534 if(is4kx2kYuvBuffer(hnd)) { 1535 if(mdpNextZOrder <= mCurrentFrame.fbZ) 1536 mCurrentFrame.fbZ += 1; 1537 mdpNextZOrder++; 1538 //As we split 4kx2k yuv layer and program to 2 VG pipes 1539 //(if available) increase mdpcount by 1. 1540 mCurrentFrame.mdpCount++; 1541 } 1542 } 1543 } 1544 } 1545 } 1546 1547 /* 1548 * Configures pipe(s) for MDP composition 1549 */ 1550 int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 1551 PipeLayerPair& PipeLayerPair) { 1552 MdpPipeInfoNonSplit& mdp_info = 1553 *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo)); 1554 eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION; 1555 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 1556 eIsFg isFg = IS_FG_OFF; 1557 eDest dest = mdp_info.index; 1558 1559 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d", 1560 __FUNCTION__, layer, zOrder, dest); 1561 1562 return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest, 1563 &PipeLayerPair.rot); 1564 } 1565 1566 bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx, 1567 hwc_display_contents_1_t* list) { 1568 for(int index = 0; index < mCurrentFrame.layerCount; index++) { 1569 1570 if(mCurrentFrame.isFBComposed[index]) continue; 1571 1572 hwc_layer_1_t* layer = &list->hwLayers[index]; 1573 private_handle_t *hnd = (private_handle_t *)layer->handle; 1574 if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){ 1575 if(allocSplitVGPipesfor4k2k(ctx, index)){ 1576 continue; 1577 } 1578 } 1579 1580 int mdpIndex = mCurrentFrame.layerToMDP[index]; 1581 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 1582 info.pipeInfo = new MdpPipeInfoNonSplit; 1583 info.rot = NULL; 1584 MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo; 1585 1586 Overlay::PipeSpecs pipeSpecs; 1587 pipeSpecs.formatClass = isYuvBuffer(hnd) ? 1588 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB; 1589 pipeSpecs.needsScaling = qhwc::needsScaling(layer) or 1590 (qdutils::MDPVersion::getInstance().is8x26() and 1591 ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024); 1592 pipeSpecs.dpy = mDpy; 1593 pipeSpecs.fb = false; 1594 1595 pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs); 1596 1597 if(pipe_info.index == ovutils::OV_INVALID) { 1598 ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__); 1599 return false; 1600 } 1601 } 1602 return true; 1603 } 1604 1605 int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer, 1606 PipeLayerPair& PipeLayerPair) { 1607 MdpYUVPipeInfo& mdp_info = 1608 *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo)); 1609 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 1610 eIsFg isFg = IS_FG_OFF; 1611 eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION; 1612 eDest lDest = mdp_info.lIndex; 1613 eDest rDest = mdp_info.rIndex; 1614 1615 return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, 1616 lDest, rDest, &PipeLayerPair.rot); 1617 } 1618 1619 bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 1620 1621 if(!isEnabled()) { 1622 ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__); 1623 return true; 1624 } 1625 1626 if(!ctx || !list) { 1627 ALOGE("%s: invalid contxt or list",__FUNCTION__); 1628 return false; 1629 } 1630 1631 if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) { 1632 ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__); 1633 return true; 1634 } 1635 1636 // Set the Handle timeout to true for MDP or MIXED composition. 1637 if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) { 1638 sHandleTimeout = true; 1639 } 1640 1641 overlay::Overlay& ov = *ctx->mOverlay; 1642 LayerProp *layerProp = ctx->layerProp[mDpy]; 1643 1644 int numHwLayers = ctx->listStats[mDpy].numAppLayers; 1645 for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ ) 1646 { 1647 if(mCurrentFrame.isFBComposed[i]) continue; 1648 1649 hwc_layer_1_t *layer = &list->hwLayers[i]; 1650 private_handle_t *hnd = (private_handle_t *)layer->handle; 1651 if(!hnd) { 1652 if (!(layer->flags & HWC_COLOR_FILL)) { 1653 ALOGE("%s handle null", __FUNCTION__); 1654 return false; 1655 } 1656 // No PLAY for Color layer 1657 layerProp[i].mFlags &= ~HWC_MDPCOMP; 1658 continue; 1659 } 1660 1661 int mdpIndex = mCurrentFrame.layerToMDP[i]; 1662 1663 if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit) 1664 { 1665 MdpYUVPipeInfo& pipe_info = 1666 *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1667 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 1668 ovutils::eDest indexL = pipe_info.lIndex; 1669 ovutils::eDest indexR = pipe_info.rIndex; 1670 int fd = hnd->fd; 1671 uint32_t offset = (uint32_t)hnd->offset; 1672 if(rot) { 1673 rot->queueBuffer(fd, offset); 1674 fd = rot->getDstMemId(); 1675 offset = rot->getDstOffset(); 1676 } 1677 if(indexL != ovutils::OV_INVALID) { 1678 ovutils::eDest destL = (ovutils::eDest)indexL; 1679 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1680 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 1681 if (!ov.queueBuffer(fd, offset, destL)) { 1682 ALOGE("%s: queueBuffer failed for display:%d", 1683 __FUNCTION__, mDpy); 1684 return false; 1685 } 1686 } 1687 1688 if(indexR != ovutils::OV_INVALID) { 1689 ovutils::eDest destR = (ovutils::eDest)indexR; 1690 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1691 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 1692 if (!ov.queueBuffer(fd, offset, destR)) { 1693 ALOGE("%s: queueBuffer failed for display:%d", 1694 __FUNCTION__, mDpy); 1695 return false; 1696 } 1697 } 1698 } 1699 else{ 1700 MdpPipeInfoNonSplit& pipe_info = 1701 *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1702 ovutils::eDest dest = pipe_info.index; 1703 if(dest == ovutils::OV_INVALID) { 1704 ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest); 1705 return false; 1706 } 1707 1708 if(!(layerProp[i].mFlags & HWC_MDPCOMP)) { 1709 continue; 1710 } 1711 1712 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1713 using pipe: %d", __FUNCTION__, layer, 1714 hnd, dest ); 1715 1716 int fd = hnd->fd; 1717 uint32_t offset = (uint32_t)hnd->offset; 1718 1719 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 1720 if(rot) { 1721 if(!rot->queueBuffer(fd, offset)) 1722 return false; 1723 fd = rot->getDstMemId(); 1724 offset = rot->getDstOffset(); 1725 } 1726 1727 if (!ov.queueBuffer(fd, offset, dest)) { 1728 ALOGE("%s: queueBuffer failed for display:%d ", 1729 __FUNCTION__, mDpy); 1730 return false; 1731 } 1732 } 1733 1734 layerProp[i].mFlags &= ~HWC_MDPCOMP; 1735 } 1736 return true; 1737 } 1738 1739 //=============MDPCompSplit=================================================== 1740 1741 void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx, 1742 hwc_display_contents_1_t* list){ 1743 //if 4kx2k yuv layer is totally present in either in left half 1744 //or right half then try splitting the yuv layer to avoid decimation 1745 const int lSplit = getLeftSplit(ctx, mDpy); 1746 if(mCurrentFrame.fbZ >= 0) { 1747 for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; 1748 index++) { 1749 if(!mCurrentFrame.isFBComposed[index]) { 1750 if(mdpNextZOrder == mCurrentFrame.fbZ) { 1751 mdpNextZOrder++; 1752 } 1753 mdpNextZOrder++; 1754 hwc_layer_1_t* layer = &list->hwLayers[index]; 1755 private_handle_t *hnd = (private_handle_t *)layer->handle; 1756 if(is4kx2kYuvBuffer(hnd)) { 1757 hwc_rect_t dst = layer->displayFrame; 1758 if((dst.left > lSplit) || (dst.right < lSplit)) { 1759 mCurrentFrame.mdpCount += 1; 1760 } 1761 if(mdpNextZOrder <= mCurrentFrame.fbZ) 1762 mCurrentFrame.fbZ += 1; 1763 mdpNextZOrder++; 1764 } 1765 } 1766 } 1767 } 1768 } 1769 1770 bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer, 1771 MdpPipeInfoSplit& pipe_info) { 1772 1773 const int lSplit = getLeftSplit(ctx, mDpy); 1774 private_handle_t *hnd = (private_handle_t *)layer->handle; 1775 hwc_rect_t dst = layer->displayFrame; 1776 pipe_info.lIndex = ovutils::OV_INVALID; 1777 pipe_info.rIndex = ovutils::OV_INVALID; 1778 1779 Overlay::PipeSpecs pipeSpecs; 1780 pipeSpecs.formatClass = isYuvBuffer(hnd) ? 1781 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB; 1782 pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy); 1783 pipeSpecs.dpy = mDpy; 1784 pipeSpecs.mixer = Overlay::MIXER_LEFT; 1785 pipeSpecs.fb = false; 1786 1787 // Acquire pipe only for the updating half 1788 hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi; 1789 hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi; 1790 1791 if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) { 1792 pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs); 1793 if(pipe_info.lIndex == ovutils::OV_INVALID) 1794 return false; 1795 } 1796 1797 if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) { 1798 pipeSpecs.mixer = Overlay::MIXER_RIGHT; 1799 pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs); 1800 if(pipe_info.rIndex == ovutils::OV_INVALID) 1801 return false; 1802 } 1803 1804 return true; 1805 } 1806 1807 bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx, 1808 hwc_display_contents_1_t* list) { 1809 for(int index = 0 ; index < mCurrentFrame.layerCount; index++) { 1810 1811 if(mCurrentFrame.isFBComposed[index]) continue; 1812 1813 hwc_layer_1_t* layer = &list->hwLayers[index]; 1814 private_handle_t *hnd = (private_handle_t *)layer->handle; 1815 hwc_rect_t dst = layer->displayFrame; 1816 const int lSplit = getLeftSplit(ctx, mDpy); 1817 if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){ 1818 if((dst.left > lSplit)||(dst.right < lSplit)){ 1819 if(allocSplitVGPipesfor4k2k(ctx, index)){ 1820 continue; 1821 } 1822 } 1823 } 1824 int mdpIndex = mCurrentFrame.layerToMDP[index]; 1825 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 1826 info.pipeInfo = new MdpPipeInfoSplit; 1827 info.rot = NULL; 1828 MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo; 1829 1830 if(!acquireMDPPipes(ctx, layer, pipe_info)) { 1831 ALOGD_IF(isDebug(), "%s: Unable to get pipe for type", 1832 __FUNCTION__); 1833 return false; 1834 } 1835 } 1836 return true; 1837 } 1838 1839 int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer, 1840 PipeLayerPair& PipeLayerPair) { 1841 const int lSplit = getLeftSplit(ctx, mDpy); 1842 hwc_rect_t dst = layer->displayFrame; 1843 if((dst.left > lSplit)||(dst.right < lSplit)){ 1844 MdpYUVPipeInfo& mdp_info = 1845 *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo)); 1846 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 1847 eIsFg isFg = IS_FG_OFF; 1848 eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION; 1849 eDest lDest = mdp_info.lIndex; 1850 eDest rDest = mdp_info.rIndex; 1851 1852 return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, 1853 lDest, rDest, &PipeLayerPair.rot); 1854 } 1855 else{ 1856 return configure(ctx, layer, PipeLayerPair); 1857 } 1858 } 1859 1860 /* 1861 * Configures pipe(s) for MDP composition 1862 */ 1863 int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 1864 PipeLayerPair& PipeLayerPair) { 1865 MdpPipeInfoSplit& mdp_info = 1866 *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo)); 1867 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 1868 eIsFg isFg = IS_FG_OFF; 1869 eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION; 1870 eDest lDest = mdp_info.lIndex; 1871 eDest rDest = mdp_info.rIndex; 1872 1873 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d" 1874 "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest); 1875 1876 return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest, 1877 rDest, &PipeLayerPair.rot); 1878 } 1879 1880 bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 1881 1882 if(!isEnabled()) { 1883 ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__); 1884 return true; 1885 } 1886 1887 if(!ctx || !list) { 1888 ALOGE("%s: invalid contxt or list",__FUNCTION__); 1889 return false; 1890 } 1891 1892 if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) { 1893 ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__); 1894 return true; 1895 } 1896 1897 // Set the Handle timeout to true for MDP or MIXED composition. 1898 if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) { 1899 sHandleTimeout = true; 1900 } 1901 1902 overlay::Overlay& ov = *ctx->mOverlay; 1903 LayerProp *layerProp = ctx->layerProp[mDpy]; 1904 1905 int numHwLayers = ctx->listStats[mDpy].numAppLayers; 1906 for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ ) 1907 { 1908 if(mCurrentFrame.isFBComposed[i]) continue; 1909 1910 hwc_layer_1_t *layer = &list->hwLayers[i]; 1911 private_handle_t *hnd = (private_handle_t *)layer->handle; 1912 if(!hnd) { 1913 ALOGE("%s handle null", __FUNCTION__); 1914 return false; 1915 } 1916 1917 if(!(layerProp[i].mFlags & HWC_MDPCOMP)) { 1918 continue; 1919 } 1920 1921 int mdpIndex = mCurrentFrame.layerToMDP[i]; 1922 1923 if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit) 1924 { 1925 MdpYUVPipeInfo& pipe_info = 1926 *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1927 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 1928 ovutils::eDest indexL = pipe_info.lIndex; 1929 ovutils::eDest indexR = pipe_info.rIndex; 1930 int fd = hnd->fd; 1931 uint32_t offset = (uint32_t)hnd->offset; 1932 if(rot) { 1933 rot->queueBuffer(fd, offset); 1934 fd = rot->getDstMemId(); 1935 offset = rot->getDstOffset(); 1936 } 1937 if(indexL != ovutils::OV_INVALID) { 1938 ovutils::eDest destL = (ovutils::eDest)indexL; 1939 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1940 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 1941 if (!ov.queueBuffer(fd, offset, destL)) { 1942 ALOGE("%s: queueBuffer failed for display:%d", 1943 __FUNCTION__, mDpy); 1944 return false; 1945 } 1946 } 1947 1948 if(indexR != ovutils::OV_INVALID) { 1949 ovutils::eDest destR = (ovutils::eDest)indexR; 1950 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1951 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 1952 if (!ov.queueBuffer(fd, offset, destR)) { 1953 ALOGE("%s: queueBuffer failed for display:%d", 1954 __FUNCTION__, mDpy); 1955 return false; 1956 } 1957 } 1958 } 1959 else{ 1960 MdpPipeInfoSplit& pipe_info = 1961 *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1962 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 1963 1964 ovutils::eDest indexL = pipe_info.lIndex; 1965 ovutils::eDest indexR = pipe_info.rIndex; 1966 1967 int fd = hnd->fd; 1968 int offset = (uint32_t)hnd->offset; 1969 1970 if(ctx->mAD->isModeOn()) { 1971 if(ctx->mAD->draw(ctx, fd, offset)) { 1972 fd = ctx->mAD->getDstFd(); 1973 offset = ctx->mAD->getDstOffset(); 1974 } 1975 } 1976 1977 if(rot) { 1978 rot->queueBuffer(fd, offset); 1979 fd = rot->getDstMemId(); 1980 offset = rot->getDstOffset(); 1981 } 1982 1983 //************* play left mixer ********** 1984 if(indexL != ovutils::OV_INVALID) { 1985 ovutils::eDest destL = (ovutils::eDest)indexL; 1986 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1987 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 1988 if (!ov.queueBuffer(fd, offset, destL)) { 1989 ALOGE("%s: queueBuffer failed for left mixer", 1990 __FUNCTION__); 1991 return false; 1992 } 1993 } 1994 1995 //************* play right mixer ********** 1996 if(indexR != ovutils::OV_INVALID) { 1997 ovutils::eDest destR = (ovutils::eDest)indexR; 1998 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1999 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 2000 if (!ov.queueBuffer(fd, offset, destR)) { 2001 ALOGE("%s: queueBuffer failed for right mixer", 2002 __FUNCTION__); 2003 return false; 2004 } 2005 } 2006 } 2007 2008 layerProp[i].mFlags &= ~HWC_MDPCOMP; 2009 } 2010 2011 return true; 2012 } 2013 2014 //================MDPCompSrcSplit============================================== 2015 bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer, 2016 MdpPipeInfoSplit& pipe_info) { 2017 private_handle_t *hnd = (private_handle_t *)layer->handle; 2018 hwc_rect_t dst = layer->displayFrame; 2019 hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf); 2020 pipe_info.lIndex = ovutils::OV_INVALID; 2021 pipe_info.rIndex = ovutils::OV_INVALID; 2022 2023 //If 2 pipes are staged on a single stage of a mixer, then the left pipe 2024 //should have a higher priority than the right one. Pipe priorities are 2025 //starting with VG0, VG1 ... , RGB0 ..., DMA1 2026 2027 Overlay::PipeSpecs pipeSpecs; 2028 pipeSpecs.formatClass = isYuvBuffer(hnd) ? 2029 Overlay::FORMAT_YUV : Overlay::FORMAT_RGB; 2030 pipeSpecs.needsScaling = qhwc::needsScaling(layer); 2031 pipeSpecs.dpy = mDpy; 2032 pipeSpecs.fb = false; 2033 2034 //1 pipe by default for a layer 2035 pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs); 2036 if(pipe_info.lIndex == ovutils::OV_INVALID) { 2037 return false; 2038 } 2039 2040 /* Use 2 pipes IF 2041 a) Layer's crop width is > 2048 or 2042 b) Layer's dest width > 2048 or 2043 c) On primary, driver has indicated with caps to split always. This is 2044 based on an empirically derived value of panel height. Applied only 2045 if the layer's width is > mixer's width 2046 */ 2047 2048 bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and 2049 qdutils::MDPVersion::getInstance().isSrcSplitAlways(); 2050 int lSplit = getLeftSplit(ctx, mDpy); 2051 int dstWidth = dst.right - dst.left; 2052 int cropWidth = crop.right - crop.left; 2053 2054 if(dstWidth > qdutils::MAX_DISPLAY_DIM or 2055 cropWidth > qdutils::MAX_DISPLAY_DIM or 2056 (primarySplitAlways and (cropWidth > lSplit))) { 2057 pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs); 2058 if(pipe_info.rIndex == ovutils::OV_INVALID) { 2059 return false; 2060 } 2061 2062 // Return values 2063 // 1 Left pipe is higher priority, do nothing. 2064 // 0 Pipes of same priority. 2065 //-1 Right pipe is of higher priority, needs swap. 2066 if(ctx->mOverlay->comparePipePriority(pipe_info.lIndex, 2067 pipe_info.rIndex) == -1) { 2068 qhwc::swap(pipe_info.lIndex, pipe_info.rIndex); 2069 } 2070 } 2071 2072 return true; 2073 } 2074 2075 int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 2076 PipeLayerPair& PipeLayerPair) { 2077 private_handle_t *hnd = (private_handle_t *)layer->handle; 2078 if(!hnd) { 2079 ALOGE("%s: layer handle is NULL", __FUNCTION__); 2080 return -1; 2081 } 2082 MetaData_t *metadata = (MetaData_t *)hnd->base_metadata; 2083 MdpPipeInfoSplit& mdp_info = 2084 *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo)); 2085 Rotator **rot = &PipeLayerPair.rot; 2086 eZorder z = static_cast<eZorder>(mdp_info.zOrder); 2087 eIsFg isFg = IS_FG_OFF; 2088 eDest lDest = mdp_info.lIndex; 2089 eDest rDest = mdp_info.rIndex; 2090 hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf); 2091 hwc_rect_t dst = layer->displayFrame; 2092 int transform = layer->transform; 2093 eTransform orient = static_cast<eTransform>(transform); 2094 const int downscale = 0; 2095 int rotFlags = ROT_FLAGS_NONE; 2096 uint32_t format = ovutils::getMdpFormat(hnd->format, isTileRendered(hnd)); 2097 Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size); 2098 2099 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d" 2100 "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest); 2101 2102 // Handle R/B swap 2103 if (layer->flags & HWC_FORMAT_RB_SWAP) { 2104 if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888) 2105 whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888); 2106 else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888) 2107 whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888); 2108 } 2109 2110 eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION; 2111 setMdpFlags(layer, mdpFlags, 0, transform); 2112 2113 if(lDest != OV_INVALID && rDest != OV_INVALID) { 2114 //Enable overfetch 2115 setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE); 2116 } 2117 2118 if(isYuvBuffer(hnd) && (transform & HWC_TRANSFORM_ROT_90)) { 2119 (*rot) = ctx->mRotMgr->getNext(); 2120 if((*rot) == NULL) return -1; 2121 ctx->mLayerRotMap[mDpy]->add(layer, *rot); 2122 //If the video is using a single pipe, enable BWC 2123 if(rDest == OV_INVALID) { 2124 BwcPM::setBwc(crop, dst, transform, mdpFlags); 2125 } 2126 //Configure rotator for pre-rotation 2127 if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) { 2128 ALOGE("%s: configRotator failed!", __FUNCTION__); 2129 return -1; 2130 } 2131 whf.format = (*rot)->getDstFormat(); 2132 updateSource(orient, whf, crop); 2133 rotFlags |= ROT_PREROTATED; 2134 } 2135 2136 //If 2 pipes being used, divide layer into half, crop and dst 2137 hwc_rect_t cropL = crop; 2138 hwc_rect_t cropR = crop; 2139 hwc_rect_t dstL = dst; 2140 hwc_rect_t dstR = dst; 2141 if(lDest != OV_INVALID && rDest != OV_INVALID) { 2142 cropL.right = (crop.right + crop.left) / 2; 2143 cropR.left = cropL.right; 2144 sanitizeSourceCrop(cropL, cropR, hnd); 2145 2146 //Swap crops on H flip since 2 pipes are being used 2147 if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) { 2148 hwc_rect_t tmp = cropL; 2149 cropL = cropR; 2150 cropR = tmp; 2151 } 2152 2153 dstL.right = (dst.right + dst.left) / 2; 2154 dstR.left = dstL.right; 2155 } 2156 2157 //For the mdp, since either we are pre-rotating or MDP does flips 2158 orient = OVERLAY_TRANSFORM_0; 2159 transform = 0; 2160 2161 //configure left pipe 2162 if(lDest != OV_INVALID) { 2163 PipeArgs pargL(mdpFlags, whf, z, isFg, 2164 static_cast<eRotFlags>(rotFlags), layer->planeAlpha, 2165 (ovutils::eBlending) getBlending(layer->blending)); 2166 2167 if(configMdp(ctx->mOverlay, pargL, orient, 2168 cropL, dstL, metadata, lDest) < 0) { 2169 ALOGE("%s: commit failed for left mixer config", __FUNCTION__); 2170 return -1; 2171 } 2172 } 2173 2174 //configure right pipe 2175 if(rDest != OV_INVALID) { 2176 PipeArgs pargR(mdpFlags, whf, z, isFg, 2177 static_cast<eRotFlags>(rotFlags), 2178 layer->planeAlpha, 2179 (ovutils::eBlending) getBlending(layer->blending)); 2180 if(configMdp(ctx->mOverlay, pargR, orient, 2181 cropR, dstR, metadata, rDest) < 0) { 2182 ALOGE("%s: commit failed for right mixer config", __FUNCTION__); 2183 return -1; 2184 } 2185 } 2186 2187 return 0; 2188 } 2189 2190 }; //namespace 2191 2192