/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGpuGL.h"
#include "GrGLStencilBuffer.h"
#include "GrOptDrawState.h"
#include "GrTemplates.h"
#include "GrTypes.h"
#include "SkStrokeRec.h"
#include "SkTemplates.h"

#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)

#define SKIP_CACHE_CHECK true

#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
    #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
    #define GL_ALLOC_CALL(iface, call)      GR_GL_CALL_NOERRCHECK(iface, call)
    #define CHECK_ALLOC_ERROR(iface)        GR_GL_GET_ERROR(iface)
#else
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)
    #define GL_ALLOC_CALL(iface, call)      GR_GL_CALL(iface, call)
    #define CHECK_ALLOC_ERROR(iface)        GR_GL_NO_ERROR
#endif

///////////////////////////////////////////////////////////////////////////////

static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,
    GR_GL_CONSTANT_ALPHA,
    GR_GL_ONE_MINUS_CONSTANT_ALPHA,

    // extended blend coeffs
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,
};

bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
    static const bool gCoeffReferencesBlendConst[] = {
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        true,
        true,
        true,
        true,

        // extended blend coeffs
        false,
        false,
        false,
        false,
    };
    return gCoeffReferencesBlendConst[coeff];
    // The static asserts below are compile-time checks, so it is fine for them
    // to follow the return.
    GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
                     SK_ARRAY_COUNT(gCoeffReferencesBlendConst));

    GR_STATIC_ASSERT(0  == kZero_GrBlendCoeff);
    GR_STATIC_ASSERT(1  == kOne_GrBlendCoeff);
    GR_STATIC_ASSERT(2  == kSC_GrBlendCoeff);
    GR_STATIC_ASSERT(3  == kISC_GrBlendCoeff);
    GR_STATIC_ASSERT(4  == kDC_GrBlendCoeff);
    GR_STATIC_ASSERT(5  == kIDC_GrBlendCoeff);
    GR_STATIC_ASSERT(6  == kSA_GrBlendCoeff);
    GR_STATIC_ASSERT(7  == kISA_GrBlendCoeff);
    GR_STATIC_ASSERT(8  == kDA_GrBlendCoeff);
    GR_STATIC_ASSERT(9  == kIDA_GrBlendCoeff);
    GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
    GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
    GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
    GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);

    GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
    GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);

    // assertions for gXfermodeCoeff2Blend have to be in GrGpu scope
    GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
                     SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
}

///////////////////////////////////////////////////////////////////////////////

static bool gPrintStartupSpew;

GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context)
    : GrGpu(context)
    , fGLContext(ctx) {

    SkASSERT(ctx.isInitialized());
    fCaps.reset(SkRef(ctx.caps()));

    fHWBoundTextureUniqueIDs.reset(this->glCaps().maxFragmentTextureUnits());

    GrGLClearErr(fGLContext.interface());
    if (gPrintStartupSpew) {
        const GrGLubyte* vendor;
        const GrGLubyte* renderer;
        const GrGLubyte* version;
        GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
        GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
        GL_CALL_RET(version, GetString(GR_GL_VERSION));
        GrPrintf("------------------------- create GrGpuGL %p --------------\n",
                 this);
        GrPrintf("------ VENDOR %s\n", vendor);
        GrPrintf("------ RENDERER %s\n", renderer);
        GrPrintf("------ VERSION %s\n", version);
        GrPrintf("------ EXTENSIONS\n");
        ctx.extensions().print();
        GrPrintf("\n");
        GrPrintf(this->glCaps().dump().c_str());
    }

    fProgramCache = SkNEW_ARGS(ProgramCache, (this));

    SkASSERT(this->glCaps().maxVertexAttributes() >= GrDrawState::kMaxVertexAttribCnt);

    fLastSuccessfulStencilFmtIdx = 0;
    fHWProgramID = 0;

    if (this->glCaps().pathRenderingSupport()) {
        fPathRendering.reset(new GrGLPathRendering(this));
    }
}

GrGpuGL::~GrGpuGL() {
    if (0 != fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        SkASSERT(fHWProgramID == fCurrentProgram->programID());
        GL_CALL(UseProgram(0));
    }

    delete fProgramCache;

    // This must be called before the GrDrawTarget destructor runs.
    this->releaseGeometry();
}

void GrGpuGL::contextAbandoned() {
    INHERITED::contextAbandoned();
    fProgramCache->abandon();
    fHWProgramID = 0;
    if (this->glCaps().pathRenderingSupport()) {
        this->glPathRendering()->abandonGpuResources();
    }
}

///////////////////////////////////////////////////////////////////////////////

GrPixelConfig GrGpuGL::preferredReadPixelsConfig(GrPixelConfig readConfig,
                                                 GrPixelConfig surfaceConfig) const {
    if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) {
        return kBGRA_8888_GrPixelConfig;
    } else if (this->glContext().isMesa() &&
               GrBytesPerPixel(readConfig) == 4 &&
               GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) {
        // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa.
        // Perhaps this should be guarded by some compile-time or runtime check.
        return surfaceConfig;
    } else if (readConfig == kBGRA_8888_GrPixelConfig &&
               !this->glCaps().readPixelsSupported(this->glInterface(),
                                                   GR_GL_BGRA, GR_GL_UNSIGNED_BYTE)) {
        return kRGBA_8888_GrPixelConfig;
    } else {
        return readConfig;
    }
}

GrPixelConfig GrGpuGL::preferredWritePixelsConfig(GrPixelConfig writeConfig,
                                                  GrPixelConfig surfaceConfig) const {
    if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) {
        return kBGRA_8888_GrPixelConfig;
    } else {
        return writeConfig;
    }
}

bool GrGpuGL::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const {
    if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) {
        return false;
    }
    if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) {
        // In general ES2 requires the internal format of the texture and the format of the src
        // pixels to match. However, it may or may not be possible to upload BGRA data to a RGBA
        // texture. It depends upon which extension added BGRA. The Apple extension allows it
        // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own
        // internal format).
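        // Concretely: with GL_APPLE_texture_format_BGRA8888 a BGRA upload to an
        // RGBA texture is legal, while with GL_EXT_texture_format_BGRA8888 the
        // internal and external formats must both be BGRA.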
        if (this->glCaps().isConfigTexturable(kBGRA_8888_GrPixelConfig) &&
            !this->glCaps().bgraIsInternalFormat() &&
            kBGRA_8888_GrPixelConfig == srcConfig &&
            kRGBA_8888_GrPixelConfig == texture->config()) {
            return true;
        } else {
            return false;
        }
    } else {
        return true;
    }
}

bool GrGpuGL::fullReadPixelsIsFasterThanPartial() const {
    return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL);
}

void GrGpuGL::onResetContext(uint32_t resetBits) {
    // we don't use the z-buffer at all
    if (resetBits & kMisc_GrGLBackendState) {
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        fHWDrawFace = GrDrawState::kInvalid_DrawFace;
        fHWDitherEnabled = kUnknown_TriState;

        if (kGL_GrGLStandard == this->glStandard()) {
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The Windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

            // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
            // currently part of our gl interface. There are probably others as
            // well.
        }

        if (kGLES_GrGLStandard == this->glStandard() &&
            fGLContext.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
            // The ARM extension requires specifically enabling MSAA fetching per sample.
            // On some devices this may have a perf hit.
            // It also disables multiple render targets.
            GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
    }

    if (resetBits & kMSAAEnable_GrGLBackendState) {
        fMSAAEnabled = kUnknown_TriState;
    }

    fHWActiveTextureUnitIdx = -1; // invalid

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) {
            fHWBoundTextureUniqueIDs[s] = SK_InvalidUniqueID;
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWGeometryState.invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
    }

    if (resetBits & kPathRendering_GrGLBackendState) {
        if (this->caps()->pathRenderingSupport()) {
            this->glPathRendering()->resetContext();
        }
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->glCaps().unpackRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().unpackFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
        fSharedGLProgramState.invalidate();
    }
}

namespace {

GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
    // By default, GrRenderTargets are GL's normal orientation so that they
    // can be drawn to by the outside world without the client having
    // to render upside down.
    if (kDefault_GrSurfaceOrigin == origin) {
        return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

}

GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
    if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) {
        return NULL;
    }

    if (0 == desc.fTextureHandle) {
        return NULL;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
        return NULL;
    }

    GrGLTexture::Desc glTexDesc;
    // next line relies on GrBackendTextureDesc's flags matching GrTexture's
    glTexDesc.fFlags = (GrTextureFlags) desc.fFlags;
    glTexDesc.fWidth = desc.fWidth;
    glTexDesc.fHeight = desc.fHeight;
    glTexDesc.fConfig = desc.fConfig;
    glTexDesc.fSampleCnt = desc.fSampleCnt;
    glTexDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle);
    glTexDesc.fIsWrapped = true;
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
    // FIXME: this should be calling resolve_origin(), but Chrome code is currently
    // assuming the old behaviour, which is that backend textures are always
    // BottomLeft, even for non-RTs.
    // Once Chrome is fixed, change this to:
    // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
        glTexDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    } else {
        glTexDesc.fOrigin = desc.fOrigin;
    }

    GrGLTexture* texture = NULL;
    if (renderTarget) {
        GrGLRenderTarget::Desc glRTDesc;
        glRTDesc.fRTFBOID = 0;
        glRTDesc.fTexFBOID = 0;
        glRTDesc.fMSColorRenderbufferID = 0;
        glRTDesc.fConfig = desc.fConfig;
        glRTDesc.fSampleCnt = desc.fSampleCnt;
        glRTDesc.fOrigin = glTexDesc.fOrigin;
        glRTDesc.fCheckAllocation = false;
        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
                                             glTexDesc.fHeight,
                                             glTexDesc.fTextureID,
                                             &glRTDesc)) {
            return NULL;
        }
        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
    } else {
        texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    }
    if (NULL == texture) {
        return NULL;
    }

    return texture;
}

GrRenderTarget* GrGpuGL::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    GrGLRenderTarget::Desc glDesc;
    glDesc.fConfig = desc.fConfig;
    glDesc.fRTFBOID = static_cast<GrGLuint>(desc.fRenderTargetHandle);
    glDesc.fMSColorRenderbufferID = 0;
    glDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
    glDesc.fSampleCnt = desc.fSampleCnt;
    glDesc.fIsWrapped = true;
    glDesc.fCheckAllocation = false;

    glDesc.fOrigin = resolve_origin(desc.fOrigin, true);
    GrGLIRect viewport;
    viewport.fLeft = 0;
    viewport.fBottom = 0;
    viewport.fWidth = desc.fWidth;
    viewport.fHeight = desc.fHeight;

    GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget,
                                     (this, glDesc, viewport));
    if (desc.fStencilBits) {
        GrGLStencilBuffer::Format format;
        format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat;
        format.fPacked = false;
        format.fStencilBits = desc.fStencilBits;
        format.fTotalBits = desc.fStencilBits;
        static const bool kIsSBWrapped = false;
        GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer,
                                           (this,
                                            kIsSBWrapped,
                                            0,
                                            desc.fWidth,
                                            desc.fHeight,
                                            desc.fSampleCnt,
                                            format));
        tgt->setStencilBuffer(sb);
        sb->unref();
    }
    return tgt;
}

////////////////////////////////////////////////////////////////////////////////

bool GrGpuGL::onWriteTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer,
                                   size_t rowBytes) {
    if (NULL == buffer) {
        return false;
    }
    GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID()));
    GrGLTexture::Desc desc;
    desc.fFlags = glTex->desc().fFlags;
    desc.fWidth = glTex->width();
    desc.fHeight = glTex->height();
    desc.fConfig = glTex->config();
    desc.fSampleCnt = glTex->desc().fSampleCnt;
    desc.fTextureID = glTex->textureID();
    desc.fOrigin = glTex->origin();

    bool success = false;
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        // We check that config == desc.fConfig in GrGpuGL::canWriteTexturePixels()
        SkASSERT(config == desc.fConfig);
        success = this->uploadCompressedTexData(desc, buffer, false,
                                                left, top, width, height);
    } else {
        success = this->uploadTexData(desc, false,
                                      left, top, width, height,
                                      config, buffer, rowBytes);
    }

    if (success) {
        texture->impl()->dirtyMipMaps(true);
        return true;
    }

    return false;
}
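
// The helper below clips a pixel-op rect to the surface bounds and advances
// the data pointer to the first byte of the clipped rect. A quick worked
// example (hypothetical values): with bpp = 4 and *rowBytes = 256, a rect
// whose origin (-2, -3) gets clipped to (0, 0) advances data by
// 3 * 256 + 2 * 4 = 776 bytes.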
namespace {
bool adjust_pixel_ops_params(int surfaceWidth,
                             int surfaceHeight,
                             size_t bpp,
                             int* left, int* top, int* width, int* height,
                             const void** data,
                             size_t* rowBytes) {
    if (!*rowBytes) {
        *rowBytes = *width * bpp;
    }

    SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height);
    SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);

    if (!subRect.intersect(bounds)) {
        return false;
    }
    *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) +
            (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp);

    *left = subRect.fLeft;
    *top = subRect.fTop;
    *width = subRect.width();
    *height = subRect.height();
    return true;
}

GrGLenum check_alloc_error(const GrTextureDesc& desc, const GrGLInterface* interface) {
    if (SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit)) {
        return GR_GL_GET_ERROR(interface);
    } else {
        return CHECK_ALLOC_ERROR(interface);
    }
}

}

bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc,
                            bool isNewTexture,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* data,
                            size_t rowBytes) {
    SkASSERT(data || isNewTexture);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    size_t bpp = GrBytesPerPixel(dataConfig);
    if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                 &width, &height, &data, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    // in case we need a temporary, trimmed copy of the src pixels
    GrAutoMalloc<128 * 128> tempStorage;

    // We currently lazily create MIPMAPs when we see a draw with
    // GrTextureParams::kMipMap_FilterMode. Using texture storage requires that the
    // MIP levels are all created when the texture is created. So for now we don't use
    // texture storage.
    bool useTexStorage = false &&
                         isNewTexture &&
                         this->glCaps().texStorageSupport();

    if (useTexStorage && kGL_GrGLStandard == this->glStandard()) {
        // 565 is not a sized internal format on desktop GL. So on desktop with
        // 565 we always use an unsized internal format to let the system pick
        // the best sized format to convert the 565 data to. Since TexStorage
        // only allows sized internal formats we will instead use TexImage2D.
        useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
    }

    GrGLenum internalFormat;
    GrGLenum externalFormat = 0x0; // suppress warning
    GrGLenum externalType = 0x0;   // suppress warning

    // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
    // format for glTexImage, unlike ES3 and desktop. However, we allow the driver to decide the
    // size of the internal format whenever possible and so only use a sized internal format when
    // using texture storage.
    bool useSizedFormat = useTexStorage;
    // At least some versions of the desktop ES3 drivers for NVIDIA won't accept GL_RED in
    // glTexImage2D for the internal format but will accept GL_R8.
    if (!useSizedFormat && kNVIDIA_GrGLVendor == this->glContext().vendor() &&
        kGLES_GrGLStandard == this->glStandard() && this->glVersion() >= GR_GL_VER(3, 0)) {
        useSizedFormat = true;
    }
    if (!this->configToGLFormats(dataConfig, useSizedFormat, &internalFormat,
                                 &externalFormat, &externalType)) {
        return false;
    }

    /*
     * Check whether to allocate a temporary buffer for flipping y or
     * because our srcData has extra bytes past each row. If so, we need
     * to trim those off here, since GL ES may not let us specify
     * GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;
    bool swFlipY = false;
    bool glFlipY = false;
    if (data) {
        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            if (this->glCaps().unpackFlipYSupport()) {
                glFlipY = true;
            } else {
                swFlipY = true;
            }
        }
        if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else {
            if (trimRowBytes != rowBytes || swFlipY) {
                // copy data into our new storage, skipping the trailing bytes
                size_t trimSize = height * trimRowBytes;
                const char* src = (const char*)data;
                if (swFlipY) {
                    src += (height - 1) * rowBytes;
                }
                char* dst = (char*)tempStorage.reset(trimSize);
                for (int y = 0; y < height; y++) {
                    memcpy(dst, src, trimRowBytes);
                    if (swFlipY) {
                        src -= rowBytes;
                    } else {
                        src += rowBytes;
                    }
                    dst += trimRowBytes;
                }
                // now point data to our copied version
                data = tempStorage.get();
            }
        }
        if (glFlipY) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
        }
        GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT,
                            static_cast<GrGLint>(GrUnpackAlignment(dataConfig))));
    }
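
    // From here on: if this is a brand-new texture and the upload covers the
    // whole surface we allocate with TexStorage2D/TexImage2D; otherwise we
    // update the sub-rect with TexSubImage2D. Note that for bottom-up origins
    // 'top' must be re-expressed in GL's bottom-left coordinate space first.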
    bool succeeded = true;
    if (isNewTexture &&
        0 == left && 0 == top &&
        desc.fWidth == width && desc.fHeight == height) {
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        if (useTexStorage) {
            // We never resize or change formats of textures.
            GL_ALLOC_CALL(this->glInterface(),
                          TexStorage2D(GR_GL_TEXTURE_2D,
                                       1, // levels
                                       internalFormat,
                                       desc.fWidth, desc.fHeight));
        } else {
            GL_ALLOC_CALL(this->glInterface(),
                          TexImage2D(GR_GL_TEXTURE_2D,
                                     0, // level
                                     internalFormat,
                                     desc.fWidth, desc.fHeight,
                                     0, // border
                                     externalFormat, externalType,
                                     data));
        }
        GrGLenum error = check_alloc_error(desc, this->glInterface());
        if (error != GR_GL_NO_ERROR) {
            succeeded = false;
        } else {
            // if we have data and we used TexStorage to create the texture, we
            // now upload with TexSubImage.
            if (data && useTexStorage) {
                GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
                                      0, // level
                                      left, top,
                                      width, height,
                                      externalFormat, externalType,
                                      data));
            }
        }
    } else {
        if (swFlipY || glFlipY) {
            top = desc.fHeight - (top + height);
        }
        GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
                              0, // level
                              left, top,
                              width, height,
                              externalFormat, externalType, data));
    }

    if (restoreGLRowLength) {
        SkASSERT(this->glCaps().unpackRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    }
    if (glFlipY) {
        GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
    }
    return succeeded;
}

// TODO: This function is using a lot of wonky semantics like, if width == -1
// then set width = desc.fWidth ... blah. A better way to do it might be to
// create a CompressedTexData struct that takes a desc/ptr and figures out
// the proper upload semantics. Then users can construct this function how they
// see fit if they want to go against the "standard" way to do it.
bool GrGpuGL::uploadCompressedTexData(const GrGLTexture::Desc& desc,
                                      const void* data,
                                      bool isNewTexture,
                                      int left, int top, int width, int height) {
    SkASSERT(data || isNewTexture);

    // No support for software flip y, yet...
    SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);

    if (-1 == width) {
        width = desc.fWidth;
    }
#ifdef SK_DEBUG
    else {
        SkASSERT(width <= desc.fWidth);
    }
#endif

    if (-1 == height) {
        height = desc.fHeight;
    }
#ifdef SK_DEBUG
    else {
        SkASSERT(height <= desc.fHeight);
    }
#endif

    // Make sure that the width and height that we pass to OpenGL
    // is a multiple of the block size.
    int dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);

    // We only need the internal format for compressed 2D textures.
    GrGLenum internalFormat = 0;
    if (!this->configToGLFormats(desc.fConfig, false, &internalFormat, NULL, NULL)) {
        return false;
    }

    if (isNewTexture) {
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        GL_ALLOC_CALL(this->glInterface(),
                      CompressedTexImage2D(GR_GL_TEXTURE_2D,
                                           0, // level
                                           internalFormat,
                                           width, height,
                                           0, // border
                                           dataSize,
                                           data));
        GrGLenum error = check_alloc_error(desc, this->glInterface());
        if (error != GR_GL_NO_ERROR) {
            return false;
        }
    } else {
        // Paletted textures can't be updated.
        if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
            return false;
        }
        GL_CALL(CompressedTexSubImage2D(GR_GL_TEXTURE_2D,
                                        0, // level
                                        left, top,
                                        width, height,
                                        internalFormat,
                                        dataSize,
                                        data));
    }

    return true;
}

static bool renderbuffer_storage_msaa(GrGLContext& ctx,
                                      int sampleCount,
                                      GrGLenum format,
                                      int width, int height) {
    CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    switch (ctx.caps()->msFBOType()) {
        case GrGLCaps::kDesktop_ARB_MSFBOType:
        case GrGLCaps::kDesktop_EXT_MSFBOType:
        case GrGLCaps::kES_3_0_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
                                                         sampleCount,
                                                         format,
                                                         width, height));
            break;
        case GrGLCaps::kES_Apple_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
                                                                 sampleCount,
                                                                 format,
                                                                 width, height));
            break;
        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
                                                               sampleCount,
                                                               format,
                                                               width, height));
            break;
        case GrGLCaps::kNone_MSFBOType:
            SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers.");
            break;
    }
    return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
}
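
// Creates the FBO(s) backing a texture-based render target. On success the
// IDs are written into *desc; on any failure every GL object created along
// the way is deleted again (see the FAILED label) and false is returned.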
bool GrGpuGL::createRenderTargetObjects(int width, int height,
                                        GrGLuint texID,
                                        GrGLRenderTarget::Desc* desc) {
    desc->fMSColorRenderbufferID = 0;
    desc->fRTFBOID = 0;
    desc->fTexFBOID = 0;
    desc->fIsWrapped = false;

    GrGLenum status;

    GrGLenum msColorFormat = 0; // suppress warning

    if (desc->fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
        goto FAILED;
    }

    GL_CALL(GenFramebuffers(1, &desc->fTexFBOID));
    if (!desc->fTexFBOID) {
        goto FAILED;
    }

    // If we are using multisampling we will create two FBOs. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves it when it is
    // rendered from.
    if (desc->fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
        GL_CALL(GenFramebuffers(1, &desc->fRTFBOID));
        GL_CALL(GenRenderbuffers(1, &desc->fMSColorRenderbufferID));
        if (!desc->fRTFBOID ||
            !desc->fMSColorRenderbufferID ||
            !this->configToGLFormats(desc->fConfig,
                                     // ES2 and ES3 require sized internal formats for rb storage.
                                     kGLES_GrGLStandard == this->glStandard(),
                                     &msColorFormat,
                                     NULL,
                                     NULL)) {
            goto FAILED;
        }
    } else {
        desc->fRTFBOID = desc->fTexFBOID;
    }

    // below here we may bind the FBO
    fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
    if (desc->fRTFBOID != desc->fTexFBOID) {
        SkASSERT(desc->fSampleCnt > 0);
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER,
                                 desc->fMSColorRenderbufferID));
        if (!renderbuffer_storage_msaa(fGLContext,
                                       desc->fSampleCnt,
                                       msColorFormat,
                                       width, height)) {
            goto FAILED;
        }
        fGPUStats.incRenderTargetBinds();
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fRTFBOID));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_COLOR_ATTACHMENT0,
                                        GR_GL_RENDERBUFFER,
                                        desc->fMSColorRenderbufferID));
        if (desc->fCheckAllocation ||
            !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                goto FAILED;
            }
            fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
        }
    }
    fGPUStats.incRenderTargetBinds();
    GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fTexFBOID));

    if (this->glCaps().usesImplicitMSAAResolve() && desc->fSampleCnt > 0) {
        GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
                                                GR_GL_COLOR_ATTACHMENT0,
                                                GR_GL_TEXTURE_2D,
                                                texID, 0, desc->fSampleCnt));
    } else {
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
                                     GR_GL_COLOR_ATTACHMENT0,
                                     GR_GL_TEXTURE_2D,
                                     texID, 0));
    }
    if (desc->fCheckAllocation ||
        !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) {
        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
            goto FAILED;
        }
        fGLContext.caps()->markConfigAsValidColorAttachment(desc->fConfig);
    }

    return true;

FAILED:
    if (desc->fMSColorRenderbufferID) {
        GL_CALL(DeleteRenderbuffers(1, &desc->fMSColorRenderbufferID));
    }
    if (desc->fRTFBOID != desc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &desc->fRTFBOID));
    }
    if (desc->fTexFBOID) {
        GL_CALL(DeleteFramebuffers(1, &desc->fTexFBOID));
    }
    return false;
}

// good to set a break-point here to know when createTexture fails
static GrTexture* return_null_texture() {
//    SkDEBUGFAIL("null texture");
    return NULL;
}

#if 0 && defined(SK_DEBUG)
static size_t as_size_t(int x) {
    return x;
}
#endif

GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
                                    const void* srcData,
                                    size_t rowBytes) {

    GrGLTexture::Desc glTexDesc;
    GrGLRenderTarget::Desc glRTDesc;

    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
    // We fail if the MSAA was requested and is not available.
    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
        //GrPrintf("MSAA RT requested but not supported on this platform.");
        return return_null_texture();
    }
    // If the sample count exceeds the max then we clamp it.
    glTexDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());

    glTexDesc.fFlags = desc.fFlags;
    glTexDesc.fWidth = desc.fWidth;
    glTexDesc.fHeight = desc.fHeight;
    glTexDesc.fConfig = desc.fConfig;
    glTexDesc.fIsWrapped = false;

    glRTDesc.fMSColorRenderbufferID = 0;
    glRTDesc.fRTFBOID = 0;
    glRTDesc.fTexFBOID = 0;
    glRTDesc.fIsWrapped = false;
    glRTDesc.fConfig = glTexDesc.fConfig;
    glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit);

    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit);

    glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    glRTDesc.fOrigin = glTexDesc.fOrigin;

    glRTDesc.fSampleCnt = glTexDesc.fSampleCnt;

    if (renderTarget) {
        int maxRTSize = this->caps()->maxRenderTargetSize();
        if (glTexDesc.fWidth > maxRTSize || glTexDesc.fHeight > maxRTSize) {
            return return_null_texture();
        }
    } else {
        int maxSize = this->caps()->maxTextureSize();
        if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) {
            return return_null_texture();
        }
    }

    GL_CALL(GenTextures(1, &glTexDesc.fTextureID));

    if (!glTexDesc.fTextureID) {
        return return_null_texture();
    }

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));

    if (renderTarget && this->glCaps().textureUsageSupport()) {
        // provides a hint about how this texture will be used
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_USAGE,
                              GR_GL_FRAMEBUFFER_ATTACHMENT));
    }

    // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
    // drivers have a bug where an FBO won't be complete if it includes a
    // texture that is not mipmap complete (considering the filter in use).
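    // Hence we seed a conservative NEAREST/CLAMP_TO_EDGE state up front and
    // remember it via setCachedTexParams() below, so later draws only flush
    // parameters that actually change.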
    GrGLTexture::TexParams initialTexParams;
    // we only set a subset here so invalidate first
    initialTexParams.invalidate();
    initialTexParams.fMinFilter = GR_GL_NEAREST;
    initialTexParams.fMagFilter = GR_GL_NEAREST;
    initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
    initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_MAG_FILTER,
                          initialTexParams.fMagFilter));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_MIN_FILTER,
                          initialTexParams.fMinFilter));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_WRAP_S,
                          initialTexParams.fWrapS));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_WRAP_T,
                          initialTexParams.fWrapT));
    if (!this->uploadTexData(glTexDesc, true, 0, 0,
                             glTexDesc.fWidth, glTexDesc.fHeight,
                             desc.fConfig, srcData, rowBytes)) {
        GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
        return return_null_texture();
    }

    GrGLTexture* tex;
    if (renderTarget) {
        // unbind the texture from the texture unit before binding it to the frame buffer
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));

        if (!this->createRenderTargetObjects(glTexDesc.fWidth,
                                             glTexDesc.fHeight,
                                             glTexDesc.fTextureID,
                                             &glRTDesc)) {
            GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
            return return_null_texture();
        }
        tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc));
    } else {
        tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    }
    tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
#ifdef TRACE_TEXTURE_CREATION
    GrPrintf("--- new texture [%d] size=(%d %d) config=%d\n",
             glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
    return tex;
}

GrTexture* GrGpuGL::onCreateCompressedTexture(const GrTextureDesc& desc,
                                              const void* srcData) {

    if (SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
        return return_null_texture();
    }

    // Make sure that we're not flipping Y.
    GrSurfaceOrigin texOrigin = resolve_origin(desc.fOrigin, false);
    if (kBottomLeft_GrSurfaceOrigin == texOrigin) {
        return return_null_texture();
    }

    GrGLTexture::Desc glTexDesc;

    glTexDesc.fFlags = desc.fFlags;
    glTexDesc.fWidth = desc.fWidth;
    glTexDesc.fHeight = desc.fHeight;
    glTexDesc.fConfig = desc.fConfig;
    glTexDesc.fIsWrapped = false;
    glTexDesc.fOrigin = texOrigin;

    int maxSize = this->caps()->maxTextureSize();
    if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) {
        return return_null_texture();
    }

    GL_CALL(GenTextures(1, &glTexDesc.fTextureID));

    if (!glTexDesc.fTextureID) {
        return return_null_texture();
    }

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));

    // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
    // drivers have a bug where an FBO won't be complete if it includes a
    // texture that is not mipmap complete (considering the filter in use).
    GrGLTexture::TexParams initialTexParams;
    // we only set a subset here so invalidate first
    initialTexParams.invalidate();
    initialTexParams.fMinFilter = GR_GL_NEAREST;
    initialTexParams.fMagFilter = GR_GL_NEAREST;
    initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
    initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_MAG_FILTER,
                          initialTexParams.fMagFilter));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_MIN_FILTER,
                          initialTexParams.fMinFilter));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_WRAP_S,
                          initialTexParams.fWrapS));
    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                          GR_GL_TEXTURE_WRAP_T,
                          initialTexParams.fWrapT));

    if (!this->uploadCompressedTexData(glTexDesc, srcData)) {
        GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
        return return_null_texture();
    }

    GrGLTexture* tex;
    tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc));
    tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
#ifdef TRACE_TEXTURE_CREATION
    GrPrintf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
             glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
    return tex;
}

namespace {

const GrGLuint kUnknownBitCount = GrGLStencilBuffer::kUnknownBitCount;

void inline get_stencil_rb_sizes(const GrGLInterface* gl,
                                 GrGLStencilBuffer::Format* format) {

    // we shouldn't ever know one size and not the other
    SkASSERT((kUnknownBitCount == format->fStencilBits) ==
             (kUnknownBitCount == format->fTotalBits));
    if (kUnknownBitCount == format->fStencilBits) {
        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
                                         (GrGLint*)&format->fStencilBits);
        if (format->fPacked) {
            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
                                             (GrGLint*)&format->fTotalBits);
            format->fTotalBits += format->fStencilBits;
        } else {
            format->fTotalBits = format->fStencilBits;
        }
    }
}
}
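
// Walks the caps' list of stencil formats, trying each until one both
// allocates cleanly and yields a complete FBO when attached. The index of the
// winning format is cached in fLastSuccessfulStencilFmtIdx so that subsequent
// stencil buffers usually succeed on the first try.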
bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
                                                 int width, int height) {

    // All internally created RTs are also textures. We don't create
    // SBs for a client's standalone RT (that is a RT that isn't also a texture).
    SkASSERT(rt->asTexture());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numSamples();
    GrGLuint sbID;
    GL_CALL(GenRenderbuffers(1, &sbID));
    if (!sbID) {
        return false;
    }

    int stencilFmtCnt = this->glCaps().stencilFormats().count();
    for (int i = 0; i < stencilFmtCnt; ++i) {
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID));
        // we start with the last stencil format that succeeded in hopes
        // that we won't go through this loop more than once after the
        // first (painful) stencil creation.
        int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
        const GrGLCaps::StencilFormat& sFmt =
                this->glCaps().stencilFormats()[sIdx];
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        // we do this "if" so that we don't call the multisample
        // version on a GL that doesn't have an MSAA extension.
        bool created;
        if (samples > 0) {
            created = renderbuffer_storage_msaa(fGLContext,
                                                samples,
                                                sFmt.fInternalFormat,
                                                width, height);
        } else {
            GL_ALLOC_CALL(this->glInterface(),
                          RenderbufferStorage(GR_GL_RENDERBUFFER,
                                              sFmt.fInternalFormat,
                                              width, height));
            created =
                (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
        }
        if (created) {
            // After sized formats we attempt an unsized format and take
            // whatever sizes GL gives us. In that case we query for the size.
            GrGLStencilBuffer::Format format = sFmt;
            get_stencil_rb_sizes(this->glInterface(), &format);
            static const bool kIsWrapped = false;
            SkAutoTUnref<GrStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer,
                                                        (this, kIsWrapped, sbID, width, height,
                                                         samples, format)));
            if (this->attachStencilBufferToRenderTarget(sb, rt)) {
                fLastSuccessfulStencilFmtIdx = sIdx;
                sb->transferToCache();
                rt->setStencilBuffer(sb);
                return true;
            }
            sb->abandon(); // otherwise we lose sbID
        }
    }
    GL_CALL(DeleteRenderbuffers(1, &sbID));
    return false;
}

bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) {
    GrGLRenderTarget* glrt = (GrGLRenderTarget*) rt;

    GrGLuint fbo = glrt->renderFBOID();

    if (NULL == sb) {
        if (rt->getStencilBuffer()) {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_STENCIL_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
#ifdef SK_DEBUG
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
#endif
        }
        return true;
    } else {
        GrGLStencilBuffer* glsb = static_cast<GrGLStencilBuffer*>(sb);
        GrGLuint rb = glsb->renderbufferID();

        fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
        fGPUStats.incRenderTargetBinds();
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_STENCIL_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, rb));
        if (glsb->format().fPacked) {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, rb));
        } else {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
        }

        GrGLenum status;
        if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                GR_GL_STENCIL_ATTACHMENT,
                                                GR_GL_RENDERBUFFER, 0));
                if (glsb->format().fPacked) {
                    GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                    GR_GL_DEPTH_ATTACHMENT,
                                                    GR_GL_RENDERBUFFER, 0));
                }
                return false;
            } else {
                fGLContext.caps()->markColorConfigAndStencilFormatAsVerified(
                    rt->config(),
                    glsb->format());
            }
        }
        return true;
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(size_t size, bool dynamic) {
    GrGLVertexBuffer::Desc desc;
    desc.fDynamic = dynamic;
    desc.fSizeInBytes = size;
    desc.fIsWrapped = false;
    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
        desc.fID = 0;
        GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
        return vertexBuffer;
    } else {
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setVertexBufferID(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ARRAY_BUFFER,
                                     (GrGLsizeiptr) desc.fSizeInBytes,
                                     NULL,   // data ptr
                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyVertexBufferDelete(desc.fID);
                return NULL;
            }
            GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
            return vertexBuffer;
        }
        return NULL;
    }
}

GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(size_t size, bool dynamic) {
    GrGLIndexBuffer::Desc desc;
    desc.fDynamic = dynamic;
    desc.fSizeInBytes = size;
    desc.fIsWrapped = false;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
        desc.fID = 0;
        GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
        return indexBuffer;
    } else {
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
                                     (GrGLsizeiptr) desc.fSizeInBytes,
                                     NULL,   // data ptr
                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyIndexBufferDelete(desc.fID);
                return NULL;
            }
            GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
            return indexBuffer;
        }
        return NULL;
    }
}

void GrGpuGL::flushScissor(const GrGLIRect& rtViewport, GrSurfaceOrigin rtOrigin) {
    if (fScissorState.fEnabled) {
        GrGLIRect scissor;
        scissor.setRelativeTo(rtViewport,
                              fScissorState.fRect.fLeft,
                              fScissorState.fRect.fTop,
                              fScissorState.fRect.width(),
                              fScissorState.fRect.height(),
                              rtOrigin);
        // if the scissor fully contains the viewport then we fall through and
        // disable the scissor test.
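        // (setRelativeTo also converts the top-left-origin rect into GL's
        // bottom-left, viewport-relative coordinates when rtOrigin is
        // kBottomLeft, so the containment test below is in GL's space.)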
        if (!scissor.contains(rtViewport)) {
            if (fHWScissorSettings.fRect != scissor) {
                scissor.pushToGLScissor(this->glInterface());
                fHWScissorSettings.fRect = scissor;
            }
            if (kYes_TriState != fHWScissorSettings.fEnabled) {
                GL_CALL(Enable(GR_GL_SCISSOR_TEST));
                fHWScissorSettings.fEnabled = kYes_TriState;
            }
            return;
        }
    }
    if (kNo_TriState != fHWScissorSettings.fEnabled) {
        GL_CALL(Disable(GR_GL_SCISSOR_TEST));
        fHWScissorSettings.fEnabled = kNo_TriState;
        return;
    }
}

void GrGpuGL::onClear(GrRenderTarget* target, const SkIRect* rect, GrColor color,
                      bool canIgnoreRect) {
    // parent class should never let us get here with no RT
    SkASSERT(target);
    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);

    if (canIgnoreRect && this->glCaps().fullClearIsFree()) {
        rect = NULL;
    }

    SkIRect clippedRect;
    if (rect) {
        // flushScissor expects rect to be clipped to the target.
        clippedRect = *rect;
        SkIRect rtRect = SkIRect::MakeWH(target->width(), target->height());
        if (clippedRect.intersect(rtRect)) {
            rect = &clippedRect;
        } else {
            return;
        }
    }

    this->flushRenderTarget(glRT, rect);
    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = SkToBool(rect);
    if (fScissorState.fEnabled) {
        fScissorState.fRect = *rect;
    }
    this->flushScissor(glRT->getViewport(), glRT->origin());

    GrGLfloat r, g, b, a;
    static const GrGLfloat scale255 = 1.f / 255.f;
    a = GrColorUnpackA(color) * scale255;
    GrGLfloat scaleRGB = scale255;
    r = GrColorUnpackR(color) * scaleRGB;
    g = GrColorUnpackG(color) * scaleRGB;
    b = GrColorUnpackB(color) * scaleRGB;

    GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
    fHWWriteToColor = kYes_TriState;
    GL_CALL(ClearColor(r, g, b, a));
    GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
}

void GrGpuGL::discard(GrRenderTarget* renderTarget) {
    if (!this->caps()->discardRenderTargetSupport()) {
        return;
    }
    if (NULL == renderTarget) {
        renderTarget = this->drawState()->getRenderTarget();
        if (NULL == renderTarget) {
            return;
        }
    }

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
    if (renderTarget->getUniqueID() != fHWBoundRenderTargetUniqueID) {
        fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
        fGPUStats.incRenderTargetBinds();
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, glRT->renderFBOID()));
    }
    switch (this->glCaps().invalidateFBType()) {
        case GrGLCaps::kNone_InvalidateFBType:
            SkFAIL("Should never get here.");
            break;
        case GrGLCaps::kInvalidate_InvalidateFBType:
            if (0 == glRT->renderFBOID()) {
                // When rendering to the default framebuffer the legal values for attachments
                // are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment
                // types.
                static const GrGLenum attachments[] = { GR_GL_COLOR };
                GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
                                              attachments));
            } else {
                static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 };
                GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
                                              attachments));
            }
            break;
        case GrGLCaps::kDiscard_InvalidateFBType: {
            if (0 == glRT->renderFBOID()) {
                // When rendering to the default framebuffer the legal values for attachments
                // are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment
                // types. See glDiscardFramebuffer() spec.
                static const GrGLenum attachments[] = { GR_GL_COLOR };
                GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
                                           attachments));
            } else {
                static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 };
                GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
                                           attachments));
            }
            break;
        }
    }
    renderTarget->flagAsResolved();
}

void GrGpuGL::clearStencil(GrRenderTarget* target) {
    if (NULL == target) {
        return;
    }
    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
    this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());

    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = false;
    this->flushScissor(glRT->getViewport(), glRT->origin());

    GL_CALL(StencilMask(0xffffffff));
    GL_CALL(ClearStencil(0));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    fHWStencilSettings.invalidate();
}

void GrGpuGL::clearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(target->getStencilBuffer());
    GrGLint stencilBitCount = target->getStencilBuffer()->bits();
#if 0
    SkASSERT(stencilBitCount > 0);
    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrDrawTarget says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask = ~0;
#endif
    GrGLint value;
    if (insideClip) {
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
    this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());

    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = true;
    fScissorState.fRect = rect;
    this->flushScissor(glRT->getViewport(), glRT->origin());

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    fHWStencilSettings.invalidate();
}

bool GrGpuGL::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
                                        int left, int top,
                                        int width, int height,
                                        GrPixelConfig config,
                                        size_t rowBytes) const {
    // If this render target is already TopLeft, we don't need to flip.
    if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
        return false;
    }

    // if GL can do the flip then we'll never pay for it.
    if (this->glCaps().packFlipYSupport()) {
        return false;
    }

    // If we have to do memcpy to handle non-trim rowBytes then we
    // get the flip for free. Otherwise it costs.
    if (this->glCaps().packRowLengthSupport()) {
        return true;
    }
    // If we have to do memcpys to handle rowBytes then y-flip is free
    // Note the rowBytes might be tight to the passed in data, but if data
    // gets clipped in x to the target the rowBytes will no longer be tight.
    if (left >= 0 && (left + width) < renderTarget->width()) {
        return 0 == rowBytes ||
               GrBytesPerPixel(config) * width == rowBytes;
    } else {
        return false;
    }
}

bool GrGpuGL::onReadPixels(GrRenderTarget* target,
                           int left, int top,
                           int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    // We cannot read pixels into a compressed buffer
    if (GrPixelConfigIsCompressed(config)) {
        return false;
    }

    GrGLenum format = 0;
    GrGLenum type = 0;
    bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
    if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
        return false;
    }
    size_t bpp = GrBytesPerPixel(config);
    if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
                                 &left, &top, &width, &height,
                                 const_cast<const void**>(&buffer),
                                 &rowBytes)) {
        return false;
    }

    // resolve the render target if necessary
    GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
    switch (tgt->getResolveType()) {
        case GrGLRenderTarget::kCantResolve_ResolveType:
            return false;
        case GrGLRenderTarget::kAutoResolves_ResolveType:
            this->flushRenderTarget(static_cast<GrGLRenderTarget*>(target),
                                    &SkIRect::EmptyIRect());
            break;
        case GrGLRenderTarget::kCanResolve_ResolveType:
            this->onResolveRenderTarget(tgt);
            // we don't track the state of the READ FBO ID.
            fGPUStats.incRenderTargetBinds();
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
                                    tgt->textureFBOID()));
            break;
        default:
            SkFAIL("Unknown resolve type");
    }

    const GrGLIRect& glvp = tgt->getViewport();

    // the read rect is viewport-relative
    GrGLIRect readRect;
    readRect.setRelativeTo(glvp, left, top, width, height, target->origin());

    size_t tightRowBytes = bpp * width;
    if (0 == rowBytes) {
        rowBytes = tightRowBytes;
    }
    size_t readDstRowBytes = tightRowBytes;
    void* readDst = buffer;

    // determine if GL can read using the passed rowBytes or if we need
    // a scratch buffer.
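    // GL_PACK_ROW_LENGTH is specified in pixels, not bytes, which is why the
    // code below asserts that rowBytes is a whole multiple of the pixel size
    // before dividing.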
    GrAutoMalloc<32 * sizeof(GrColor)> scratch;
    if (rowBytes != tightRowBytes) {
        if (this->glCaps().packRowLengthSupport()) {
            SkASSERT(!(rowBytes % sizeof(GrColor)));
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
                                static_cast<GrGLint>(rowBytes / sizeof(GrColor))));
            readDstRowBytes = rowBytes;
        } else {
            scratch.reset(tightRowBytes * height);
            readDst = scratch.get();
        }
    }
    if (flipY && this->glCaps().packFlipYSupport()) {
        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
    }
    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
                       readRect.fWidth, readRect.fHeight,
                       format, type, readDst));
    if (readDstRowBytes != tightRowBytes) {
        SkASSERT(this->glCaps().packRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
    }
    if (flipY && this->glCaps().packFlipYSupport()) {
        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
        flipY = false;
    }

    // now reverse the order of the rows, since GL's are bottom-to-top, but our
    // API presents top-to-bottom. We must preserve the padding contents. Note
    // that the above readPixels did not overwrite the padding.
    if (readDst == buffer) {
        SkASSERT(rowBytes == readDstRowBytes);
        if (flipY) {
            scratch.reset(tightRowBytes);
            void* tmpRow = scratch.get();
            // flip y in-place by rows
            const int halfY = height >> 1;
            char* top = reinterpret_cast<char*>(buffer);
            char* bottom = top + (height - 1) * rowBytes;
            for (int y = 0; y < halfY; y++) {
                memcpy(tmpRow, top, tightRowBytes);
                memcpy(top, bottom, tightRowBytes);
                memcpy(bottom, tmpRow, tightRowBytes);
                top += rowBytes;
                bottom -= rowBytes;
            }
        }
    } else {
        SkASSERT(readDst != buffer);
        SkASSERT(rowBytes != tightRowBytes);
        // copy from readDst to buffer while flipping y
        const char* src = reinterpret_cast<const char*>(readDst);
        char* dst = reinterpret_cast<char*>(buffer);
        if (flipY) {
            dst += (height - 1) * rowBytes;
        }
        for (int y = 0; y < height; y++) {
            memcpy(dst, src, tightRowBytes);
            src += readDstRowBytes;
            if (!flipY) {
                dst += rowBytes;
            } else {
                dst -= rowBytes;
            }
        }
    }
    return true;
}

void GrGpuGL::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bound) {

    SkASSERT(target);

    uint32_t rtID = target->getUniqueID();
    if (fHWBoundRenderTargetUniqueID != rtID) {
        fGPUStats.incRenderTargetBinds();
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID()));
#ifdef SK_DEBUG
        // don't do this check in Chromium -- this is causing
        // lots of repeated command buffer flushes when the compositor is
        // rendering with Ganesh, which is really slow; even too slow for
        // Debug mode.
        if (!this->glContext().isChromium()) {
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                GrPrintf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status);
            }
        }
#endif
        fHWBoundRenderTargetUniqueID = rtID;
        const GrGLIRect& vp = target->getViewport();
        if (fHWViewport != vp) {
            vp.pushToGLViewport(this->glInterface());
            fHWViewport = vp;
        }
    }
    if (NULL == bound || !bound->isEmpty()) {
        target->flagAsNeedingResolve(bound);
    }

    GrTexture* texture = target->asTexture();
    if (texture) {
        texture->impl()->dirtyMipMaps(true);
    }
}

static const GrGLenum gPrimitiveType2GLMode[] = {
    GR_GL_TRIANGLES,
    GR_GL_TRIANGLE_STRIP,
    GR_GL_TRIANGLE_FAN,
    GR_GL_POINTS,
    GR_GL_LINES,
    GR_GL_LINE_STRIP
};

#define SWAP_PER_DRAW 0

#if SWAP_PER_DRAW
#if defined(SK_BUILD_FOR_MAC)
    #include <AGL/agl.h>
#elif defined(SK_BUILD_FOR_WIN32)
    #include <gl/GL.h>
    void SwapBuf() {
        DWORD procID = GetCurrentProcessId();
        HWND hwnd = GetTopWindow(GetDesktopWindow());
        while (hwnd) {
            DWORD wndProcID = 0;
            GetWindowThreadProcessId(hwnd, &wndProcID);
            if (wndProcID == procID) {
                SwapBuffers(GetDC(hwnd));
            }
            hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
        }
    }
#endif
#endif

void GrGpuGL::onGpuDraw(const DrawInfo& info) {
    size_t indexOffsetInBytes;
    this->setupGeometry(info, &indexOffsetInBytes);

    SkASSERT((size_t)info.primitiveType() < SK_ARRAY_COUNT(gPrimitiveType2GLMode));

    if (info.isIndexed()) {
        GrGLvoid* indices =
            reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex());
        // info.startVertex() was accounted for by setupGeometry.
        GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()],
                             info.indexCount(),
                             GR_GL_UNSIGNED_SHORT,
                             indices));
    } else {
        // Pass 0 as the first vertex: glVertexAttribPointer() has to be adjusted to account for
        // startVertex in the DrawElements case anyway, so we always rely on setupGeometry to
        // have accounted for startVertex.
        GL_CALL(DrawArrays(gPrimitiveType2GLMode[info.primitiveType()], 0, info.vertexCount()));
    }
#if SWAP_PER_DRAW
    glFlush();
#if defined(SK_BUILD_FOR_MAC)
    aglSwapBuffers(aglGetCurrentContext());
    int set_a_break_pt_here = 9;
    aglSwapBuffers(aglGetCurrentContext());
#elif defined(SK_BUILD_FOR_WIN32)
    SwapBuf();
    int set_a_break_pt_here = 9;
    SwapBuf();
#endif
#endif
}

void GrGpuGL::onResolveRenderTarget(GrRenderTarget* target) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
    if (rt->needsResolve()) {
        // Some extensions automatically resolve the texture when it is read.
        if (this->glCaps().usesMSAARenderBuffers()) {
            SkASSERT(rt->textureFBOID() != rt->renderFBOID());
            // Two increments: one each for the READ and DRAW FBO binds below.
            fGPUStats.incRenderTargetBinds();
            fGPUStats.incRenderTargetBinds();
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
            GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
            // make sure we go through flushRenderTarget() since we've modified
            // the bound DRAW FBO ID.
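            // Invalidating the cached ID is sufficient; the next draw will
            // rebind GR_GL_FRAMEBUFFER via flushRenderTarget().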
            fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
            const GrGLIRect& vp = rt->getViewport();
            const SkIRect dirtyRect = rt->getResolveRect();
            GrGLIRect r;
            r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
                            dirtyRect.width(), dirtyRect.height(), target->origin());

            GrAutoTRestore<ScissorState> asr;
            if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
                // Apple's extension uses the scissor as the blit bounds.
                asr.reset(&fScissorState);
                fScissorState.fEnabled = true;
                fScissorState.fRect = dirtyRect;
                this->flushScissor(rt->getViewport(), rt->origin());
                GL_CALL(ResolveMultisampleFramebuffer());
            } else {
                int right = r.fLeft + r.fWidth;
                int top = r.fBottom + r.fHeight;

                // BlitFramebuffer respects the scissor, so disable it.
                asr.reset(&fScissorState);
                fScissorState.fEnabled = false;
                this->flushScissor(rt->getViewport(), rt->origin());
                GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
                                        r.fLeft, r.fBottom, right, top,
                                        GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            }
        }
        rt->flagAsResolved();
    }
}

namespace {

GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
    static const GrGLenum gTable[] = {
        GR_GL_KEEP,        // kKeep_StencilOp
        GR_GL_REPLACE,     // kReplace_StencilOp
        GR_GL_INCR_WRAP,   // kIncWrap_StencilOp
        GR_GL_INCR,        // kIncClamp_StencilOp
        GR_GL_DECR_WRAP,   // kDecWrap_StencilOp
        GR_GL_DECR,        // kDecClamp_StencilOp
        GR_GL_ZERO,        // kZero_StencilOp
        GR_GL_INVERT,      // kInvert_StencilOp
    };
    GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kStencilOpCount);
    GR_STATIC_ASSERT(0 == kKeep_StencilOp);
    GR_STATIC_ASSERT(1 == kReplace_StencilOp);
    GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
    GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
    GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
    GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
    GR_STATIC_ASSERT(6 == kZero_StencilOp);
    GR_STATIC_ASSERT(7 == kInvert_StencilOp);
    SkASSERT((unsigned) op < kStencilOpCount);
    return gTable[op];
}

void set_gl_stencil(const GrGLInterface* gl,
                    const GrStencilSettings& settings,
                    GrGLenum glFace,
                    GrStencilSettings::Face grFace) {
    GrGLenum glFunc = GrToGLStencilFunc(settings.func(grFace));
    GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace));
    GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace));

    GrGLint ref = settings.funcRef(grFace);
    GrGLint mask = settings.funcMask(grFace);
    GrGLint writeMask = settings.writeMask(grFace);

    if (GR_GL_FRONT_AND_BACK == glFace) {
        // we call the combined func just in case separate stencil is not
        // supported.
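        // In GL the non-Separate StencilFunc/StencilOp/StencilMask entry
        // points set the front and back state together.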
        GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMask(writeMask));
        GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp));
    } else {
        GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
        GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp));
    }
}

}  // namespace

void GrGpuGL::flushStencil(DrawType type) {
    if (kStencilPath_DrawType != type && fHWStencilSettings != fStencilSettings) {
        if (fStencilSettings.isDisabled()) {
            if (kNo_TriState != fHWStencilTestEnabled) {
                GL_CALL(Disable(GR_GL_STENCIL_TEST));
                fHWStencilTestEnabled = kNo_TriState;
            }
        } else {
            if (kYes_TriState != fHWStencilTestEnabled) {
                GL_CALL(Enable(GR_GL_STENCIL_TEST));
                fHWStencilTestEnabled = kYes_TriState;
            }
        }
        if (!fStencilSettings.isDisabled()) {
            if (this->caps()->twoSidedStencilSupport()) {
                set_gl_stencil(this->glInterface(),
                               fStencilSettings,
                               GR_GL_FRONT,
                               GrStencilSettings::kFront_Face);
                set_gl_stencil(this->glInterface(),
                               fStencilSettings,
                               GR_GL_BACK,
                               GrStencilSettings::kBack_Face);
            } else {
                set_gl_stencil(this->glInterface(),
                               fStencilSettings,
                               GR_GL_FRONT_AND_BACK,
                               GrStencilSettings::kFront_Face);
            }
        }
        fHWStencilSettings = fStencilSettings;
    }
}

void GrGpuGL::flushAAState(const GrOptDrawState& optState, DrawType type) {
    // At least some ATI Linux drivers will render GL_LINES incorrectly when MSAA state is
    // enabled but the target is not multisampled: single-pixel-wide lines are rendered thicker
    // than one pixel.
#if 0
    // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant
    // concern.
    #define RT_HAS_MSAA rt->isMultisampled()
#else
    #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == type)
#endif

    const GrRenderTarget* rt = optState.getRenderTarget();
    if (kGL_GrGLStandard == this->glStandard()) {
        if (RT_HAS_MSAA) {
            // FIXME: GL_NV_path_rendering doesn't seem to like MSAA disabled: the convex hulls
            // of each path segment appear to get filled.
            bool enableMSAA = kStencilPath_DrawType == type ||
                              optState.isHWAntialiasState();
            if (enableMSAA) {
                if (kYes_TriState != fMSAAEnabled) {
                    GL_CALL(Enable(GR_GL_MULTISAMPLE));
                    fMSAAEnabled = kYes_TriState;
                }
            } else {
                if (kNo_TriState != fMSAAEnabled) {
                    GL_CALL(Disable(GR_GL_MULTISAMPLE));
                    fMSAAEnabled = kNo_TriState;
                }
            }
        }
    }
}

void GrGpuGL::flushBlend(const GrOptDrawState& optState, bool isLines,
                         GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff) {
    // Any optimization to disable blending should have already been applied and
    // tweaked the coeffs to (1, 0).
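    // A (ONE, ZERO) func writes the src color unmodified, so disabling
    // GL_BLEND is equivalent and cheaper than blending against the dst.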
    bool blendOff = kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
    if (blendOff) {
        if (kNo_TriState != fHWBlendState.fEnabled) {
            GL_CALL(Disable(GR_GL_BLEND));
            fHWBlendState.fEnabled = kNo_TriState;
        }
    } else {
        if (kYes_TriState != fHWBlendState.fEnabled) {
            GL_CALL(Enable(GR_GL_BLEND));
            fHWBlendState.fEnabled = kYes_TriState;
        }
        if (fHWBlendState.fSrcCoeff != srcCoeff ||
            fHWBlendState.fDstCoeff != dstCoeff) {
            GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
                              gXfermodeCoeff2Blend[dstCoeff]));
            fHWBlendState.fSrcCoeff = srcCoeff;
            fHWBlendState.fDstCoeff = dstCoeff;
        }
        GrColor blendConst = optState.getBlendConstant();
        if ((BlendCoeffReferencesConstant(srcCoeff) ||
             BlendCoeffReferencesConstant(dstCoeff)) &&
            (!fHWBlendState.fConstColorValid ||
             fHWBlendState.fConstColor != blendConst)) {
            GrGLfloat c[4];
            GrColorToRGBAFloat(blendConst, c);
            GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
            fHWBlendState.fConstColor = blendConst;
            fHWBlendState.fConstColorValid = true;
        }
    }
}

static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
    static const GrGLenum gWrapModes[] = {
        GR_GL_CLAMP_TO_EDGE,
        GR_GL_REPEAT,
        GR_GL_MIRRORED_REPEAT
    };
    GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
    GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
    GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
    GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
    return gWrapModes[tm];
}

void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
    SkASSERT(texture);

    // If we created a rt/tex and rendered to it without using a texture and now we're texturing
    // from the rt it will still be the last bound texture, but it needs resolving. So keep this
    // out of the "last != next" check.
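    // Texturing from an unresolved MSAA render target would sample stale
    // single-sample data, so resolve it first.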
    GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
    if (texRT) {
        this->onResolveRenderTarget(texRT);
    }

    uint32_t textureID = texture->getUniqueID();
    if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) {
        this->setTextureUnit(unitIdx);
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID()));
        fHWBoundTextureUniqueIDs[unitIdx] = textureID;
    }

    ResetTimestamp timestamp;
    const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
    bool setAll = timestamp < this->getResetTimestamp();
    GrGLTexture::TexParams newTexParams;

    static const GrGLenum glMinFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR_MIPMAP_LINEAR
    };
    static const GrGLenum glMagFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR
    };
    GrTextureParams::FilterMode filterMode = params.filterMode();
    if (!this->caps()->mipMapSupport() && GrTextureParams::kMipMap_FilterMode == filterMode) {
        filterMode = GrTextureParams::kBilerp_FilterMode;
    }
    newTexParams.fMinFilter = glMinFilterModes[filterMode];
    newTexParams.fMagFilter = glMagFilterModes[filterMode];

    if (GrTextureParams::kMipMap_FilterMode == filterMode &&
        texture->mipMapsAreDirty() && !GrPixelConfigIsCompressed(texture->config())) {
        GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D));
        texture->dirtyMipMaps(false);
    }

    newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
    newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
    memcpy(newTexParams.fSwizzleRGBA,
           GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()),
           sizeof(newTexParams.fSwizzleRGBA));
    if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_MAG_FILTER,
                              newTexParams.fMagFilter));
    }
    if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_MIN_FILTER,
                              newTexParams.fMinFilter));
    }
    if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_WRAP_S,
                              newTexParams.fWrapS));
    }
    if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_WRAP_T,
                              newTexParams.fWrapT));
    }
    if (this->glCaps().textureSwizzleSupport() &&
        (setAll || memcmp(newTexParams.fSwizzleRGBA,
                          oldTexParams.fSwizzleRGBA,
                          sizeof(newTexParams.fSwizzleRGBA)))) {
        this->setTextureUnit(unitIdx);
        if (this->glStandard() == kGLES_GrGLStandard) {
            // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
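            // Set each channel's swizzle individually instead.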
            const GrGLenum* swizzle = newTexParams.fSwizzleRGBA;
            GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0]));
            GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1]));
            GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2]));
            GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3]));
        } else {
            GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint));
            const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexParams.fSwizzleRGBA);
            GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle));
        }
    }
    texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
}

void GrGpuGL::flushMiscFixedFunctionState(const GrOptDrawState& optState) {
    if (optState.isDitherState()) {
        if (kYes_TriState != fHWDitherEnabled) {
            GL_CALL(Enable(GR_GL_DITHER));
            fHWDitherEnabled = kYes_TriState;
        }
    } else {
        if (kNo_TriState != fHWDitherEnabled) {
            GL_CALL(Disable(GR_GL_DITHER));
            fHWDitherEnabled = kNo_TriState;
        }
    }

    if (optState.isColorWriteDisabled()) {
        if (kNo_TriState != fHWWriteToColor) {
            GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
                              GR_GL_FALSE, GR_GL_FALSE));
            fHWWriteToColor = kNo_TriState;
        }
    } else {
        if (kYes_TriState != fHWWriteToColor) {
            GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
            fHWWriteToColor = kYes_TriState;
        }
    }

    if (fHWDrawFace != optState.getDrawFace()) {
        switch (optState.getDrawFace()) {
            case GrDrawState::kCCW_DrawFace:
                GL_CALL(Enable(GR_GL_CULL_FACE));
                GL_CALL(CullFace(GR_GL_BACK));
                break;
            case GrDrawState::kCW_DrawFace:
                GL_CALL(Enable(GR_GL_CULL_FACE));
                GL_CALL(CullFace(GR_GL_FRONT));
                break;
            case GrDrawState::kBoth_DrawFace:
                GL_CALL(Disable(GR_GL_CULL_FACE));
                break;
            default:
                SkFAIL("Unknown draw face.");
        }
        fHWDrawFace = optState.getDrawFace();
    }
}

bool GrGpuGL::configToGLFormats(GrPixelConfig config,
                                bool getSizedInternalFormat,
                                GrGLenum* internalFormat,
                                GrGLenum* externalFormat,
                                GrGLenum* externalType) {
    GrGLenum dontCare;
    if (NULL == internalFormat) {
        internalFormat = &dontCare;
    }
    if (NULL == externalFormat) {
        externalFormat = &dontCare;
    }
    if (NULL == externalType) {
        externalType = &dontCare;
    }

    if (!this->glCaps().isConfigTexturable(config)) {
        return false;
    }

    switch (config) {
        case kRGBA_8888_GrPixelConfig:
            *externalFormat = GR_GL_RGBA;
            if (getSizedInternalFormat) {
                *internalFormat = GR_GL_RGBA8;
            } else {
                *internalFormat = GR_GL_RGBA;
            }
            *externalType = GR_GL_UNSIGNED_BYTE;
            break;
        case kBGRA_8888_GrPixelConfig:
            if (this->glCaps().bgraIsInternalFormat()) {
                if (getSizedInternalFormat) {
                    *internalFormat = GR_GL_BGRA8;
                } else {
                    *internalFormat = GR_GL_BGRA;
                }
            } else {
                if (getSizedInternalFormat) {
                    *internalFormat = GR_GL_RGBA8;
                } else {
                    *internalFormat = GR_GL_RGBA;
                }
            }
            *externalFormat = GR_GL_BGRA;
            *externalType = GR_GL_UNSIGNED_BYTE;
            break;
        case kRGB_565_GrPixelConfig:
            *externalFormat = GR_GL_RGB;
            if (getSizedInternalFormat) {
                if (this->glStandard() == kGL_GrGLStandard) {
                    return false;
                } else {
                    *internalFormat = GR_GL_RGB565;
                }
            } else {
                *internalFormat = GR_GL_RGB;
            }
            *externalType = GR_GL_UNSIGNED_SHORT_5_6_5;
            break;
        case kRGBA_4444_GrPixelConfig:
            *externalFormat = GR_GL_RGBA;
            if (getSizedInternalFormat) {
                *internalFormat = GR_GL_RGBA4;
            } else {
                *internalFormat = GR_GL_RGBA;
            }
            *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4;
            break;
        case kIndex_8_GrPixelConfig:
            // no sized/unsized internal format distinction here
            *internalFormat = GR_GL_PALETTE8_RGBA8;
            break;
        case kAlpha_8_GrPixelConfig:
            if (this->glCaps().textureRedSupport()) {
                *externalFormat = GR_GL_RED;
                if (getSizedInternalFormat) {
                    *internalFormat = GR_GL_R8;
                } else {
                    *internalFormat = GR_GL_RED;
                }
                *externalType = GR_GL_UNSIGNED_BYTE;
            } else {
                *externalFormat = GR_GL_ALPHA;
                if (getSizedInternalFormat) {
                    *internalFormat = GR_GL_ALPHA8;
                } else {
                    *internalFormat = GR_GL_ALPHA;
                }
                *externalType = GR_GL_UNSIGNED_BYTE;
            }
            break;
        case kETC1_GrPixelConfig:
            *internalFormat = GR_GL_COMPRESSED_RGB8_ETC1;
            break;
        case kLATC_GrPixelConfig:
            switch (this->glCaps().latcAlias()) {
                case GrGLCaps::kLATC_LATCAlias:
                    *internalFormat = GR_GL_COMPRESSED_LUMINANCE_LATC1;
                    break;
                case GrGLCaps::kRGTC_LATCAlias:
                    *internalFormat = GR_GL_COMPRESSED_RED_RGTC1;
                    break;
                case GrGLCaps::k3DC_LATCAlias:
                    *internalFormat = GR_GL_COMPRESSED_3DC_X;
                    break;
            }
            break;
        case kR11_EAC_GrPixelConfig:
            *internalFormat = GR_GL_COMPRESSED_R11;
            break;
        case kASTC_12x12_GrPixelConfig:
            *internalFormat = GR_GL_COMPRESSED_RGBA_ASTC_12x12;
            break;
        case kRGBA_float_GrPixelConfig:
            *internalFormat = GR_GL_RGBA32F;
            *externalFormat = GR_GL_RGBA;
            *externalType = GR_GL_FLOAT;
            break;
        default:
            return false;
    }
    return true;
}

void GrGpuGL::setTextureUnit(int unit) {
    SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count());
    if (unit != fHWActiveTextureUnitIdx) {
        GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
        fHWActiveTextureUnitIdx = unit;
    }
}

void GrGpuGL::setScratchTextureUnit() {
    // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
    int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1;
    if (lastUnitIdx != fHWActiveTextureUnitIdx) {
        GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
        fHWActiveTextureUnitIdx = lastUnitIdx;
    }
    // Clear out this field so that if a program does use this unit it will rebind the correct
    // texture.
    fHWBoundTextureUniqueIDs[lastUnitIdx] = SK_InvalidUniqueID;
}

namespace {
// Determines whether glBlitFramebuffer could be used between src and dst.
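// On success, *wouldNeedTempFBO (when provided) is set to true if either
// surface is not already a render target and would have to be wrapped in a
// temporary FBO (see bindSurfaceAsFBO below) in order to blit.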
inline bool can_blit_framebuffer(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGpuGL* gpu,
                                 bool* wouldNeedTempFBO = NULL) {
    if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) &&
        gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
        gpu->glCaps().usesMSAARenderBuffers()) {
        // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match
        // or the rects are not the same (not just the same size but have the same edges).
        if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
            (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
            return false;
        }
        if (wouldNeedTempFBO) {
            *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget();
        }
        return true;
    } else {
        return false;
    }
}

inline bool can_copy_texsubimage(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGpuGL* gpu,
                                 bool* wouldNeedTempFBO = NULL) {
    // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage
    // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps
    // many drivers would allow it to work, but ANGLE does not.
    if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
        (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
        return false;
    }
    const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
    // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
    // then we don't want to copy to the texture but to the MSAA buffer.
    if (dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
        return false;
    }
    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    // If the src is multisampled (and uses an extension where there is a separate MSAA
    // renderbuffer) then it is an invalid operation to call CopyTexSubImage.
    if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        return false;
    }
    if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
        dst->asTexture() &&
        dst->origin() == src->origin() &&
        !GrPixelConfigIsCompressed(src->config())) {
        if (wouldNeedTempFBO) {
            *wouldNeedTempFBO = NULL == src->asRenderTarget();
        }
        return true;
    } else {
        return false;
    }
}

}  // namespace

// If a temporary FBO was created, its non-zero ID is returned. The viewport that the copy rect
// is relative to is output.
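// Typical usage, as in onCopySurface below:
//     GrGLIRect vp;
//     GrGLuint fbo = this->bindSurfaceAsFBO(surf, GR_GL_READ_FRAMEBUFFER, &vp);
//     // ... issue copy/blit calls with rects made relative to vp ...
//     if (fbo) {
//         GL_CALL(DeleteFramebuffers(1, &fbo));
//     }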
GrGLuint GrGpuGL::bindSurfaceAsFBO(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    GrGLuint tempFBOID;
    if (NULL == rt) {
        SkASSERT(surface->asTexture());
        GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
        GR_GL_CALL(this->glInterface(), GenFramebuffers(1, &tempFBOID));
        fGPUStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, tempFBOID));
        GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
                                                             GR_GL_COLOR_ATTACHMENT0,
                                                             GR_GL_TEXTURE_2D,
                                                             texID,
                                                             0));
        viewport->fLeft = 0;
        viewport->fBottom = 0;
        viewport->fWidth = surface->width();
        viewport->fHeight = surface->height();
    } else {
        tempFBOID = 0;
        fGPUStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBOID()));
        *viewport = rt->getViewport();
    }
    return tempFBOID;
}

void GrGpuGL::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    // Check for format issues with glCopyTexSubImage2D
    if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
        kBGRA_8888_GrPixelConfig == src->config()) {
        // glCopyTexSubImage2D doesn't work with this config. We'll want to make it a render
        // target in order to call glBlitFramebuffer or to copy to it by rendering.
        INHERITED::initCopySurfaceDstDesc(src, desc);
        return;
    } else if (NULL == src->asRenderTarget()) {
        // We don't want to have to create an FBO just to use glCopyTexSubImage2D. Let the base
        // class handle it by rendering.
        INHERITED::initCopySurfaceDstDesc(src, desc);
        return;
    }

    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer.
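        // Fall back to the base class, which will pick a renderable format
        // and copy by drawing.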
        INHERITED::initCopySurfaceDstDesc(src, desc);
    } else {
        desc->fConfig = src->config();
        desc->fOrigin = src->origin();
        desc->fFlags = kNone_GrTextureFlags;
    }
}

bool GrGpuGL::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    bool inheritedCouldCopy = INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
    bool copied = false;
    bool wouldNeedTempFBO = false;
    if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) &&
        (!wouldNeedTempFBO || !inheritedCouldCopy)) {
        GrGLuint srcFBO;
        GrGLIRect srcVP;
        srcFBO = this->bindSurfaceAsFBO(src, GR_GL_FRAMEBUFFER, &srcVP);
        GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
        SkASSERT(dstTex);
        // We modified the bound FBO
        fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
        GrGLIRect srcGLRect;
        srcGLRect.setRelativeTo(srcVP,
                                srcRect.fLeft,
                                srcRect.fTop,
                                srcRect.width(),
                                srcRect.height(),
                                src->origin());

        this->setScratchTextureUnit();
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID()));
        GrGLint dstY;
        if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
            dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
        } else {
            dstY = dstPoint.fY;
        }
        GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0,
                                  dstPoint.fX, dstY,
                                  srcGLRect.fLeft, srcGLRect.fBottom,
                                  srcGLRect.fWidth, srcGLRect.fHeight));
        copied = true;
        if (srcFBO) {
            GL_CALL(DeleteFramebuffers(1, &srcFBO));
        }
    } else if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) &&
               (!wouldNeedTempFBO || !inheritedCouldCopy)) {
        SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                            srcRect.width(), srcRect.height());
        bool selfOverlap = false;
        if (dst->isSameAs(src)) {
            selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect);
        }

        if (!selfOverlap) {
            GrGLuint dstFBO;
            GrGLuint srcFBO;
            GrGLIRect dstVP;
            GrGLIRect srcVP;
            dstFBO = this->bindSurfaceAsFBO(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP);
            srcFBO = this->bindSurfaceAsFBO(src, GR_GL_READ_FRAMEBUFFER, &srcVP);
            // We modified the bound FBO
            fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
            GrGLIRect srcGLRect;
            GrGLIRect dstGLRect;
            srcGLRect.setRelativeTo(srcVP,
                                    srcRect.fLeft,
                                    srcRect.fTop,
                                    srcRect.width(),
                                    srcRect.height(),
                                    src->origin());
            dstGLRect.setRelativeTo(dstVP,
                                    dstRect.fLeft,
                                    dstRect.fTop,
                                    dstRect.width(),
                                    dstRect.height(),
                                    dst->origin());

            GrAutoTRestore<ScissorState> asr;
            // BlitFramebuffer respects the scissor, so disable it.
            asr.reset(&fScissorState);
            fScissorState.fEnabled = false;
            this->flushScissor(dstGLRect, dst->origin());

            GrGLint srcY0;
            GrGLint srcY1;
            // Does the blit need to y-mirror or not?
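            // BlitFramebuffer performs a y-flip whenever srcY0 > srcY1, so
            // swap the src Y coordinates when the two origins differ.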
            if (src->origin() == dst->origin()) {
                srcY0 = srcGLRect.fBottom;
                srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
            } else {
                srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
                srcY1 = srcGLRect.fBottom;
            }
            GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
                                    srcY0,
                                    srcGLRect.fLeft + srcGLRect.fWidth,
                                    srcY1,
                                    dstGLRect.fLeft,
                                    dstGLRect.fBottom,
                                    dstGLRect.fLeft + dstGLRect.fWidth,
                                    dstGLRect.fBottom + dstGLRect.fHeight,
                                    GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            if (dstFBO) {
                GL_CALL(DeleteFramebuffers(1, &dstFBO));
            }
            if (srcFBO) {
                GL_CALL(DeleteFramebuffers(1, &srcFBO));
            }
            copied = true;
        }
    }
    if (!copied && inheritedCouldCopy) {
        copied = INHERITED::onCopySurface(dst, src, srcRect, dstPoint);
        SkASSERT(copied);
    }
    return copied;
}

bool GrGpuGL::onCanCopySurface(GrSurface* dst,
                               GrSurface* src,
                               const SkIRect& srcRect,
                               const SkIPoint& dstPoint) {
    // This mirrors the logic in onCopySurface.
    if (can_copy_texsubimage(dst, src, this)) {
        return true;
    }
    if (can_blit_framebuffer(dst, src, this)) {
        if (dst->isSameAs(src)) {
            SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                                srcRect.width(), srcRect.height());
            if (!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
                return true;
            }
        } else {
            return true;
        }
    }
    return INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
}

void GrGpuGL::didAddGpuTraceMarker() {
    if (this->caps()->gpuTracingSupport()) {
        const GrTraceMarkerSet& markerArray = this->getActiveTraceMarkers();
        SkString markerString = markerArray.toStringLast();
        GL_CALL(PushGroupMarker(0, markerString.c_str()));
    }
}

void GrGpuGL::didRemoveGpuTraceMarker() {
    if (this->caps()->gpuTracingSupport()) {
        GL_CALL(PopGroupMarker());
    }
}

///////////////////////////////////////////////////////////////////////////////

GrGLAttribArrayState* GrGpuGL::HWGeometryState::bindArrayAndBuffersToDraw(
                                                GrGpuGL* gpu,
                                                const GrGLVertexBuffer* vbuffer,
                                                const GrGLIndexBuffer* ibuffer) {
    SkASSERT(vbuffer);
    GrGLAttribArrayState* attribState;

    // We use a vertex array if we're on a core profile and the verts are in a VBO.
    if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) {
        if (NULL == fVBOVertexArray || fVBOVertexArray->wasDestroyed()) {
            SkSafeUnref(fVBOVertexArray);
            GrGLuint arrayID;
            GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
            int attrCount = gpu->glCaps().maxVertexAttributes();
            fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount));
        }
        attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer);
    } else {
        if (ibuffer) {
            this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID());
        } else {
            this->setVertexArrayID(gpu, 0);
        }
        int attrCount = gpu->glCaps().maxVertexAttributes();
        if (fDefaultVertexArrayAttribState.count() != attrCount) {
            fDefaultVertexArrayAttribState.resize(attrCount);
        }
        attribState = &fDefaultVertexArrayAttribState;
    }
    return attribState;
}