1 /* 2 * Copyright 2011 Google Inc. 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8 9 #include "GrGpuGL.h" 10 #include "GrGLStencilBuffer.h" 11 #include "GrGLPath.h" 12 #include "GrGLShaderBuilder.h" 13 #include "GrTemplates.h" 14 #include "GrTypes.h" 15 #include "SkTemplates.h" 16 17 static const GrGLuint GR_MAX_GLUINT = ~0U; 18 static const GrGLint GR_INVAL_GLINT = ~0; 19 20 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) 21 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) 22 23 24 #define SKIP_CACHE_CHECK true 25 26 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR 27 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) 28 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) 29 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) 30 #else 31 #define CLEAR_ERROR_BEFORE_ALLOC(iface) 32 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) 33 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR 34 #endif 35 36 37 /////////////////////////////////////////////////////////////////////////////// 38 39 static const GrGLenum gXfermodeCoeff2Blend[] = { 40 GR_GL_ZERO, 41 GR_GL_ONE, 42 GR_GL_SRC_COLOR, 43 GR_GL_ONE_MINUS_SRC_COLOR, 44 GR_GL_DST_COLOR, 45 GR_GL_ONE_MINUS_DST_COLOR, 46 GR_GL_SRC_ALPHA, 47 GR_GL_ONE_MINUS_SRC_ALPHA, 48 GR_GL_DST_ALPHA, 49 GR_GL_ONE_MINUS_DST_ALPHA, 50 GR_GL_CONSTANT_COLOR, 51 GR_GL_ONE_MINUS_CONSTANT_COLOR, 52 GR_GL_CONSTANT_ALPHA, 53 GR_GL_ONE_MINUS_CONSTANT_ALPHA, 54 55 // extended blend coeffs 56 GR_GL_SRC1_COLOR, 57 GR_GL_ONE_MINUS_SRC1_COLOR, 58 GR_GL_SRC1_ALPHA, 59 GR_GL_ONE_MINUS_SRC1_ALPHA, 60 }; 61 62 bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) { 63 static const bool gCoeffReferencesBlendConst[] = { 64 false, 65 false, 66 false, 67 false, 68 false, 69 false, 70 false, 71 false, 72 false, 73 false, 74 true, 75 true, 76 true, 77 true, 78 79 // extended blend coeffs 80 false, 81 false, 82 false, 83 false, 
84 }; 85 return gCoeffReferencesBlendConst[coeff]; 86 GR_STATIC_ASSERT(kTotalGrBlendCoeffCount == 87 GR_ARRAY_COUNT(gCoeffReferencesBlendConst)); 88 89 GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff); 90 GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff); 91 GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff); 92 GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff); 93 GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff); 94 GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff); 95 GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff); 96 GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff); 97 GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff); 98 GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff); 99 GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff); 100 GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff); 101 GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff); 102 GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff); 103 104 GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff); 105 GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff); 106 GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff); 107 GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff); 108 109 // assertion for gXfermodeCoeff2Blend have to be in GrGpu scope 110 GR_STATIC_ASSERT(kTotalGrBlendCoeffCount == 111 GR_ARRAY_COUNT(gXfermodeCoeff2Blend)); 112 } 113 114 /////////////////////////////////////////////////////////////////////////////// 115 116 static bool gPrintStartupSpew; 117 118 GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context) 119 : GrGpu(context) 120 , fGLContext(ctx) { 121 122 GrAssert(ctx.isInitialized()); 123 124 fCaps.reset(SkRef(ctx.info().caps())); 125 126 fHWBoundTextures.reset(ctx.info().caps()->maxFragmentTextureUnits()); 127 128 fillInConfigRenderableTable(); 129 130 131 GrGLClearErr(fGLContext.interface()); 132 133 if (gPrintStartupSpew) { 134 const GrGLubyte* vendor; 135 const GrGLubyte* renderer; 136 const GrGLubyte* version; 137 GL_CALL_RET(vendor, GetString(GR_GL_VENDOR)); 138 GL_CALL_RET(renderer, GetString(GR_GL_RENDERER)); 139 GL_CALL_RET(version, GetString(GR_GL_VERSION)); 140 GrPrintf("------------------------- create GrGpuGL %p 
--------------\n", 141 this); 142 GrPrintf("------ VENDOR %s\n", vendor); 143 GrPrintf("------ RENDERER %s\n", renderer); 144 GrPrintf("------ VERSION %s\n", version); 145 GrPrintf("------ EXTENSIONS\n"); 146 ctx.info().extensions().print(); 147 GrPrintf("\n"); 148 ctx.info().caps()->print(); 149 } 150 151 fProgramCache = SkNEW_ARGS(ProgramCache, (this->glContext())); 152 153 GrAssert(this->glCaps().maxVertexAttributes() >= GrDrawState::kMaxVertexAttribCnt); 154 155 fLastSuccessfulStencilFmtIdx = 0; 156 fHWProgramID = 0; 157 } 158 159 GrGpuGL::~GrGpuGL() { 160 if (0 != fHWProgramID) { 161 // detach the current program so there is no confusion on OpenGL's part 162 // that we want it to be deleted 163 GrAssert(fHWProgramID == fCurrentProgram->programID()); 164 GL_CALL(UseProgram(0)); 165 } 166 167 delete fProgramCache; 168 169 // This must be called by before the GrDrawTarget destructor 170 this->releaseGeometry(); 171 // This subclass must do this before the base class destructor runs 172 // since we will unref the GrGLInterface. 173 this->releaseResources(); 174 } 175 176 /////////////////////////////////////////////////////////////////////////////// 177 178 void GrGpuGL::fillInConfigRenderableTable() { 179 180 // OpenGL < 3.0 181 // no support for render targets unless the GL_ARB_framebuffer_object 182 // extension is supported (in which case we get ALPHA, RED, RG, RGB, 183 // RGBA (ALPHA8, RGBA4, RGBA8) for OpenGL > 1.1). Note that we 184 // probably don't get R8 in this case. 
185 186 // OpenGL 3.0 187 // base color renderable: ALPHA, RED, RG, RGB, and RGBA 188 // sized derivatives: ALPHA8, R8, RGBA4, RGBA8 189 190 // >= OpenGL 3.1 191 // base color renderable: RED, RG, RGB, and RGBA 192 // sized derivatives: R8, RGBA4, RGBA8 193 // if the GL_ARB_compatibility extension is supported then we get back 194 // support for GL_ALPHA and ALPHA8 195 196 // GL_EXT_bgra adds BGRA render targets to any version 197 198 // ES 2.0 199 // color renderable: RGBA4, RGB5_A1, RGB565 200 // GL_EXT_texture_rg adds support for R8 as a color render target 201 // GL_OES_rgb8_rgba8 and/or GL_ARM_rgba8 adds support for RGBA8 202 // GL_EXT_texture_format_BGRA8888 and/or GL_APPLE_texture_format_BGRA8888 added BGRA support 203 204 if (kDesktop_GrGLBinding == this->glBinding()) { 205 // Post 3.0 we will get R8 206 // Prior to 3.0 we will get ALPHA8 (with GL_ARB_framebuffer_object) 207 if (this->glVersion() >= GR_GL_VER(3,0) || 208 this->hasExtension("GL_ARB_framebuffer_object")) { 209 fConfigRenderSupport[kAlpha_8_GrPixelConfig] = true; 210 } 211 } else { 212 // On ES we can only hope for R8 213 fConfigRenderSupport[kAlpha_8_GrPixelConfig] = 214 this->glCaps().textureRedSupport(); 215 } 216 217 if (kDesktop_GrGLBinding != this->glBinding()) { 218 // only available in ES 219 fConfigRenderSupport[kRGB_565_GrPixelConfig] = true; 220 } 221 222 // we no longer support 444 as a render target 223 fConfigRenderSupport[kRGBA_4444_GrPixelConfig] = false; 224 225 if (this->glCaps().rgba8RenderbufferSupport()) { 226 fConfigRenderSupport[kRGBA_8888_GrPixelConfig] = true; 227 } 228 229 if (this->glCaps().bgraFormatSupport()) { 230 fConfigRenderSupport[kBGRA_8888_GrPixelConfig] = true; 231 } 232 } 233 234 GrPixelConfig GrGpuGL::preferredReadPixelsConfig(GrPixelConfig readConfig, 235 GrPixelConfig surfaceConfig) const { 236 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) { 237 return kBGRA_8888_GrPixelConfig; 238 } else if (fGLContext.info().isMesa() && 
239 GrBytesPerPixel(readConfig) == 4 && 240 GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) { 241 // Mesa 3D takes a slow path on when reading back BGRA from an RGBA surface and vice-versa. 242 // Perhaps this should be guarded by some compiletime or runtime check. 243 return surfaceConfig; 244 } else if (readConfig == kBGRA_8888_GrPixelConfig && 245 !this->glCaps().readPixelsSupported(this->glInterface(), 246 GR_GL_BGRA, GR_GL_UNSIGNED_BYTE)) { 247 return kRGBA_8888_GrPixelConfig; 248 } else { 249 return readConfig; 250 } 251 } 252 253 GrPixelConfig GrGpuGL::preferredWritePixelsConfig(GrPixelConfig writeConfig, 254 GrPixelConfig surfaceConfig) const { 255 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) { 256 return kBGRA_8888_GrPixelConfig; 257 } else { 258 return writeConfig; 259 } 260 } 261 262 bool GrGpuGL::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const { 263 if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) { 264 return false; 265 } 266 if (srcConfig != texture->config() && kES2_GrGLBinding == this->glBinding()) { 267 // In general ES2 requires the internal format of the texture and the format of the src 268 // pixels to match. However, It may or may not be possible to upload BGRA data to a RGBA 269 // texture. It depends upon which extension added BGRA. The Apple extension allows it 270 // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own 271 // internal format). 
272 if (this->glCaps().bgraFormatSupport() && 273 !this->glCaps().bgraIsInternalFormat() && 274 kBGRA_8888_GrPixelConfig == srcConfig && 275 kRGBA_8888_GrPixelConfig == texture->config()) { 276 return true; 277 } else { 278 return false; 279 } 280 } else { 281 return true; 282 } 283 } 284 285 bool GrGpuGL::fullReadPixelsIsFasterThanPartial() const { 286 return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL); 287 } 288 289 void GrGpuGL::onResetContext(uint32_t resetBits) { 290 // we don't use the zb at all 291 if (resetBits & kMisc_GrGLBackendState) { 292 GL_CALL(Disable(GR_GL_DEPTH_TEST)); 293 GL_CALL(DepthMask(GR_GL_FALSE)); 294 295 fHWDrawFace = GrDrawState::kInvalid_DrawFace; 296 fHWDitherEnabled = kUnknown_TriState; 297 298 if (kDesktop_GrGLBinding == this->glBinding()) { 299 // Desktop-only state that we never change 300 if (!this->glCaps().isCoreProfile()) { 301 GL_CALL(Disable(GR_GL_POINT_SMOOTH)); 302 GL_CALL(Disable(GR_GL_LINE_SMOOTH)); 303 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH)); 304 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE)); 305 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP)); 306 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP)); 307 } 308 // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a 309 // core profile. This seems like a bug since the core spec removes any mention of 310 // GL_ARB_imaging. 311 if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) { 312 GL_CALL(Disable(GR_GL_COLOR_TABLE)); 313 } 314 GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL)); 315 // Since ES doesn't support glPointSize at all we always use the VS to 316 // set the point size 317 GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE)); 318 319 // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't 320 // currently part of our gl interface. There are probably others as 321 // well. 
322 } 323 fHWWriteToColor = kUnknown_TriState; 324 // we only ever use lines in hairline mode 325 GL_CALL(LineWidth(1)); 326 } 327 328 if (resetBits & kAA_GrGLBackendState) { 329 fHWAAState.invalidate(); 330 } 331 332 // invalid 333 if (resetBits & kTextureBinding_GrGLBackendState) { 334 fHWActiveTextureUnitIdx = -1; 335 for (int s = 0; s < fHWBoundTextures.count(); ++s) { 336 fHWBoundTextures[s] = NULL; 337 } 338 } 339 340 if (resetBits & kBlend_GrGLBackendState) { 341 fHWBlendState.invalidate(); 342 } 343 344 if (resetBits & kView_GrGLBackendState) { 345 fHWScissorSettings.invalidate(); 346 fHWViewport.invalidate(); 347 } 348 349 if (resetBits & kStencil_GrGLBackendState) { 350 fHWStencilSettings.invalidate(); 351 fHWStencilTestEnabled = kUnknown_TriState; 352 } 353 354 // Vertex 355 if (resetBits & kVertex_GrGLBackendState) { 356 fHWGeometryState.invalidate(); 357 } 358 359 if (resetBits & kRenderTarget_GrGLBackendState) { 360 fHWBoundRenderTarget = NULL; 361 } 362 363 if (resetBits & kPathStencil_GrGLBackendState) { 364 fHWPathStencilMatrixState.invalidate(); 365 if (this->caps()->pathStencilingSupport()) { 366 // we don't use the model view matrix. 
367 GL_CALL(MatrixMode(GR_GL_MODELVIEW)); 368 GL_CALL(LoadIdentity()); 369 } 370 } 371 372 // we assume these values 373 if (resetBits & kPixelStore_GrGLBackendState) { 374 if (this->glCaps().unpackRowLengthSupport()) { 375 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); 376 } 377 if (this->glCaps().packRowLengthSupport()) { 378 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); 379 } 380 if (this->glCaps().unpackFlipYSupport()) { 381 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); 382 } 383 if (this->glCaps().packFlipYSupport()) { 384 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE)); 385 } 386 } 387 388 if (resetBits & kProgram_GrGLBackendState) { 389 fHWProgramID = 0; 390 fSharedGLProgramState.invalidate(); 391 } 392 } 393 394 namespace { 395 396 GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) { 397 // By default, GrRenderTargets are GL's normal orientation so that they 398 // can be drawn to by the outside world without the client having 399 // to render upside down. 400 if (kDefault_GrSurfaceOrigin == origin) { 401 return renderTarget ? 
kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin; 402 } else { 403 return origin; 404 } 405 } 406 407 } 408 409 GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) { 410 if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) { 411 return NULL; 412 } 413 414 if (0 == desc.fTextureHandle) { 415 return NULL; 416 } 417 418 int maxSize = this->caps()->maxTextureSize(); 419 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { 420 return NULL; 421 } 422 423 GrGLTexture::Desc glTexDesc; 424 // next line relies on GrBackendTextureDesc's flags matching GrTexture's 425 glTexDesc.fFlags = (GrTextureFlags) desc.fFlags; 426 glTexDesc.fWidth = desc.fWidth; 427 glTexDesc.fHeight = desc.fHeight; 428 glTexDesc.fConfig = desc.fConfig; 429 glTexDesc.fSampleCnt = desc.fSampleCnt; 430 glTexDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle); 431 glTexDesc.fIsWrapped = true; 432 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag); 433 // FIXME: this should be calling resolve_origin(), but Chrome code is currently 434 // assuming the old behaviour, which is that backend textures are always 435 // BottomLeft, even for non-RT's. 
Once Chrome is fixed, change this to: 436 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget); 437 if (kDefault_GrSurfaceOrigin == desc.fOrigin) { 438 glTexDesc.fOrigin = kBottomLeft_GrSurfaceOrigin; 439 } else { 440 glTexDesc.fOrigin = desc.fOrigin; 441 } 442 443 GrGLTexture* texture = NULL; 444 if (renderTarget) { 445 GrGLRenderTarget::Desc glRTDesc; 446 glRTDesc.fRTFBOID = 0; 447 glRTDesc.fTexFBOID = 0; 448 glRTDesc.fMSColorRenderbufferID = 0; 449 glRTDesc.fConfig = desc.fConfig; 450 glRTDesc.fSampleCnt = desc.fSampleCnt; 451 glRTDesc.fOrigin = glTexDesc.fOrigin; 452 glRTDesc.fCheckAllocation = false; 453 if (!this->createRenderTargetObjects(glTexDesc.fWidth, 454 glTexDesc.fHeight, 455 glTexDesc.fTextureID, 456 &glRTDesc)) { 457 return NULL; 458 } 459 texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc)); 460 } else { 461 texture = SkNEW_ARGS(GrGLTexture, (this, glTexDesc)); 462 } 463 if (NULL == texture) { 464 return NULL; 465 } 466 467 return texture; 468 } 469 470 GrRenderTarget* GrGpuGL::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) { 471 GrGLRenderTarget::Desc glDesc; 472 glDesc.fConfig = desc.fConfig; 473 glDesc.fRTFBOID = static_cast<GrGLuint>(desc.fRenderTargetHandle); 474 glDesc.fMSColorRenderbufferID = 0; 475 glDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; 476 glDesc.fSampleCnt = desc.fSampleCnt; 477 glDesc.fIsWrapped = true; 478 glDesc.fCheckAllocation = false; 479 480 glDesc.fOrigin = resolve_origin(desc.fOrigin, true); 481 GrGLIRect viewport; 482 viewport.fLeft = 0; 483 viewport.fBottom = 0; 484 viewport.fWidth = desc.fWidth; 485 viewport.fHeight = desc.fHeight; 486 487 GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget, 488 (this, glDesc, viewport)); 489 if (desc.fStencilBits) { 490 GrGLStencilBuffer::Format format; 491 format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat; 492 format.fPacked = false; 493 format.fStencilBits = desc.fStencilBits; 494 format.fTotalBits = 
desc.fStencilBits; 495 static const bool kIsSBWrapped = false; 496 GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer, 497 (this, 498 kIsSBWrapped, 499 0, 500 desc.fWidth, 501 desc.fHeight, 502 desc.fSampleCnt, 503 format)); 504 tgt->setStencilBuffer(sb); 505 sb->unref(); 506 } 507 return tgt; 508 } 509 510 //////////////////////////////////////////////////////////////////////////////// 511 512 bool GrGpuGL::onWriteTexturePixels(GrTexture* texture, 513 int left, int top, int width, int height, 514 GrPixelConfig config, const void* buffer, 515 size_t rowBytes) { 516 if (NULL == buffer) { 517 return false; 518 } 519 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); 520 521 this->setScratchTextureUnit(); 522 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID())); 523 GrGLTexture::Desc desc; 524 desc.fFlags = glTex->desc().fFlags; 525 desc.fWidth = glTex->width(); 526 desc.fHeight = glTex->height(); 527 desc.fConfig = glTex->config(); 528 desc.fSampleCnt = glTex->desc().fSampleCnt; 529 desc.fTextureID = glTex->textureID(); 530 desc.fOrigin = glTex->origin(); 531 532 if (this->uploadTexData(desc, false, 533 left, top, width, height, 534 config, buffer, rowBytes)) { 535 texture->dirtyMipMaps(true); 536 return true; 537 } else { 538 return false; 539 } 540 } 541 542 namespace { 543 bool adjust_pixel_ops_params(int surfaceWidth, 544 int surfaceHeight, 545 size_t bpp, 546 int* left, int* top, int* width, int* height, 547 const void** data, 548 size_t* rowBytes) { 549 if (!*rowBytes) { 550 *rowBytes = *width * bpp; 551 } 552 553 SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height); 554 SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight); 555 556 if (!subRect.intersect(bounds)) { 557 return false; 558 } 559 *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) + 560 (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp); 561 562 *left = subRect.fLeft; 563 *top = subRect.fTop; 564 *width = subRect.width(); 565 
*height = subRect.height(); 566 return true; 567 } 568 569 GrGLenum check_alloc_error(const GrTextureDesc& desc, const GrGLInterface* interface) { 570 if (SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit)) { 571 return GR_GL_GET_ERROR(interface); 572 } else { 573 return CHECK_ALLOC_ERROR(interface); 574 } 575 } 576 577 } 578 579 bool GrGpuGL::uploadTexData(const GrGLTexture::Desc& desc, 580 bool isNewTexture, 581 int left, int top, int width, int height, 582 GrPixelConfig dataConfig, 583 const void* data, 584 size_t rowBytes) { 585 GrAssert(NULL != data || isNewTexture); 586 587 size_t bpp = GrBytesPerPixel(dataConfig); 588 if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top, 589 &width, &height, &data, &rowBytes)) { 590 return false; 591 } 592 size_t trimRowBytes = width * bpp; 593 594 // in case we need a temporary, trimmed copy of the src pixels 595 SkAutoSMalloc<128 * 128> tempStorage; 596 597 // paletted textures cannot be partially updated 598 bool useTexStorage = isNewTexture && 599 desc.fConfig != kIndex_8_GrPixelConfig && 600 this->glCaps().texStorageSupport(); 601 602 if (useTexStorage && kDesktop_GrGLBinding == this->glBinding()) { 603 // 565 is not a sized internal format on desktop GL. So on desktop with 604 // 565 we always use an unsized internal format to let the system pick 605 // the best sized format to convert the 565 data to. Since TexStorage 606 // only allows sized internal formats we will instead use TexImage2D. 607 useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig; 608 } 609 610 GrGLenum internalFormat; 611 GrGLenum externalFormat; 612 GrGLenum externalType; 613 // glTexStorage requires sized internal formats on both desktop and ES. ES 614 // glTexImage requires an unsized format. 
615 if (!this->configToGLFormats(dataConfig, useTexStorage, &internalFormat, 616 &externalFormat, &externalType)) { 617 return false; 618 } 619 620 if (!isNewTexture && GR_GL_PALETTE8_RGBA8 == internalFormat) { 621 // paletted textures cannot be updated 622 return false; 623 } 624 625 /* 626 * check whether to allocate a temporary buffer for flipping y or 627 * because our srcData has extra bytes past each row. If so, we need 628 * to trim those off here, since GL ES may not let us specify 629 * GL_UNPACK_ROW_LENGTH. 630 */ 631 bool restoreGLRowLength = false; 632 bool swFlipY = false; 633 bool glFlipY = false; 634 if (NULL != data) { 635 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { 636 if (this->glCaps().unpackFlipYSupport()) { 637 glFlipY = true; 638 } else { 639 swFlipY = true; 640 } 641 } 642 if (this->glCaps().unpackRowLengthSupport() && !swFlipY) { 643 // can't use this for flipping, only non-neg values allowed. :( 644 if (rowBytes != trimRowBytes) { 645 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp); 646 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength)); 647 restoreGLRowLength = true; 648 } 649 } else { 650 if (trimRowBytes != rowBytes || swFlipY) { 651 // copy data into our new storage, skipping the trailing bytes 652 size_t trimSize = height * trimRowBytes; 653 const char* src = (const char*)data; 654 if (swFlipY) { 655 src += (height - 1) * rowBytes; 656 } 657 char* dst = (char*)tempStorage.reset(trimSize); 658 for (int y = 0; y < height; y++) { 659 memcpy(dst, src, trimRowBytes); 660 if (swFlipY) { 661 src -= rowBytes; 662 } else { 663 src += rowBytes; 664 } 665 dst += trimRowBytes; 666 } 667 // now point data to our copied version 668 data = tempStorage.get(); 669 } 670 } 671 if (glFlipY) { 672 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE)); 673 } 674 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, static_cast<GrGLint>(bpp))); 675 } 676 bool succeeded = true; 677 if (isNewTexture && 678 0 == left && 0 == top && 679 desc.fWidth 
== width && desc.fHeight == height) { 680 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 681 if (useTexStorage) { 682 // We never resize or change formats of textures. We don't use 683 // mipmaps currently. 684 GL_ALLOC_CALL(this->glInterface(), 685 TexStorage2D(GR_GL_TEXTURE_2D, 686 1, // levels 687 internalFormat, 688 desc.fWidth, desc.fHeight)); 689 } else { 690 if (GR_GL_PALETTE8_RGBA8 == internalFormat) { 691 GrGLsizei imageSize = desc.fWidth * desc.fHeight + 692 kGrColorTableSize; 693 GL_ALLOC_CALL(this->glInterface(), 694 CompressedTexImage2D(GR_GL_TEXTURE_2D, 695 0, // level 696 internalFormat, 697 desc.fWidth, desc.fHeight, 698 0, // border 699 imageSize, 700 data)); 701 } else { 702 GL_ALLOC_CALL(this->glInterface(), 703 TexImage2D(GR_GL_TEXTURE_2D, 704 0, // level 705 internalFormat, 706 desc.fWidth, desc.fHeight, 707 0, // border 708 externalFormat, externalType, 709 data)); 710 } 711 } 712 GrGLenum error = check_alloc_error(desc, this->glInterface()); 713 if (error != GR_GL_NO_ERROR) { 714 succeeded = false; 715 } else { 716 // if we have data and we used TexStorage to create the texture, we 717 // now upload with TexSubImage. 
718 if (NULL != data && useTexStorage) { 719 GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, 720 0, // level 721 left, top, 722 width, height, 723 externalFormat, externalType, 724 data)); 725 } 726 } 727 } else { 728 if (swFlipY || glFlipY) { 729 top = desc.fHeight - (top + height); 730 } 731 GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, 732 0, // level 733 left, top, 734 width, height, 735 externalFormat, externalType, data)); 736 } 737 738 if (restoreGLRowLength) { 739 GrAssert(this->glCaps().unpackRowLengthSupport()); 740 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); 741 } 742 if (glFlipY) { 743 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); 744 } 745 return succeeded; 746 } 747 748 namespace { 749 bool renderbuffer_storage_msaa(GrGLContext& ctx, 750 int sampleCount, 751 GrGLenum format, 752 int width, int height) { 753 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface()); 754 GrAssert(GrGLCaps::kNone_MSFBOType != ctx.info().caps()->msFBOType()); 755 bool created = false; 756 if (GrGLCaps::kNVDesktop_CoverageAAType == 757 ctx.info().caps()->coverageAAType()) { 758 const GrGLCaps::MSAACoverageMode& mode = 759 ctx.info().caps()->getMSAACoverageMode(sampleCount); 760 GL_ALLOC_CALL(ctx.interface(), 761 RenderbufferStorageMultisampleCoverage(GR_GL_RENDERBUFFER, 762 mode.fCoverageSampleCnt, 763 mode.fColorSampleCnt, 764 format, 765 width, height)); 766 created = (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface())); 767 } 768 if (!created) { 769 GL_ALLOC_CALL(ctx.interface(), 770 RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, 771 sampleCount, 772 format, 773 width, height)); 774 created = (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface())); 775 } 776 return created; 777 } 778 } 779 780 bool GrGpuGL::createRenderTargetObjects(int width, int height, 781 GrGLuint texID, 782 GrGLRenderTarget::Desc* desc) { 783 desc->fMSColorRenderbufferID = 0; 784 desc->fRTFBOID = 0; 785 desc->fTexFBOID = 0; 786 desc->fIsWrapped = false; 787 788 GrGLenum status; 789 790 GrGLenum 
msColorFormat = 0; // suppress warning 791 792 if (desc->fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) { 793 goto FAILED; 794 } 795 796 GL_CALL(GenFramebuffers(1, &desc->fTexFBOID)); 797 if (!desc->fTexFBOID) { 798 goto FAILED; 799 } 800 801 802 // If we are using multisampling we will create two FBOS. We render to one and then resolve to 803 // the texture bound to the other. The exception is the IMG multisample extension. With this 804 // extension the texture is multisampled when rendered to and then auto-resolves it when it is 805 // rendered from. 806 if (desc->fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) { 807 GL_CALL(GenFramebuffers(1, &desc->fRTFBOID)); 808 GL_CALL(GenRenderbuffers(1, &desc->fMSColorRenderbufferID)); 809 if (!desc->fRTFBOID || 810 !desc->fMSColorRenderbufferID || 811 !this->configToGLFormats(desc->fConfig, 812 // GLES requires sized internal formats 813 kES2_GrGLBinding == this->glBinding(), 814 &msColorFormat, 815 NULL, 816 NULL)) { 817 goto FAILED; 818 } 819 } else { 820 desc->fRTFBOID = desc->fTexFBOID; 821 } 822 823 // below here we may bind the FBO 824 fHWBoundRenderTarget = NULL; 825 if (desc->fRTFBOID != desc->fTexFBOID) { 826 GrAssert(desc->fSampleCnt > 0); 827 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, 828 desc->fMSColorRenderbufferID)); 829 if (!renderbuffer_storage_msaa(fGLContext, 830 desc->fSampleCnt, 831 msColorFormat, 832 width, height)) { 833 goto FAILED; 834 } 835 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fRTFBOID)); 836 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 837 GR_GL_COLOR_ATTACHMENT0, 838 GR_GL_RENDERBUFFER, 839 desc->fMSColorRenderbufferID)); 840 if (desc->fCheckAllocation || 841 !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) { 842 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); 843 if (status != GR_GL_FRAMEBUFFER_COMPLETE) { 844 goto FAILED; 845 } 846 fGLContext.info().caps()->markConfigAsValidColorAttachment(desc->fConfig); 
847 } 848 } 849 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fTexFBOID)); 850 851 if (this->glCaps().usesImplicitMSAAResolve() && desc->fSampleCnt > 0) { 852 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, 853 GR_GL_COLOR_ATTACHMENT0, 854 GR_GL_TEXTURE_2D, 855 texID, 0, desc->fSampleCnt)); 856 } else { 857 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, 858 GR_GL_COLOR_ATTACHMENT0, 859 GR_GL_TEXTURE_2D, 860 texID, 0)); 861 } 862 if (desc->fCheckAllocation || 863 !this->glCaps().isConfigVerifiedColorAttachment(desc->fConfig)) { 864 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); 865 if (status != GR_GL_FRAMEBUFFER_COMPLETE) { 866 goto FAILED; 867 } 868 fGLContext.info().caps()->markConfigAsValidColorAttachment(desc->fConfig); 869 } 870 871 return true; 872 873 FAILED: 874 if (desc->fMSColorRenderbufferID) { 875 GL_CALL(DeleteRenderbuffers(1, &desc->fMSColorRenderbufferID)); 876 } 877 if (desc->fRTFBOID != desc->fTexFBOID) { 878 GL_CALL(DeleteFramebuffers(1, &desc->fRTFBOID)); 879 } 880 if (desc->fTexFBOID) { 881 GL_CALL(DeleteFramebuffers(1, &desc->fTexFBOID)); 882 } 883 return false; 884 } 885 886 // good to set a break-point here to know when createTexture fails 887 static GrTexture* return_null_texture() { 888 // GrAssert(!"null texture"); 889 return NULL; 890 } 891 892 #if 0 && GR_DEBUG 893 static size_t as_size_t(int x) { 894 return x; 895 } 896 #endif 897 898 GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc, 899 const void* srcData, 900 size_t rowBytes) { 901 902 GrGLTexture::Desc glTexDesc; 903 GrGLRenderTarget::Desc glRTDesc; 904 905 // Attempt to catch un- or wrongly initialized sample counts; 906 GrAssert(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64); 907 // We fail if the MSAA was requested and is not available. 
908 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) { 909 //GrPrintf("MSAA RT requested but not supported on this platform."); 910 return return_null_texture(); 911 } 912 // If the sample count exceeds the max then we clamp it. 913 glTexDesc.fSampleCnt = GrMin(desc.fSampleCnt, this->caps()->maxSampleCount()); 914 915 glTexDesc.fFlags = desc.fFlags; 916 glTexDesc.fWidth = desc.fWidth; 917 glTexDesc.fHeight = desc.fHeight; 918 glTexDesc.fConfig = desc.fConfig; 919 glTexDesc.fIsWrapped = false; 920 921 glRTDesc.fMSColorRenderbufferID = 0; 922 glRTDesc.fRTFBOID = 0; 923 glRTDesc.fTexFBOID = 0; 924 glRTDesc.fIsWrapped = false; 925 glRTDesc.fConfig = glTexDesc.fConfig; 926 glRTDesc.fCheckAllocation = SkToBool(desc.fFlags & kCheckAllocation_GrTextureFlagBit); 927 928 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrTextureFlagBit); 929 930 glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget); 931 glRTDesc.fOrigin = glTexDesc.fOrigin; 932 933 glRTDesc.fSampleCnt = glTexDesc.fSampleCnt; 934 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && 935 desc.fSampleCnt) { 936 //GrPrintf("MSAA RT requested but not supported on this platform."); 937 return return_null_texture(); 938 } 939 940 if (renderTarget) { 941 int maxRTSize = this->caps()->maxRenderTargetSize(); 942 if (glTexDesc.fWidth > maxRTSize || glTexDesc.fHeight > maxRTSize) { 943 return return_null_texture(); 944 } 945 } else { 946 int maxSize = this->caps()->maxTextureSize(); 947 if (glTexDesc.fWidth > maxSize || glTexDesc.fHeight > maxSize) { 948 return return_null_texture(); 949 } 950 } 951 952 GL_CALL(GenTextures(1, &glTexDesc.fTextureID)); 953 954 if (!glTexDesc.fTextureID) { 955 return return_null_texture(); 956 } 957 958 this->setScratchTextureUnit(); 959 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID)); 960 961 if (renderTarget && this->glCaps().textureUsageSupport()) { 962 // provides a hint about how this texture will be used 963 
GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 964 GR_GL_TEXTURE_USAGE, 965 GR_GL_FRAMEBUFFER_ATTACHMENT)); 966 } 967 968 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some 969 // drivers have a bug where an FBO won't be complete if it includes a 970 // texture that is not mipmap complete (considering the filter in use). 971 GrGLTexture::TexParams initialTexParams; 972 // we only set a subset here so invalidate first 973 initialTexParams.invalidate(); 974 initialTexParams.fMinFilter = GR_GL_NEAREST; 975 initialTexParams.fMagFilter = GR_GL_NEAREST; 976 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE; 977 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE; 978 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 979 GR_GL_TEXTURE_MAG_FILTER, 980 initialTexParams.fMagFilter)); 981 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 982 GR_GL_TEXTURE_MIN_FILTER, 983 initialTexParams.fMinFilter)); 984 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 985 GR_GL_TEXTURE_WRAP_S, 986 initialTexParams.fWrapS)); 987 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 988 GR_GL_TEXTURE_WRAP_T, 989 initialTexParams.fWrapT)); 990 if (!this->uploadTexData(glTexDesc, true, 0, 0, 991 glTexDesc.fWidth, glTexDesc.fHeight, 992 desc.fConfig, srcData, rowBytes)) { 993 GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID)); 994 return return_null_texture(); 995 } 996 997 GrGLTexture* tex; 998 if (renderTarget) { 999 // unbind the texture from the texture unit before binding it to the frame buffer 1000 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0)); 1001 1002 if (!this->createRenderTargetObjects(glTexDesc.fWidth, 1003 glTexDesc.fHeight, 1004 glTexDesc.fTextureID, 1005 &glRTDesc)) { 1006 GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID)); 1007 return return_null_texture(); 1008 } 1009 tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc, glRTDesc)); 1010 } else { 1011 tex = SkNEW_ARGS(GrGLTexture, (this, glTexDesc)); 1012 } 1013 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); 1014 #ifdef TRACE_TEXTURE_CREATION 1015 
    GrPrintf("--- new texture [%d] size=(%d %d) config=%d\n",
             glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
    return tex;
}

namespace {

const GrGLuint kUnknownBitCount = GrGLStencilBuffer::kUnknownBitCount;

// Fills in the stencil/total bit sizes of the currently bound renderbuffer by
// querying GL, but only when the cached format left them unknown (i.e. an
// unsized internal format was used). For packed depth-stencil formats the
// total is the depth size plus the stencil size.
void inline get_stencil_rb_sizes(const GrGLInterface* gl,
                                 GrGLStencilBuffer::Format* format) {

    // we shouldn't ever know one size and not the other
    GrAssert((kUnknownBitCount == format->fStencilBits) ==
             (kUnknownBitCount == format->fTotalBits));
    if (kUnknownBitCount == format->fStencilBits) {
        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
                                         (GrGLint*)&format->fStencilBits);
        if (format->fPacked) {
            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
                                             (GrGLint*)&format->fTotalBits);
            format->fTotalBits += format->fStencilBits;
        } else {
            format->fTotalBits = format->fStencilBits;
        }
    }
}
}

// Creates a stencil renderbuffer for rt, trying each stencil format the caps
// report until one both allocates successfully and produces a complete FBO
// when attached. The index of the format that worked is cached in
// fLastSuccessfulStencilFmtIdx so later creations usually succeed on the
// first try. Returns false if every format fails.
bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
                                                 int width, int height) {

    // All internally created RTs are also textures. We don't create
    // SBs for a client's standalone RT (that is a RT that isn't also a texture).
    GrAssert(rt->asTexture());
    GrAssert(width >= rt->width());
    GrAssert(height >= rt->height());

    int samples = rt->numSamples();
    GrGLuint sbID;
    GL_CALL(GenRenderbuffers(1, &sbID));
    if (!sbID) {
        return false;
    }

    int stencilFmtCnt = this->glCaps().stencilFormats().count();
    for (int i = 0; i < stencilFmtCnt; ++i) {
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID));
        // we start with the last stencil format that succeeded in hopes
        // that we won't go through this loop more than once after the
        // first (painful) stencil creation.
        int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
        const GrGLCaps::StencilFormat& sFmt =
                this->glCaps().stencilFormats()[sIdx];
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        // we do this "if" so that we don't call the multisample
        // version on a GL that doesn't have an MSAA extension.
        bool created;
        if (samples > 0) {
            created = renderbuffer_storage_msaa(fGLContext,
                                                samples,
                                                sFmt.fInternalFormat,
                                                width, height);
        } else {
            GL_ALLOC_CALL(this->glInterface(),
                          RenderbufferStorage(GR_GL_RENDERBUFFER,
                                              sFmt.fInternalFormat,
                                              width, height));
            created =
                (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
        }
        if (created) {
            // After sized formats we attempt an unsized format and take
            // whatever sizes GL gives us. In that case we query for the size.
            GrGLStencilBuffer::Format format = sFmt;
            get_stencil_rb_sizes(this->glInterface(), &format);
            static const bool kIsWrapped = false;
            SkAutoTUnref<GrStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer,
                                              (this, kIsWrapped, sbID, width, height,
                                              samples, format)));
            // Attachment can still fail (incomplete FBO) even though the
            // renderbuffer allocated; in that case try the next format.
            if (this->attachStencilBufferToRenderTarget(sb, rt)) {
                fLastSuccessfulStencilFmtIdx = sIdx;
                sb->transferToCache();
                rt->setStencilBuffer(sb);
                return true;
            }
            sb->abandon(); // otherwise we lose sbID
        }
    }
    GL_CALL(DeleteRenderbuffers(1, &sbID));
    return false;
}

// Attaches sb's renderbuffer to rt's FBO stencil attachment (and depth
// attachment too when the format is packed depth-stencil). A NULL sb detaches
// any existing stencil buffer instead. Returns false if GL reports the
// resulting FBO incomplete for an unverified config/format pairing.
bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) {
    GrGLRenderTarget* glrt = (GrGLRenderTarget*) rt;

    GrGLuint fbo = glrt->renderFBOID();

    if (NULL == sb) {
        if (NULL != rt->getStencilBuffer()) {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_STENCIL_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
#if GR_DEBUG
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            GrAssert(GR_GL_FRAMEBUFFER_COMPLETE == status);
#endif
        }
        return true;
    } else {
        GrGLStencilBuffer* glsb = static_cast<GrGLStencilBuffer*>(sb);
        GrGLuint rb = glsb->renderbufferID();

        // We're about to change the FBO binding directly, so invalidate the
        // cached render target binding to force a rebind on the next flush.
        fHWBoundRenderTarget = NULL;
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_STENCIL_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, rb));
        // A packed depth-stencil renderbuffer must be attached to both
        // attachment points; otherwise make sure depth is detached.
        if (glsb->format().fPacked) {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, rb));
        } else {
            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                            GR_GL_DEPTH_ATTACHMENT,
                                            GR_GL_RENDERBUFFER, 0));
        }

        // Only pay for a completeness check the first time this color config /
        // stencil format pair is seen; success is cached in the caps.
        GrGLenum status;
        if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                // Roll back the attachments so the FBO is left usable.
                GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                GR_GL_STENCIL_ATTACHMENT,
                                                GR_GL_RENDERBUFFER, 0));
                if (glsb->format().fPacked) {
                    GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                                    GR_GL_DEPTH_ATTACHMENT,
                                                    GR_GL_RENDERBUFFER, 0));
                }
                return false;
            } else {
                fGLContext.info().caps()->markColorConfigAndStencilFormatAsVerified(
                    rt->config(),
                    glsb->format());
            }
        }
        return true;
    }
}

////////////////////////////////////////////////////////////////////////////////

// Creates a vertex buffer of the given byte size. When the caps say dynamic
// data performs better without VBOs, a CPU-side buffer (fID == 0) is created
// instead. Returns NULL if GL cannot allocate the buffer storage.
GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(uint32_t size, bool dynamic) {
    GrGLVertexBuffer::Desc desc;
    desc.fDynamic = dynamic;
    desc.fSizeInBytes = size;
    desc.fIsWrapped = false;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
        desc.fID = 0;
        GrGLVertexBuffer* vertexBuffer =
            SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
        return vertexBuffer;
    } else {
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setVertexBufferID(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ARRAY_BUFFER,
                                     desc.fSizeInBytes,
                                     NULL,   // data ptr
                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyVertexBufferDelete(desc.fID);
                return NULL;
            }
            GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
            return vertexBuffer;
        }
        return NULL;
    }
}

// Index-buffer analog of onCreateVertexBuffer; see the comment there.
GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(uint32_t size, bool dynamic) {
    GrGLIndexBuffer::Desc desc;
    desc.fDynamic = dynamic;
    desc.fSizeInBytes = size;
    desc.fIsWrapped = false;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
        desc.fID = 0;
        GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
        return indexBuffer;
    } else {
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
                                     desc.fSizeInBytes,
                                     NULL,  // data ptr
                                     desc.fDynamic ?
                                     GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyIndexBufferDelete(desc.fID);
                return NULL;
            }
            GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
            return indexBuffer;
        }
        return NULL;
    }
}

// Wraps an SkPath in a GL path object (NV_path_rendering); only legal when
// the caps report path stenciling support.
GrPath* GrGpuGL::onCreatePath(const SkPath& inPath) {
    GrAssert(this->caps()->pathStencilingSupport());
    return SkNEW_ARGS(GrGLPath, (this, inPath));
}

// Pushes fScissorState to GL, translating the rect into viewport-relative
// coordinates for the current render target. Skips redundant GL calls by
// comparing against the cached HW scissor state, and disables the scissor
// test entirely when the rect covers the whole viewport.
void GrGpuGL::flushScissor() {
    const GrDrawState& drawState = this->getDrawState();
    const GrGLRenderTarget* rt =
        static_cast<const GrGLRenderTarget*>(drawState.getRenderTarget());

    GrAssert(NULL != rt);
    const GrGLIRect& vp = rt->getViewport();

    if (fScissorState.fEnabled) {
        GrGLIRect scissor;
        scissor.setRelativeTo(vp,
                              fScissorState.fRect.fLeft,
                              fScissorState.fRect.fTop,
                              fScissorState.fRect.width(),
                              fScissorState.fRect.height(),
                              rt->origin());
        // if the scissor fully contains the viewport then we fall through and
        // disable the scissor test.
        if (!scissor.contains(vp)) {
            if (fHWScissorSettings.fRect != scissor) {
                scissor.pushToGLScissor(this->glInterface());
                fHWScissorSettings.fRect = scissor;
            }
            if (kYes_TriState != fHWScissorSettings.fEnabled) {
                GL_CALL(Enable(GR_GL_SCISSOR_TEST));
                fHWScissorSettings.fEnabled = kYes_TriState;
            }
            return;
        }
    }
    if (kNo_TriState != fHWScissorSettings.fEnabled) {
        GL_CALL(Disable(GR_GL_SCISSOR_TEST));
        fHWScissorSettings.fEnabled = kNo_TriState;
        return;
    }
}

// Clears the current render target to color. A NULL rect clears the whole
// target; otherwise the rect is clipped to the target and applied via the
// scissor (restored afterwards by the GrAutoTRestore).
void GrGpuGL::onClear(const SkIRect* rect, GrColor color) {
    const GrDrawState& drawState = this->getDrawState();
    const GrRenderTarget* rt = drawState.getRenderTarget();
    // parent class should never let us get here with no RT
    GrAssert(NULL != rt);

    SkIRect clippedRect;
    if (NULL != rect) {
        // flushScissor expects rect to be clipped to the target.
        clippedRect = *rect;
        SkIRect rtRect = SkIRect::MakeWH(rt->width(), rt->height());
        if (clippedRect.intersect(rtRect)) {
            rect = &clippedRect;
        } else {
            // Clear rect is entirely outside the target; nothing to do.
            return;
        }
    }
    this->flushRenderTarget(rect);
    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = (NULL != rect);
    if (fScissorState.fEnabled) {
        fScissorState.fRect = *rect;
    }
    this->flushScissor();

    // Convert the packed 8-bit color to normalized floats for glClearColor.
    GrGLfloat r, g, b, a;
    static const GrGLfloat scale255 = 1.f / 255.f;
    a = GrColorUnpackA(color) * scale255;
    GrGLfloat scaleRGB = scale255;
    r = GrColorUnpackR(color) * scaleRGB;
    g = GrColorUnpackG(color) * scaleRGB;
    b = GrColorUnpackB(color) * scaleRGB;

    // The clear must not be masked; update the cached color-write state too.
    GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
    fHWWriteToColor = kYes_TriState;
    GL_CALL(ClearColor(r, g, b, a));
    GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
}

// Clears the entire stencil buffer of the current render target to 0.
void GrGpuGL::clearStencil() {
    if (NULL == this->getDrawState().getRenderTarget()) {
        return;
    }
    this->flushRenderTarget(&SkIRect::EmptyIRect());

    // Clear must cover the full target, so make sure the scissor is off
    // (restored on scope exit).
    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = false;
    this->flushScissor();

    GL_CALL(StencilMask(0xffffffff));
    GL_CALL(ClearStencil(0));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // The GL stencil mask was changed behind the cached settings' back.
    fHWStencilSettings.invalidate();
}

// Clears the clip bit of the stencil buffer within rect. insideClip selects
// whether the cleared region is marked as inside (clip bit set) or outside
// (zero) the clip.
void GrGpuGL::clearStencilClip(const SkIRect& rect, bool insideClip) {
    const GrDrawState& drawState = this->getDrawState();
    const GrRenderTarget* rt = drawState.getRenderTarget();
    GrAssert(NULL != rt);

    // this should only be called internally when we know we have a
    // stencil buffer.
    GrAssert(NULL != rt->getStencilBuffer());
    GrGLint stencilBitCount = rt->getStencilBuffer()->bits();
#if 0
    GrAssert(stencilBitCount > 0);
    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrDrawTarget says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask = ~0;
#endif
    // The clip bit is the top bit of the stencil buffer.
    GrGLint value;
    if (insideClip) {
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    this->flushRenderTarget(&SkIRect::EmptyIRect());

    // Restrict the clear to rect via the scissor (restored on scope exit).
    GrAutoTRestore<ScissorState> asr(&fScissorState);
    fScissorState.fEnabled = true;
    fScissorState.fRect = rect;
    this->flushScissor();

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // The GL stencil mask was changed behind the cached settings' back.
    fHWStencilSettings.invalidate();
}

void GrGpuGL::onForceRenderTargetFlush() {
    this->flushRenderTarget(&SkIRect::EmptyIRect());
}

// Returns true if a glReadPixels with these parameters would require an extra
// CPU pass purely to y-flip the result (i.e. the flip can't ride along with
// work we'd do anyway or be done by GL itself).
bool GrGpuGL::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
                                        int left, int top,
                                        int width, int height,
                                        GrPixelConfig config,
                                        size_t rowBytes) const {
    // If this rendertarget is already TopLeft, we don't need to flip.
    if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
        return false;
    }

    // if GL can do the flip then we'll never pay for it.
    if (this->glCaps().packFlipYSupport()) {
        return false;
    }

    // If we have to do memcpy to handle non-trim rowBytes then we
    // get the flip for free. Otherwise it costs.
    if (this->glCaps().packRowLengthSupport()) {
        return true;
    }
    // If we have to do memcpys to handle rowBytes then y-flip is free
    // Note the rowBytes might be tight to the passed in data, but if data
    // gets clipped in x to the target the rowBytes will no longer be tight.
    if (left >= 0 && (left + width) < renderTarget->width()) {
        return 0 == rowBytes ||
               GrBytesPerPixel(config) * width == rowBytes;
    } else {
        return false;
    }
}

// Reads back a rectangle of pixels from target into buffer, handling MSAA
// resolve, non-tight rowBytes (via GL_PACK_ROW_LENGTH or a scratch buffer),
// and y-flip for bottom-left-origin targets. Returns false on unsupported
// config or unresolvable target.
bool GrGpuGL::onReadPixels(GrRenderTarget* target,
                           int left, int top,
                           int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    GrGLenum format;
    GrGLenum type;
    bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
    if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
        return false;
    }
    size_t bpp = GrBytesPerPixel(config);
    // Clip the read rect to the target; bails out if nothing remains.
    if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
                                 &left, &top, &width, &height,
                                 const_cast<const void**>(&buffer),
                                 &rowBytes)) {
        return false;
    }

    // resolve the render target if necessary
    GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
    GrDrawState::AutoRenderTargetRestore artr;
    switch (tgt->getResolveType()) {
        case GrGLRenderTarget::kCantResolve_ResolveType:
            return false;
        case GrGLRenderTarget::kAutoResolves_ResolveType:
            artr.set(this->drawState(), target);
            this->flushRenderTarget(&SkIRect::EmptyIRect());
            break;
        case GrGLRenderTarget::kCanResolve_ResolveType:
            this->onResolveRenderTarget(tgt);
            // we don't track the state of the READ FBO ID.
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
                                    tgt->textureFBOID()));
            break;
        default:
            GrCrash("Unknown resolve type");
    }

    const GrGLIRect& glvp = tgt->getViewport();

    // the read rect is viewport-relative
    GrGLIRect readRect;
    readRect.setRelativeTo(glvp, left, top, width, height, target->origin());

    size_t tightRowBytes = bpp * width;
    if (0 == rowBytes) {
        rowBytes = tightRowBytes;
    }
    size_t readDstRowBytes = tightRowBytes;
    void* readDst = buffer;

    // determine if GL can read using the passed rowBytes or if we need
    // a scratch buffer.
    SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
    if (rowBytes != tightRowBytes) {
        if (this->glCaps().packRowLengthSupport()) {
            // GL_PACK_ROW_LENGTH is specified in pixels, not bytes.
            GrAssert(!(rowBytes % sizeof(GrColor)));
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowBytes / sizeof(GrColor)));
            readDstRowBytes = rowBytes;
        } else {
            scratch.reset(tightRowBytes * height);
            readDst = scratch.get();
        }
    }
    if (flipY && this->glCaps().packFlipYSupport()) {
        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
    }
    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
                       readRect.fWidth, readRect.fHeight,
                       format, type, readDst));
    // Restore pack state so later reads aren't affected.
    if (readDstRowBytes != tightRowBytes) {
        GrAssert(this->glCaps().packRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
    }
    if (flipY && this->glCaps().packFlipYSupport()) {
        GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
        // GL already flipped for us; no CPU flip needed below.
        flipY = false;
    }

    // now reverse the order of the rows, since GL's are bottom-to-top, but our
    // API presents top-to-bottom. We must preserve the padding contents. Note
    // that the above readPixels did not overwrite the padding.
    if (readDst == buffer) {
        GrAssert(rowBytes == readDstRowBytes);
        if (flipY) {
            scratch.reset(tightRowBytes);
            void* tmpRow = scratch.get();
            // flip y in-place by rows
            const int halfY = height >> 1;
            char* top = reinterpret_cast<char*>(buffer);
            char* bottom = top + (height - 1) * rowBytes;
            for (int y = 0; y < halfY; y++) {
                memcpy(tmpRow, top, tightRowBytes);
                memcpy(top, bottom, tightRowBytes);
                memcpy(bottom, tmpRow, tightRowBytes);
                top += rowBytes;
                bottom -= rowBytes;
            }
        }
    } else {
        GrAssert(readDst != buffer);
        GrAssert(rowBytes != tightRowBytes);
        // copy from readDst to buffer while flipping y
        // const int halfY = height >> 1;
        const char* src = reinterpret_cast<const char*>(readDst);
        char* dst = reinterpret_cast<char*>(buffer);
        if (flipY) {
            dst += (height-1) * rowBytes;
        }
        for (int y = 0; y < height; y++) {
            memcpy(dst, src, tightRowBytes);
            src += readDstRowBytes;
            if (!flipY) {
                dst += rowBytes;
            } else {
                dst -= rowBytes;
            }
        }
    }
    return true;
}

// Binds the draw state's render target FBO (if not already bound) and updates
// the viewport. A non-NULL, non-empty bound rect marks only that region as
// needing a future MSAA resolve; NULL marks the whole target.
void GrGpuGL::flushRenderTarget(const SkIRect* bound) {

    GrGLRenderTarget* rt =
        static_cast<GrGLRenderTarget*>(this->drawState()->getRenderTarget());
    GrAssert(NULL != rt);

    if (fHWBoundRenderTarget != rt) {
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, rt->renderFBOID()));
#if GR_DEBUG
        GrGLenum status;
        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
            GrPrintf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status);
        }
#endif
        fHWBoundRenderTarget = rt;
        const GrGLIRect& vp = rt->getViewport();
        if (fHWViewport != vp) {
            vp.pushToGLViewport(this->glInterface());
            fHWViewport = vp;
        }
    }
    if (NULL == bound || !bound->isEmpty()) {
        rt->flagAsNeedingResolve(bound);
    }
    // Rendering into the target invalidates any mipmap chain built from it.
    GrTexture *texture = rt->asTexture();
    if (texture) {
        texture->dirtyMipMaps(true);
    }
}

// Maps GrPrimitiveType values (used as the index) to GL primitive enums.
GrGLenum gPrimitiveType2GLMode[] = {
    GR_GL_TRIANGLES,
    GR_GL_TRIANGLE_STRIP,
    GR_GL_TRIANGLE_FAN,
    GR_GL_POINTS,
    GR_GL_LINES,
    GR_GL_LINE_STRIP
};

// Debug-only aid: when enabled, swap buffers after every draw so each draw
// can be observed on screen.
#define SWAP_PER_DRAW 0

#if SWAP_PER_DRAW
    #if GR_MAC_BUILD
        #include <AGL/agl.h>
    #elif GR_WIN32_BUILD
        #include <gl/GL.h>
        // Swaps the buffers of every top-level window owned by this process.
        void SwapBuf() {
            DWORD procID = GetCurrentProcessId();
            HWND hwnd = GetTopWindow(GetDesktopWindow());
            while(hwnd) {
                DWORD wndProcID = 0;
                GetWindowThreadProcessId(hwnd, &wndProcID);
                if(wndProcID == procID) {
                    SwapBuffers(GetDC(hwnd));
                }
                hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
            }
        }
    #endif
#endif

// Issues the actual GL draw call for info, after setupGeometry has bound and
// offset the vertex/index data.
void GrGpuGL::onGpuDraw(const DrawInfo& info) {
    size_t indexOffsetInBytes;
    this->setupGeometry(info, &indexOffsetInBytes);

    GrAssert((size_t)info.primitiveType() < GR_ARRAY_COUNT(gPrimitiveType2GLMode));

    if (info.isIndexed()) {
        // Indices are 16-bit; compute the byte offset into the bound IB.
        GrGLvoid* indices =
            reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex());
        // info.startVertex() was accounted for by setupGeometry.
        GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()],
                             info.indexCount(),
                             GR_GL_UNSIGNED_SHORT,
                             indices));
    } else {
        // Pass 0 for parameter first. We have to adjust glVertexAttribPointer() to account for
        // startVertex in the DrawElements case. So we always rely on setupGeometry to have
        // accounted for startVertex.
        GL_CALL(DrawArrays(gPrimitiveType2GLMode[info.primitiveType()], 0, info.vertexCount()));
    }
#if SWAP_PER_DRAW
    glFlush();
    #if GR_MAC_BUILD
        aglSwapBuffers(aglGetCurrentContext());
        int set_a_break_pt_here = 9;
        aglSwapBuffers(aglGetCurrentContext());
    #elif GR_WIN32_BUILD
        SwapBuf();
        int set_a_break_pt_here = 9;
        SwapBuf();
    #endif
#endif
}

namespace {

static const uint16_t kOnes16 = static_cast<uint16_t>(~0);
// Stencil settings used by NV_path_rendering stenciling for winding fills.
const GrStencilSettings& winding_nv_path_stencil_settings() {
    GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
        kIncClamp_StencilOp,
        kIncClamp_StencilOp,
        kAlwaysIfInClip_StencilFunc,
        kOnes16, kOnes16, kOnes16);
    return *GR_CONST_STENCIL_SETTINGS_PTR_FROM_STRUCT_PTR(&gSettings);
}
// Stencil settings used by NV_path_rendering stenciling for even-odd fills.
const GrStencilSettings& even_odd_nv_path_stencil_settings() {
    GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
        kInvert_StencilOp,
        kInvert_StencilOp,
        kAlwaysIfInClip_StencilFunc,
        kOnes16, kOnes16, kOnes16);
    return *GR_CONST_STENCIL_SETTINGS_PTR_FROM_STRUCT_PTR(&gSettings);
}
}

// Selects the stencil settings matching the path fill rule; asserted against
// again in onGpuStencilPath.
void GrGpuGL::setStencilPathSettings(const GrPath&,
                                     SkPath::FillType fill,
                                     GrStencilSettings* settings) {
    switch (fill) {
        case SkPath::kEvenOdd_FillType:
            *settings = even_odd_nv_path_stencil_settings();
            return;
        case SkPath::kWinding_FillType:
            *settings = winding_nv_path_stencil_settings();
            return;
        default:
            GrCrash("Unexpected path fill.");
    }
}

// Stencils a path into the stencil buffer via glStencilFillPath
// (NV_path_rendering). No-ops if the render target has no stencil buffer.
void GrGpuGL::onGpuStencilPath(const GrPath* path, SkPath::FillType fill) {
    GrAssert(this->caps()->pathStencilingSupport());

    GrGLuint id = static_cast<const GrGLPath*>(path)->pathID();
    GrDrawState* drawState = this->drawState();
    GrAssert(NULL != drawState->getRenderTarget());
    if (NULL == drawState->getRenderTarget()->getStencilBuffer()) {
        return;
    }

    // Decide how to manipulate
    // the stencil buffer based on the fill rule.
    // Also, assert that the stencil settings we set in setStencilPathSettings
    // are present.
    GrAssert(!fStencilSettings.isTwoSided());
    GrGLenum fillMode;
    switch (fill) {
        case SkPath::kWinding_FillType:
            fillMode = GR_GL_COUNT_UP;
            GrAssert(kIncClamp_StencilOp ==
                     fStencilSettings.passOp(GrStencilSettings::kFront_Face));
            GrAssert(kIncClamp_StencilOp ==
                     fStencilSettings.failOp(GrStencilSettings::kFront_Face));
            break;
        case SkPath::kEvenOdd_FillType:
            fillMode = GR_GL_INVERT;
            GrAssert(kInvert_StencilOp ==
                     fStencilSettings.passOp(GrStencilSettings::kFront_Face));
            GrAssert(kInvert_StencilOp ==
                     fStencilSettings.failOp(GrStencilSettings::kFront_Face));
            break;
        default:
            // Only the above two fill rules are allowed.
            GrCrash("Unexpected path fill.");
            return; // suppress unused var warning.
    }
    GrGLint writeMask = fStencilSettings.writeMask(GrStencilSettings::kFront_Face);
    GL_CALL(StencilFillPath(id, fillMode, writeMask));
}

// Resolves a multisampled render target into its texture FBO, if it was
// flagged as needing a resolve. Handles both the Apple ES extension (which
// blits via the scissor rect) and desktop/EXT framebuffer blits.
void GrGpuGL::onResolveRenderTarget(GrRenderTarget* target) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
    if (rt->needsResolve()) {
        // Some extensions automatically resolve the texture when it is read.
        if (this->glCaps().usesMSAARenderBuffers()) {
            GrAssert(rt->textureFBOID() != rt->renderFBOID());
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
            GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
            // make sure we go through flushRenderTarget() since we've modified
            // the bound DRAW FBO ID.
            fHWBoundRenderTarget = NULL;
            const GrGLIRect& vp = rt->getViewport();
            // Only the dirty region needs to be blitted.
            const SkIRect dirtyRect = rt->getResolveRect();
            GrGLIRect r;
            r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
                            dirtyRect.width(), dirtyRect.height(), target->origin());

            GrAutoTRestore<ScissorState> asr;
            if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
                // Apple's extension uses the scissor as the blit bounds.
                asr.reset(&fScissorState);
                fScissorState.fEnabled = true;
                fScissorState.fRect = dirtyRect;
                this->flushScissor();
                GL_CALL(ResolveMultisampleFramebuffer());
            } else {
                if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) {
                    // this respects the scissor during the blit, so disable it.
                    asr.reset(&fScissorState);
                    fScissorState.fEnabled = false;
                    this->flushScissor();
                }
                int right = r.fLeft + r.fWidth;
                int top = r.fBottom + r.fHeight;
                // Same src and dst rect: a straight 1:1 resolve blit.
                GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
                                        r.fLeft, r.fBottom, right, top,
                                        GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            }
        }
        rt->flagAsResolved();
    }
}

namespace {

// Maps a GrStencilFunc to the equivalent GL stencil-test enum. The static
// asserts pin the enum ordering the table depends on.
GrGLenum gr_to_gl_stencil_func(GrStencilFunc basicFunc) {
    static const GrGLenum gTable[] = {
        GR_GL_ALWAYS,           // kAlways_StencilFunc
        GR_GL_NEVER,            // kNever_StencilFunc
        GR_GL_GREATER,          // kGreater_StencilFunc
        GR_GL_GEQUAL,           // kGEqual_StencilFunc
        GR_GL_LESS,             // kLess_StencilFunc
        GR_GL_LEQUAL,           // kLEqual_StencilFunc,
        GR_GL_EQUAL,            // kEqual_StencilFunc,
        GR_GL_NOTEQUAL,         // kNotEqual_StencilFunc,
    };
    GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kBasicStencilFuncCount);
    GR_STATIC_ASSERT(0 == kAlways_StencilFunc);
    GR_STATIC_ASSERT(1 == kNever_StencilFunc);
    GR_STATIC_ASSERT(2 == kGreater_StencilFunc);
    GR_STATIC_ASSERT(3 == kGEqual_StencilFunc);
    GR_STATIC_ASSERT(4 == kLess_StencilFunc);
    GR_STATIC_ASSERT(5 ==
                     kLEqual_StencilFunc);
    GR_STATIC_ASSERT(6 == kEqual_StencilFunc);
    GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc);
    GrAssert((unsigned) basicFunc < kBasicStencilFuncCount);

    return gTable[basicFunc];
}

// Maps a GrStencilOp to the equivalent GL stencil-op enum. The static asserts
// pin the enum ordering the table depends on.
GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
    static const GrGLenum gTable[] = {
        GR_GL_KEEP,        // kKeep_StencilOp
        GR_GL_REPLACE,     // kReplace_StencilOp
        GR_GL_INCR_WRAP,   // kIncWrap_StencilOp
        GR_GL_INCR,        // kIncClamp_StencilOp
        GR_GL_DECR_WRAP,   // kDecWrap_StencilOp
        GR_GL_DECR,        // kDecClamp_StencilOp
        GR_GL_ZERO,        // kZero_StencilOp
        GR_GL_INVERT,      // kInvert_StencilOp
    };
    GR_STATIC_ASSERT(GR_ARRAY_COUNT(gTable) == kStencilOpCount);
    GR_STATIC_ASSERT(0 == kKeep_StencilOp);
    GR_STATIC_ASSERT(1 == kReplace_StencilOp);
    GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
    GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
    GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
    GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
    GR_STATIC_ASSERT(6 == kZero_StencilOp);
    GR_STATIC_ASSERT(7 == kInvert_StencilOp);
    GrAssert((unsigned) op < kStencilOpCount);
    return gTable[op];
}

// Pushes one face's stencil settings to GL. glFace selects front, back, or
// GR_GL_FRONT_AND_BACK; the latter uses the non-separate entry points so it
// also works when separate stencil is unsupported.
void set_gl_stencil(const GrGLInterface* gl,
                    const GrStencilSettings& settings,
                    GrGLenum glFace,
                    GrStencilSettings::Face grFace) {
    GrGLenum glFunc = gr_to_gl_stencil_func(settings.func(grFace));
    GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace));
    GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace));

    GrGLint ref = settings.funcRef(grFace);
    GrGLint mask = settings.funcMask(grFace);
    GrGLint writeMask = settings.writeMask(grFace);

    if (GR_GL_FRONT_AND_BACK == glFace) {
        // we call the combined func just in case separate stencil is not
        // supported.
        GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMask(writeMask));
        // Note: the depth-fail op is passed the same value as the pass op.
        GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp));
    } else {
        GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
        GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp));
    }
}
}

// Pushes fStencilSettings to GL for the given draw type, skipping GL calls
// when the cached HW state already matches.
void GrGpuGL::flushStencil(DrawType type) {
    if (kStencilPath_DrawType == type) {
        GrAssert(!fStencilSettings.isTwoSided());
        // Just the func, ref, and mask is set here. The op and write mask are params to the call
        // that draws the path to the SB (glStencilFillPath)
        GrGLenum func =
            gr_to_gl_stencil_func(fStencilSettings.func(GrStencilSettings::kFront_Face));
        GL_CALL(PathStencilFunc(func,
                                fStencilSettings.funcRef(GrStencilSettings::kFront_Face),
                                fStencilSettings.funcMask(GrStencilSettings::kFront_Face)));
    } else if (fHWStencilSettings != fStencilSettings) {
        if (fStencilSettings.isDisabled()) {
            if (kNo_TriState != fHWStencilTestEnabled) {
                GL_CALL(Disable(GR_GL_STENCIL_TEST));
                fHWStencilTestEnabled = kNo_TriState;
            }
        } else {
            if (kYes_TriState != fHWStencilTestEnabled) {
                GL_CALL(Enable(GR_GL_STENCIL_TEST));
                fHWStencilTestEnabled = kYes_TriState;
            }
        }
        if (!fStencilSettings.isDisabled()) {
            if (this->caps()->twoSidedStencilSupport()) {
                set_gl_stencil(this->glInterface(),
                               fStencilSettings,
                               GR_GL_FRONT,
                               GrStencilSettings::kFront_Face);
                set_gl_stencil(this->glInterface(),
                               fStencilSettings,
                               GR_GL_BACK,
                               GrStencilSettings::kBack_Face);
            } else {
                // No separate stencil support: apply the front face's
                // settings to both faces.
                set_gl_stencil(this->glInterface(),
                               fStencilSettings,
                               GR_GL_FRONT_AND_BACK,
                               GrStencilSettings::kFront_Face);
            }
        }
        fHWStencilSettings = fStencilSettings;
    }
}

void GrGpuGL::flushAAState(DrawType type) {
    // At least some
    // ATI linux drivers will render GL_LINES incorrectly when MSAA state is enabled but
    // the target is not multisampled. Single pixel wide lines are rendered thicker than 1 pixel wide.
#if 0
    // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern
    #define RT_HAS_MSAA rt->isMultisampled()
#else
    #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == type)
#endif

    const GrRenderTarget* rt = this->getDrawState().getRenderTarget();
    if (kDesktop_GrGLBinding == this->glBinding()) {
        // ES doesn't support toggling GL_MULTISAMPLE and doesn't have
        // smooth lines.
        // we prefer smooth lines over multisampled lines
        bool smoothLines = false;

        if (kDrawLines_DrawType == type) {
            smoothLines = this->willUseHWAALines();
            if (smoothLines) {
                if (kYes_TriState != fHWAAState.fSmoothLineEnabled) {
                    GL_CALL(Enable(GR_GL_LINE_SMOOTH));
                    fHWAAState.fSmoothLineEnabled = kYes_TriState;
                    // must disable msaa to use line smoothing
                    if (RT_HAS_MSAA &&
                        kNo_TriState != fHWAAState.fMSAAEnabled) {
                        GL_CALL(Disable(GR_GL_MULTISAMPLE));
                        fHWAAState.fMSAAEnabled = kNo_TriState;
                    }
                }
            } else {
                if (kNo_TriState != fHWAAState.fSmoothLineEnabled) {
                    GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                    fHWAAState.fSmoothLineEnabled = kNo_TriState;
                }
            }
        }
        if (!smoothLines && RT_HAS_MSAA) {
            // FIXME: GL_NV_pr doesn't seem to like MSAA disabled. The paths
            // convex hulls of each segment appear to get filled.
            bool enableMSAA = kStencilPath_DrawType == type ||
                              this->getDrawState().isHWAntialiasState();
            if (enableMSAA) {
                if (kYes_TriState != fHWAAState.fMSAAEnabled) {
                    GL_CALL(Enable(GR_GL_MULTISAMPLE));
                    fHWAAState.fMSAAEnabled = kYes_TriState;
                }
            } else {
                if (kNo_TriState != fHWAAState.fMSAAEnabled) {
                    GL_CALL(Disable(GR_GL_MULTISAMPLE));
                    fHWAAState.fMSAAEnabled = kNo_TriState;
                }
            }
        }
    }
}

// Pushes blend enable/func/constant-color state to GL, skipping redundant
// calls via the cached fHWBlendState. HW AA lines force (SA, ISA) blending
// regardless of the requested coeffs.
void GrGpuGL::flushBlend(bool isLines,
                         GrBlendCoeff srcCoeff,
                         GrBlendCoeff dstCoeff) {
    if (isLines && this->willUseHWAALines()) {
        if (kYes_TriState != fHWBlendState.fEnabled) {
            GL_CALL(Enable(GR_GL_BLEND));
            fHWBlendState.fEnabled = kYes_TriState;
        }
        if (kSA_GrBlendCoeff != fHWBlendState.fSrcCoeff ||
            kISA_GrBlendCoeff != fHWBlendState.fDstCoeff) {
            GL_CALL(BlendFunc(gXfermodeCoeff2Blend[kSA_GrBlendCoeff],
                              gXfermodeCoeff2Blend[kISA_GrBlendCoeff]));
            fHWBlendState.fSrcCoeff = kSA_GrBlendCoeff;
            fHWBlendState.fDstCoeff = kISA_GrBlendCoeff;
        }
    } else {
        // any optimization to disable blending should
        // have already been applied and tweaked the coeffs
        // to (1, 0).
        bool blendOff = kOne_GrBlendCoeff == srcCoeff &&
                        kZero_GrBlendCoeff == dstCoeff;
        if (blendOff) {
            if (kNo_TriState != fHWBlendState.fEnabled) {
                GL_CALL(Disable(GR_GL_BLEND));
                fHWBlendState.fEnabled = kNo_TriState;
            }
        } else {
            if (kYes_TriState != fHWBlendState.fEnabled) {
                GL_CALL(Enable(GR_GL_BLEND));
                fHWBlendState.fEnabled = kYes_TriState;
            }
            if (fHWBlendState.fSrcCoeff != srcCoeff ||
                fHWBlendState.fDstCoeff != dstCoeff) {
                GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
                                  gXfermodeCoeff2Blend[dstCoeff]));
                fHWBlendState.fSrcCoeff = srcCoeff;
                fHWBlendState.fDstCoeff = dstCoeff;
            }
            // Only upload the blend constant when a coeff actually reads it
            // and the cached value is stale.
            GrColor blendConst = this->getDrawState().getBlendConstant();
            if ((BlendCoeffReferencesConstant(srcCoeff) ||
                 BlendCoeffReferencesConstant(dstCoeff)) &&
                (!fHWBlendState.fConstColorValid ||
                 fHWBlendState.fConstColor != blendConst)) {
                GrGLfloat c[4];
                GrColorToRGBAFloat(blendConst, c);
                GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
                fHWBlendState.fConstColor = blendConst;
                fHWBlendState.fConstColorValid = true;
            }
        }
    }
}
namespace {

// Sets the RGBA swizzle of the currently bound 2D texture.
inline void set_tex_swizzle(GrGLenum swizzle[4], const GrGLInterface* gl) {
    GR_GL_CALL(gl, TexParameteriv(GR_GL_TEXTURE_2D,
                                  GR_GL_TEXTURE_SWIZZLE_RGBA,
                                  reinterpret_cast<const GrGLint*>(swizzle)));
}

// Maps SkShader tile modes (used as the index) to GL wrap enums.
inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
    static const GrGLenum gWrapModes[] = {
        GR_GL_CLAMP_TO_EDGE,
        GR_GL_REPEAT,
        GR_GL_MIRRORED_REPEAT
    };
    GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
    GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
    GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
    GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
    return gWrapModes[tm];
}

}

void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
    // Binds 'texture' to texture unit 'unitIdx' and flushes its sampler
    // parameters (filter, wrap, swizzle), sending only params that differ
    // from the texture's cached values.
    GrAssert(NULL != texture);

    // If we created a rt/tex and rendered to it without using a texture and now we're texturing
    // from the rt it will still be the last bound texture, but it needs resolving. So keep this
    // out of the "last != next" check.
    GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
    if (NULL != texRT) {
        this->onResolveRenderTarget(texRT);
    }

    // Skip the GL bind when the unit already holds this texture.
    if (fHWBoundTextures[unitIdx] != texture) {
        this->setTextureUnit(unitIdx);
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID()));
        fHWBoundTextures[unitIdx] = texture;
    }

    // If the cached params predate the last reset timestamp, everything must
    // be resent to GL regardless of the cached values.
    ResetTimestamp timestamp;
    const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
    bool setAll = timestamp < this->getResetTimestamp();
    GrGLTexture::TexParams newTexParams;

    // Indexed by GrTextureParams::FilterMode.
    static GrGLenum glMinFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR_MIPMAP_LINEAR
    };
    static GrGLenum glMagFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR
    };
    newTexParams.fMinFilter = glMinFilterModes[params.filterMode()];
    newTexParams.fMagFilter = glMagFilterModes[params.filterMode()];

#ifndef SKIA_IGNORE_GPU_MIPMAPS
    // Regenerate mip levels if the filter needs them and they are stale.
    if (params.filterMode() == GrTextureParams::kMipMap_FilterMode &&
        texture->mipMapsAreDirty()) {
//        GL_CALL(Hint(GR_GL_GENERATE_MIPMAP_HINT,GR_GL_NICEST));
        GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D));
        texture->dirtyMipMaps(false);
    }
#endif

    newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
    newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
    memcpy(newTexParams.fSwizzleRGBA,
           GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()),
           sizeof(newTexParams.fSwizzleRGBA));
    // Flush each param only if it changed (or setAll is forced).
    if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_MAG_FILTER,
                              newTexParams.fMagFilter));
    }
    if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_MIN_FILTER,
                              newTexParams.fMinFilter));
    }
    if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_WRAP_S,
                              newTexParams.fWrapS));
    }
    if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
                              GR_GL_TEXTURE_WRAP_T,
                              newTexParams.fWrapT));
    }
    // Swizzle is only flushed when the context supports it.
    if (this->glCaps().textureSwizzleSupport() &&
        (setAll || memcmp(newTexParams.fSwizzleRGBA,
                          oldTexParams.fSwizzleRGBA,
                          sizeof(newTexParams.fSwizzleRGBA)))) {
        this->setTextureUnit(unitIdx);
        set_tex_swizzle(newTexParams.fSwizzleRGBA,
                        this->glInterface());
    }
    // Remember what we sent so subsequent binds can diff against it.
    texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
}
void GrGpuGL::flushMiscFixedFunctionState() {
    // Flushes dither, the color write mask, and face culling to GL, using the
    // cached fHW* tri-states to avoid redundant GL calls.

    const GrDrawState& drawState = this->getDrawState();

    if (drawState.isDitherState()) {
        if (kYes_TriState != fHWDitherEnabled) {
            GL_CALL(Enable(GR_GL_DITHER));
            fHWDitherEnabled = kYes_TriState;
        }
    } else {
        if (kNo_TriState != fHWDitherEnabled) {
            GL_CALL(Disable(GR_GL_DITHER));
            fHWDitherEnabled = kNo_TriState;
        }
    }

    if (drawState.isColorWriteDisabled()) {
        if (kNo_TriState != fHWWriteToColor) {
            GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
                              GR_GL_FALSE, GR_GL_FALSE));
            fHWWriteToColor = kNo_TriState;
        }
    } else {
        if (kYes_TriState != fHWWriteToColor) {
            GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
            fHWWriteToColor = kYes_TriState;
        }
    }

    // Map the draw-face setting onto GL cull state.
    if (fHWDrawFace != drawState.getDrawFace()) {
        switch (this->getDrawState().getDrawFace()) {
            case GrDrawState::kCCW_DrawFace:
                GL_CALL(Enable(GR_GL_CULL_FACE));
                GL_CALL(CullFace(GR_GL_BACK));
                break;
            case GrDrawState::kCW_DrawFace:
                GL_CALL(Enable(GR_GL_CULL_FACE));
                GL_CALL(CullFace(GR_GL_FRONT));
                break;
            case GrDrawState::kBoth_DrawFace:
                GL_CALL(Disable(GR_GL_CULL_FACE));
                break;
            default:
                GrCrash("Unknown draw face.");
        }
        fHWDrawFace = drawState.getDrawFace();
    }
}
drawState.getDrawFace()) { 2117 switch (this->getDrawState().getDrawFace()) { 2118 case GrDrawState::kCCW_DrawFace: 2119 GL_CALL(Enable(GR_GL_CULL_FACE)); 2120 GL_CALL(CullFace(GR_GL_BACK)); 2121 break; 2122 case GrDrawState::kCW_DrawFace: 2123 GL_CALL(Enable(GR_GL_CULL_FACE)); 2124 GL_CALL(CullFace(GR_GL_FRONT)); 2125 break; 2126 case GrDrawState::kBoth_DrawFace: 2127 GL_CALL(Disable(GR_GL_CULL_FACE)); 2128 break; 2129 default: 2130 GrCrash("Unknown draw face."); 2131 } 2132 fHWDrawFace = drawState.getDrawFace(); 2133 } 2134 } 2135 2136 void GrGpuGL::notifyRenderTargetDelete(GrRenderTarget* renderTarget) { 2137 GrAssert(NULL != renderTarget); 2138 if (fHWBoundRenderTarget == renderTarget) { 2139 fHWBoundRenderTarget = NULL; 2140 } 2141 } 2142 2143 void GrGpuGL::notifyTextureDelete(GrGLTexture* texture) { 2144 for (int s = 0; s < fHWBoundTextures.count(); ++s) { 2145 if (fHWBoundTextures[s] == texture) { 2146 // deleting bound texture does implied bind to 0 2147 fHWBoundTextures[s] = NULL; 2148 } 2149 } 2150 } 2151 2152 bool GrGpuGL::configToGLFormats(GrPixelConfig config, 2153 bool getSizedInternalFormat, 2154 GrGLenum* internalFormat, 2155 GrGLenum* externalFormat, 2156 GrGLenum* externalType) { 2157 GrGLenum dontCare; 2158 if (NULL == internalFormat) { 2159 internalFormat = &dontCare; 2160 } 2161 if (NULL == externalFormat) { 2162 externalFormat = &dontCare; 2163 } 2164 if (NULL == externalType) { 2165 externalType = &dontCare; 2166 } 2167 2168 switch (config) { 2169 case kRGBA_8888_GrPixelConfig: 2170 *internalFormat = GR_GL_RGBA; 2171 *externalFormat = GR_GL_RGBA; 2172 if (getSizedInternalFormat) { 2173 *internalFormat = GR_GL_RGBA8; 2174 } else { 2175 *internalFormat = GR_GL_RGBA; 2176 } 2177 *externalType = GR_GL_UNSIGNED_BYTE; 2178 break; 2179 case kBGRA_8888_GrPixelConfig: 2180 if (!this->glCaps().bgraFormatSupport()) { 2181 return false; 2182 } 2183 if (this->glCaps().bgraIsInternalFormat()) { 2184 if (getSizedInternalFormat) { 2185 *internalFormat = 
GR_GL_BGRA8; 2186 } else { 2187 *internalFormat = GR_GL_BGRA; 2188 } 2189 } else { 2190 if (getSizedInternalFormat) { 2191 *internalFormat = GR_GL_RGBA8; 2192 } else { 2193 *internalFormat = GR_GL_RGBA; 2194 } 2195 } 2196 *externalFormat = GR_GL_BGRA; 2197 *externalType = GR_GL_UNSIGNED_BYTE; 2198 break; 2199 case kRGB_565_GrPixelConfig: 2200 *internalFormat = GR_GL_RGB; 2201 *externalFormat = GR_GL_RGB; 2202 if (getSizedInternalFormat) { 2203 if (this->glBinding() == kDesktop_GrGLBinding) { 2204 return false; 2205 } else { 2206 *internalFormat = GR_GL_RGB565; 2207 } 2208 } else { 2209 *internalFormat = GR_GL_RGB; 2210 } 2211 *externalType = GR_GL_UNSIGNED_SHORT_5_6_5; 2212 break; 2213 case kRGBA_4444_GrPixelConfig: 2214 *internalFormat = GR_GL_RGBA; 2215 *externalFormat = GR_GL_RGBA; 2216 if (getSizedInternalFormat) { 2217 *internalFormat = GR_GL_RGBA4; 2218 } else { 2219 *internalFormat = GR_GL_RGBA; 2220 } 2221 *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4; 2222 break; 2223 case kIndex_8_GrPixelConfig: 2224 if (this->caps()->eightBitPaletteSupport()) { 2225 *internalFormat = GR_GL_PALETTE8_RGBA8; 2226 // glCompressedTexImage doesn't take external params 2227 *externalFormat = GR_GL_PALETTE8_RGBA8; 2228 // no sized/unsized internal format distinction here 2229 *internalFormat = GR_GL_PALETTE8_RGBA8; 2230 // unused with CompressedTexImage 2231 *externalType = GR_GL_UNSIGNED_BYTE; 2232 } else { 2233 return false; 2234 } 2235 break; 2236 case kAlpha_8_GrPixelConfig: 2237 if (this->glCaps().textureRedSupport()) { 2238 *internalFormat = GR_GL_RED; 2239 *externalFormat = GR_GL_RED; 2240 if (getSizedInternalFormat) { 2241 *internalFormat = GR_GL_R8; 2242 } else { 2243 *internalFormat = GR_GL_RED; 2244 } 2245 *externalType = GR_GL_UNSIGNED_BYTE; 2246 } else { 2247 *internalFormat = GR_GL_ALPHA; 2248 *externalFormat = GR_GL_ALPHA; 2249 if (getSizedInternalFormat) { 2250 *internalFormat = GR_GL_ALPHA8; 2251 } else { 2252 *internalFormat = GR_GL_ALPHA; 2253 } 2254 
*externalType = GR_GL_UNSIGNED_BYTE; 2255 } 2256 break; 2257 default: 2258 return false; 2259 } 2260 return true; 2261 } 2262 2263 void GrGpuGL::setTextureUnit(int unit) { 2264 GrAssert(unit >= 0 && unit < fHWBoundTextures.count()); 2265 if (unit != fHWActiveTextureUnitIdx) { 2266 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit)); 2267 fHWActiveTextureUnitIdx = unit; 2268 } 2269 } 2270 2271 void GrGpuGL::setScratchTextureUnit() { 2272 // Bind the last texture unit since it is the least likely to be used by GrGLProgram. 2273 int lastUnitIdx = fHWBoundTextures.count() - 1; 2274 if (lastUnitIdx != fHWActiveTextureUnitIdx) { 2275 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx)); 2276 fHWActiveTextureUnitIdx = lastUnitIdx; 2277 } 2278 // clear out the this field so that if a program does use this unit it will rebind the correct 2279 // texture. 2280 fHWBoundTextures[lastUnitIdx] = NULL; 2281 } 2282 2283 namespace { 2284 // Determines whether glBlitFramebuffer could be used between src and dst. 2285 inline bool can_blit_framebuffer(const GrSurface* dst, 2286 const GrSurface* src, 2287 const GrGpuGL* gpu, 2288 bool* wouldNeedTempFBO = NULL) { 2289 if (gpu->isConfigRenderable(dst->config()) && 2290 gpu->isConfigRenderable(src->config()) && 2291 gpu->glCaps().usesMSAARenderBuffers()) { 2292 if (NULL != wouldNeedTempFBO) { 2293 *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget(); 2294 } 2295 return true; 2296 } else { 2297 return false; 2298 } 2299 } 2300 2301 inline bool can_copy_texsubimage(const GrSurface* dst, 2302 const GrSurface* src, 2303 const GrGpuGL* gpu, 2304 bool* wouldNeedTempFBO = NULL) { 2305 // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage 2306 // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps 2307 // many drivers would allow it to work, but ANGLE does not. 
// Determines whether glCopyTexSubImage2D could be used to copy src into dst.
// Optionally reports whether a temporary FBO would be required for the copy.
inline bool can_copy_texsubimage(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGpuGL* gpu,
                                 bool* wouldNeedTempFBO = NULL) {
    // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage
    // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps
    // many drivers would allow it to work, but ANGLE does not.
    if (kES2_GrGLBinding == gpu->glBinding() && gpu->glCaps().bgraIsInternalFormat() &&
        (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
        return false;
    }
    const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
    // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
    // then we don't want to copy to the texture but to the MSAA buffer.
    if (NULL != dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
        return false;
    }
    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    // If the src is multisampled (and uses an extension where there is a separate MSAA
    // renderbuffer) then it is an invalid operation to call CopyTexSubImage
    if (NULL != srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        return false;
    }
    // Remaining requirements: src must be renderable (it gets bound as the
    // read FBO), dst must be a texture, origins must match, and paletted
    // sources can't be copied from.
    if (gpu->isConfigRenderable(src->config()) && NULL != dst->asTexture() &&
        dst->origin() == src->origin() && kIndex_8_GrPixelConfig != src->config()) {
        if (NULL != wouldNeedTempFBO) {
            // A temp FBO is needed only when src isn't already a render target.
            *wouldNeedTempFBO = NULL == src->asRenderTarget();
        }
        return true;
    } else {
        return false;
    }
}
// If a temporary FBO was created, its non-zero ID is returned. The viewport that the copy rect is
// relative to is output.
inline GrGLuint bind_surface_as_fbo(const GrGLInterface* gl,
                                    GrSurface* surface,
                                    GrGLenum fboTarget,
                                    GrGLIRect* viewport) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    GrGLuint tempFBOID;
    if (NULL == rt) {
        // Not a render target: wrap the surface's texture in a temporary FBO.
        // The caller owns (and must delete) the returned FBO ID.
        GrAssert(NULL != surface->asTexture());
        GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
        GR_GL_CALL(gl, GenFramebuffers(1, &tempFBOID));
        GR_GL_CALL(gl, BindFramebuffer(fboTarget, tempFBOID));
        GR_GL_CALL(gl, FramebufferTexture2D(fboTarget,
                                            GR_GL_COLOR_ATTACHMENT0,
                                            GR_GL_TEXTURE_2D,
                                            texID,
                                            0));
        // The temp FBO's viewport covers the whole surface.
        viewport->fLeft = 0;
        viewport->fBottom = 0;
        viewport->fWidth = surface->width();
        viewport->fHeight = surface->height();
    } else {
        // Already a render target: bind its FBO and report its viewport.
        tempFBOID = 0;
        GR_GL_CALL(gl, BindFramebuffer(fboTarget, rt->renderFBOID()));
        *viewport = rt->getViewport();
    }
    return tempFBOID;
}
bool GrGpuGL::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    // Copies srcRect from src to dst at dstPoint, preferring, in order:
    // glCopyTexSubImage2D, glBlitFramebuffer, then the inherited draw-based
    // copy. A GL path that would require a temporary FBO is only taken when
    // the inherited path cannot perform the copy at all.
    bool inheritedCouldCopy = INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint);
    bool copied = false;
    bool wouldNeedTempFBO = false;
    if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) &&
        (!wouldNeedTempFBO || !inheritedCouldCopy)) {
        GrGLuint srcFBO;
        GrGLIRect srcVP;
        // Bind src as the read framebuffer (may create a temp FBO).
        srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_FRAMEBUFFER, &srcVP);
        GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
        GrAssert(NULL != dstTex);
        // We modified the bound FBO
        fHWBoundRenderTarget = NULL;
        GrGLIRect srcGLRect;
        srcGLRect.setRelativeTo(srcVP,
                                srcRect.fLeft,
                                srcRect.fTop,
                                srcRect.width(),
                                srcRect.height(),
                                src->origin());

        this->setScratchTextureUnit();
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID()));
        GrGLint dstY;
        // Flip the destination y for bottom-up surfaces.
        if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
            dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
        } else {
            dstY = dstPoint.fY;
        }
        GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0,
                                  dstPoint.fX, dstY,
                                  srcGLRect.fLeft, srcGLRect.fBottom,
                                  srcGLRect.fWidth, srcGLRect.fHeight));
        copied = true;
        if (srcFBO) {
            GL_CALL(DeleteFramebuffers(1, &srcFBO));
        }
    } else if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) &&
               (!wouldNeedTempFBO || !inheritedCouldCopy)) {
        SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                            srcRect.width(), srcRect.height());
        // Blitting a surface onto itself with overlapping rects is not
        // attempted; fall through to the inherited path instead.
        bool selfOverlap = false;
        if (dst->isSameAs(src)) {
            selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect);
        }

        if (!selfOverlap) {
            GrGLuint dstFBO;
            GrGLuint srcFBO;
            GrGLIRect dstVP;
            GrGLIRect srcVP;
            dstFBO = bind_surface_as_fbo(this->glInterface(), dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP);
            srcFBO = bind_surface_as_fbo(this->glInterface(), src, GR_GL_READ_FRAMEBUFFER, &srcVP);
            // We modified the bound FBO
            fHWBoundRenderTarget = NULL;
            GrGLIRect srcGLRect;
            GrGLIRect dstGLRect;
            srcGLRect.setRelativeTo(srcVP,
                                    srcRect.fLeft,
                                    srcRect.fTop,
                                    srcRect.width(),
                                    srcRect.height(),
                                    src->origin());
            dstGLRect.setRelativeTo(dstVP,
                                    dstRect.fLeft,
                                    dstRect.fTop,
                                    dstRect.width(),
                                    dstRect.height(),
                                    dst->origin());

            GrAutoTRestore<ScissorState> asr;
            if (GrGLCaps::kDesktop_EXT_MSFBOType == this->glCaps().msFBOType()) {
                // The EXT version applies the scissor during the blit, so disable it.
                asr.reset(&fScissorState);
                fScissorState.fEnabled = false;
                this->flushScissor();
            }
            GrGLint srcY0;
            GrGLint srcY1;
            // Does the blit need to y-mirror or not?
            if (src->origin() == dst->origin()) {
                srcY0 = srcGLRect.fBottom;
                srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
            } else {
                srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
                srcY1 = srcGLRect.fBottom;
            }
            GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
                                    srcY0,
                                    srcGLRect.fLeft + srcGLRect.fWidth,
                                    srcY1,
                                    dstGLRect.fLeft,
                                    dstGLRect.fBottom,
                                    dstGLRect.fLeft + dstGLRect.fWidth,
                                    dstGLRect.fBottom + dstGLRect.fHeight,
                                    GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            // Release any temporary FBOs created above.
            if (dstFBO) {
                GL_CALL(DeleteFramebuffers(1, &dstFBO));
            }
            if (srcFBO) {
                GL_CALL(DeleteFramebuffers(1, &srcFBO));
            }
            copied = true;
        }
    }
    // Fall back to the inherited (draw-based) copy if no GL path succeeded.
    if (!copied && inheritedCouldCopy) {
        copied = INHERITED::onCopySurface(dst, src, srcRect, dstPoint);
        GrAssert(copied);
    }
    return copied;
}
2476 if (src->origin() == dst->origin()) { 2477 srcY0 = srcGLRect.fBottom; 2478 srcY1 = srcGLRect.fBottom + srcGLRect.fHeight; 2479 } else { 2480 srcY0 = srcGLRect.fBottom + srcGLRect.fHeight; 2481 srcY1 = srcGLRect.fBottom; 2482 } 2483 GL_CALL(BlitFramebuffer(srcGLRect.fLeft, 2484 srcY0, 2485 srcGLRect.fLeft + srcGLRect.fWidth, 2486 srcY1, 2487 dstGLRect.fLeft, 2488 dstGLRect.fBottom, 2489 dstGLRect.fLeft + dstGLRect.fWidth, 2490 dstGLRect.fBottom + dstGLRect.fHeight, 2491 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); 2492 if (dstFBO) { 2493 GL_CALL(DeleteFramebuffers(1, &dstFBO)); 2494 } 2495 if (srcFBO) { 2496 GL_CALL(DeleteFramebuffers(1, &srcFBO)); 2497 } 2498 copied = true; 2499 } 2500 } 2501 if (!copied && inheritedCouldCopy) { 2502 copied = INHERITED::onCopySurface(dst, src, srcRect, dstPoint); 2503 GrAssert(copied); 2504 } 2505 return copied; 2506 } 2507 2508 bool GrGpuGL::onCanCopySurface(GrSurface* dst, 2509 GrSurface* src, 2510 const SkIRect& srcRect, 2511 const SkIPoint& dstPoint) { 2512 // This mirrors the logic in onCopySurface. 2513 if (can_copy_texsubimage(dst, src, this)) { 2514 return true; 2515 } 2516 if (can_blit_framebuffer(dst, src, this)) { 2517 if (dst->isSameAs(src)) { 2518 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, 2519 srcRect.width(), srcRect.height()); 2520 if(!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) { 2521 return true; 2522 } 2523 } else { 2524 return true; 2525 } 2526 } 2527 return INHERITED::onCanCopySurface(dst, src, srcRect, dstPoint); 2528 } 2529 2530 2531 /////////////////////////////////////////////////////////////////////////////// 2532 2533 GrGLAttribArrayState* GrGpuGL::HWGeometryState::bindArrayAndBuffersToDraw( 2534 GrGpuGL* gpu, 2535 const GrGLVertexBuffer* vbuffer, 2536 const GrGLIndexBuffer* ibuffer) { 2537 GrAssert(NULL != vbuffer); 2538 GrGLAttribArrayState* attribState; 2539 2540 // We use a vertex array if we're on a core profile and the verts are in a VBO. 
GrGLAttribArrayState* GrGpuGL::HWGeometryState::bindArrayAndBuffersToDraw(
                                                GrGpuGL* gpu,
                                                const GrGLVertexBuffer* vbuffer,
                                                const GrGLIndexBuffer* ibuffer) {
    // Binds the vertex (and optional index) buffer for drawing and returns
    // the attrib-array state object the caller should configure.
    GrAssert(NULL != vbuffer);
    GrGLAttribArrayState* attribState;

    // We use a vertex array if we're on a core profile and the verts are in a VBO.
    if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) {
        // Lazily (re)create the VAO when it is absent or no longer valid.
        if (NULL == fVBOVertexArray || !fVBOVertexArray->isValid()) {
            SkSafeUnref(fVBOVertexArray);
            GrGLuint arrayID;
            GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
            int attrCount = gpu->glCaps().maxVertexAttributes();
            fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount));
        }
        attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer);
    } else {
        // Default vertex array path (no VAO).
        if (NULL != ibuffer) {
            this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID());
        } else {
            this->setVertexArrayID(gpu, 0);
        }
        // Size the default attrib state to the context's attribute count.
        int attrCount = gpu->glCaps().maxVertexAttributes();
        if (fDefaultVertexArrayAttribState.count() != attrCount) {
            fDefaultVertexArrayAttribState.resize(attrCount);
        }
        attribState = &fDefaultVertexArrayAttribState;
    }
    return attribState;
}