/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGLGpu.h"
#include "GrBackendSemaphore.h"
#include "GrBackendSurface.h"
#include "GrCpuBuffer.h"
#include "GrFixedClip.h"
#include "GrGLBuffer.h"
#include "GrGLGpuCommandBuffer.h"
#include "GrGLSemaphore.h"
#include "GrGLStencilAttachment.h"
#include "GrGLTextureRenderTarget.h"
#include "GrGpuResourcePriv.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrShaderCaps.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTexturePriv.h"
#include "GrTypes.h"
#include "SkAutoMalloc.h"
#include "SkConvertPixels.h"
#include "SkHalf.h"
#include "SkMakeUnique.h"
#include "SkMipMap.h"
#include "SkPixmap.h"
#include "SkSLCompiler.h"
#include "SkStrokeRec.h"
#include "SkTemplates.h"
#include "SkTo.h"
#include "SkTraceEvent.h"
#include "SkTypes.h"
#include "builders/GrGLShaderStringBuilder.h"

#include <cmath>

// Shorthand for issuing a GL call through this GrGLGpu's GrGLInterface.
#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)

#define SKIP_CACHE_CHECK true

// Allocation-sized GL calls can optionally be bracketed by glGetError checks so
// out-of-memory failures are detected instead of silently ignored.
#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
    #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
    #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
    #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
#else
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)
    #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
    #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
#endif

//#define USE_NSIGHT

///////////////////////////////////////////////////////////////////////////////

// Maps GrBlendEquation (used as the array index) to the corresponding GL blend
// equation enum. The GR_STATIC_ASSERTs below lock in the required index order.
static const GrGLenum gXfermodeEquation2Blend[] = {
    // Basic OpenGL blend equations.
    GR_GL_FUNC_ADD,
    GR_GL_FUNC_SUBTRACT,
    GR_GL_FUNC_REVERSE_SUBTRACT,

    // GL_KHR_blend_equation_advanced.
    GR_GL_SCREEN,
    GR_GL_OVERLAY,
    GR_GL_DARKEN,
    GR_GL_LIGHTEN,
    GR_GL_COLORDODGE,
    GR_GL_COLORBURN,
    GR_GL_HARDLIGHT,
    GR_GL_SOFTLIGHT,
    GR_GL_DIFFERENCE,
    GR_GL_EXCLUSION,
    GR_GL_MULTIPLY,
    GR_GL_HSL_HUE,
    GR_GL_HSL_SATURATION,
    GR_GL_HSL_COLOR,
    GR_GL_HSL_LUMINOSITY,

    // Illegal... needs to map to something.
    GR_GL_FUNC_ADD,
};
// Compile-time checks that the table entries line up with the enum values.
GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);

// Maps GrBlendCoeff (used as the array index) to the corresponding GL blend
// coefficient enum. Index order is asserted inside BlendCoeffReferencesConstant.
static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,
    GR_GL_CONSTANT_ALPHA,
    GR_GL_ONE_MINUS_CONSTANT_ALPHA,

    // extended blend coeffs
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,

    // Illegal... needs to map to something.
    GR_GL_ZERO,
};

// Returns true if the coefficient reads the GL blend-constant color/alpha
// (the GR_GL_CONSTANT_* entries of gXfermodeCoeff2Blend).
bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
    static const bool gCoeffReferencesBlendConst[] = {
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        false,
        true,
        true,
        true,
        true,

        // extended blend coeffs
        false,
        false,
        false,
        false,

        // Illegal.
        false,
    };
    return gCoeffReferencesBlendConst[coeff];
    // NOTE: the GR_STATIC_ASSERTs below are compile-time declarations only;
    // placing them after the return is deliberate and has no runtime effect.
    GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));

    GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
    GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
    GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
    GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
    GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
    GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
    GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
    GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
    GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
    GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
    GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
    GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
    GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
    GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);

    GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
    GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
    GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);

    // assertion for gXfermodeCoeff2Blend have to be in GrGpu scope
    GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
}

//////////////////////////////////////////////////////////////////////////////

// Converts a GL texture target enum to a small dense index used to address
// per-target binding slots in TextureUnitBindings. Aborts on unknown targets.
static int gl_target_to_binding_index(GrGLenum target) {
    switch (target) {
        case GR_GL_TEXTURE_2D:
            return 0;
        case GR_GL_TEXTURE_RECTANGLE:
            return 1;
        case GR_GL_TEXTURE_EXTERNAL:
            return 2;
    }
    SK_ABORT("Unexpected GL texture target.");
    return 0;
}

// Returns the unique ID of the resource currently bound to 'target' on this unit.
GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const {
    return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID;
}

// Returns whether this unit/target binding has ever been set via setBoundID.
bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const {
    return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified;
}

// Records 'resourceID' as bound to 'target' and marks the slot modified.
void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) {
    int targetIndex = gl_target_to_binding_index(target);
    fTargetBindings[targetIndex].fBoundResourceID = resourceID;
    fTargetBindings[targetIndex].fHasBeenModified = true;
}

// Marks the binding unknown (default-constructed UniqueID) so the next bind
// is not elided; used when the unit is borrowed for scratch work.
void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) {
    this->setBoundID(target, GrGpuResource::UniqueID());
}

// Invalidates every target's binding on this unit; optionally also clears the
// modified flags (markUnmodified).
void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) {
    for (auto& targetBinding : fTargetBindings) {
        targetBinding.fBoundResourceID.makeInvalid();
        if (markUnmodified) {
            targetBinding.fHasBeenModified = false;
        }
    }
}

//////////////////////////////////////////////////////////////////////////////

// Translates a GrSamplerState filter to the GL magnification filter enum.
// Mip-mapping is irrelevant for magnification, so kMipMap maps to LINEAR.
static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) {
    switch (filter) {
        case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
        case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR;
        case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR;
    }
    SK_ABORT("Unknown filter");
    return 0;
}

// Translates a GrSamplerState filter to the GL minification filter enum.
static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter) {
    switch (filter) {
        case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
        case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR;
        case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR_MIPMAP_LINEAR;
    }
    SK_ABORT("Unknown filter");
    return 0;
}

// Translates a GrSamplerState wrap mode to the GL wrap enum. kClampToBorder is
// only legal when the caps advertise support (asserted, not re-validated).
static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode,
                                            const GrCaps& caps) {
    switch (wrapMode) {
        case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE;
        case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT;
        case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT;
        case GrSamplerState::WrapMode::kClampToBorder:
            // May not be supported but should have been caught earlier
            SkASSERT(caps.clampToBorderSupport());
            return GR_GL_CLAMP_TO_BORDER;
    }
    SK_ABORT("Unknown wrap mode");
    return 0;
}

///////////////////////////////////////////////////////////////////////////////

// Lazily creates and caches one GL sampler object per distinct GrSamplerState
// (filter x wrapX x wrapY = 48 combinations) and tracks which sampler is bound
// to each texture unit so redundant BindSampler calls are skipped.
class GrGLGpu::SamplerObjectCache {
public:
    SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) {
        fNumTextureUnits = fGpu->glCaps().shaderCaps()->maxFragmentSamplers();
        fHWBoundSamplers.reset(new GrGLuint[fNumTextureUnits]);
        // 0 == "no sampler bound" for every unit; no sampler objects exist yet.
        std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
        std::fill_n(fSamplers, kNumSamplers, 0);
    }

    ~SamplerObjectCache() {
        if (!fNumTextureUnits) {
            // We've already been abandoned.
            return;
        }
        GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers));
    }

    // Ensures a sampler object matching 'state' exists and binds it to
    // texture unit 'unitIdx' (skipped if already bound there).
    void bindSampler(int unitIdx, const GrSamplerState& state) {
        int index = StateToIndex(state);
        if (!fSamplers[index]) {
            // First use of this state combination: create and configure the sampler.
            GrGLuint s;
            GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s));
            if (!s) {
                return;
            }
            fSamplers[index] = s;
            auto minFilter = filter_to_gl_min_filter(state.filter());
            auto magFilter = filter_to_gl_mag_filter(state.filter());
            auto wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps());
            auto wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps());
            GR_GL_CALL(fGpu->glInterface(),
                       SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter));
            GR_GL_CALL(fGpu->glInterface(),
                       SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter));
            GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX));
            GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY));
        }
        if (fHWBoundSamplers[unitIdx] != fSamplers[index]) {
            GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, fSamplers[index]));
            fHWBoundSamplers[unitIdx] = fSamplers[index];
        }
    }

    void invalidateBindings() {
        // When we have sampler support we always use samplers. So setting these to zero will cause
        // a rebind on next usage.
        std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
    }

    // Drops all tracking without touching GL; the context is gone.
    // fNumTextureUnits == 0 doubles as the "abandoned" flag.
    void abandon() {
        fHWBoundSamplers.reset();
        fNumTextureUnits = 0;
    }

    // Deletes the GL sampler objects (context still alive) and resets tracking.
    void release() {
        if (!fNumTextureUnits) {
            // We've already been abandoned.
            return;
        }
        GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers));
        std::fill_n(fSamplers, kNumSamplers, 0);
        // Deleting a bound sampler implicitly binds sampler 0.
        std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0);
    }

private:
    // Packs (filter, wrapX, wrapY) into a dense index: 16*filter + 4*wrapX + wrapY.
    static int StateToIndex(const GrSamplerState& state) {
        int filter = static_cast<int>(state.filter());
        SkASSERT(filter >= 0 && filter < 3);
        int wrapX = static_cast<int>(state.wrapModeX());
        SkASSERT(wrapX >= 0 && wrapX < 4);
        int wrapY = static_cast<int>(state.wrapModeY());
        SkASSERT(wrapY >= 0 && wrapY < 4);
        int idx = 16 * filter + 4 * wrapX + wrapY;
        SkASSERT(idx < kNumSamplers);
        return idx;
    }

    GrGLGpu* fGpu;
    static constexpr int kNumSamplers = 48;  // 3 filters * 4 wrapX * 4 wrapY
    std::unique_ptr<GrGLuint[]> fHWBoundSamplers;  // per texture unit; 0 = none
    GrGLuint fSamplers[kNumSamplers];              // lazily created; 0 = not yet made
    int fNumTextureUnits;                          // 0 after abandon()
};

///////////////////////////////////////////////////////////////////////////////

// Factory: builds a GrGLGpu from the given (or natively discovered) GL interface.
// Returns nullptr if no usable interface/context can be made.
sk_sp<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface, const GrContextOptions& options,
                           GrContext* context) {
    if (!interface) {
        interface = GrGLMakeNativeInterface();
        // For clients that have written their own GrGLCreateNativeInterface and haven't yet updated
        // to GrGLMakeNativeInterface.
        if (!interface) {
            interface = sk_ref_sp(GrGLCreateNativeInterface());
        }
        if (!interface) {
            return nullptr;
        }
    }
#ifdef USE_NSIGHT
    const_cast<GrContextOptions&>(options).fSuppressPathRendering = true;
#endif
    auto glContext = GrGLContext::Make(std::move(interface), options);
    if (!glContext) {
        return nullptr;
    }
    return sk_sp<GrGpu>(new GrGLGpu(std::move(glContext), context));
}

// Initializes GPU-side state tracking: buffer-target mapping (Chromium transfer
// buffers vs. standard PBO targets), per-unit texture bindings, and the optional
// path-rendering and sampler-object subsystems (capability-gated).
GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrContext* context)
        : GrGpu(context)
        , fGLContext(std::move(ctx))
        , fProgramCache(new ProgramCache(this))
        , fHWProgramID(0)
        , fTempSrcFBOID(0)
        , fTempDstFBOID(0)
        , fStencilClearFBOID(0) {
    SkASSERT(fGLContext);
    GrGLClearErr(this->glInterface());
    fCaps = sk_ref_sp(fGLContext->caps());

    fHWTextureUnitBindings.reset(this->numTextureUnits());

    this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
    this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
    if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
                GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
                GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
    } else {
        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
    }
    for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
        fHWBufferState[i].invalidate();
    }
    GR_STATIC_ASSERT(4 == SK_ARRAY_COUNT(fHWBufferState));

    if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
        fPathRendering.reset(new GrGLPathRendering(this));
    }

    if (this->glCaps().samplerObjectSupport()) {
        fSamplerObjectCache.reset(new SamplerObjectCache(this));
    }
}

// Tears down GL objects owned directly by the GPU (FBOs, copy/mipmap programs)
// after first releasing resources that need a live GrGLGpu to clean up.
GrGLGpu::~GrGLGpu() {
    // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
    // to release the resources held by the objects themselves.
    fPathRendering.reset();
    fCopyProgramArrayBuffer.reset();
    fMipmapProgramArrayBuffer.reset();

    fHWProgram.reset();
    if (fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        GL_CALL(UseProgram(0));
    }

    if (fTempSrcFBOID) {
        this->deleteFramebuffer(fTempSrcFBOID);
    }
    if (fTempDstFBOID) {
        this->deleteFramebuffer(fTempDstFBOID);
    }
    if (fStencilClearFBOID) {
        this->deleteFramebuffer(fStencilClearFBOID);
    }

    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        if (0 != fCopyPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
        }
    }

    for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
        if (0 != fMipmapPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
        }
    }

    delete fProgramCache;
    fSamplerObjectCache.reset();
}

// Handles context loss/teardown. kCleanup deletes GL objects through a still
// valid context; the abandon path only drops CPU-side tracking. Either way all
// cached IDs are zeroed afterwards.
void GrGLGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (DisconnectType::kCleanup == type) {
        if (fHWProgramID) {
            GL_CALL(UseProgram(0));
        }
        if (fTempSrcFBOID) {
            this->deleteFramebuffer(fTempSrcFBOID);
        }
        if (fTempDstFBOID) {
            this->deleteFramebuffer(fTempDstFBOID);
        }
        if (fStencilClearFBOID) {
            this->deleteFramebuffer(fStencilClearFBOID);
        }
        for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
            if (fCopyPrograms[i].fProgram) {
                GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
            }
        }
        for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
            if (fMipmapPrograms[i].fProgram) {
                GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
            }
        }

        if (fSamplerObjectCache) {
// (continuation of GrGLGpu::disconnect)
            fSamplerObjectCache->release();
        }
    } else {
        // Abandon: context is unusable; only drop CPU-side caches.
        if (fProgramCache) {
            fProgramCache->abandon();
        }
        if (fSamplerObjectCache) {
            fSamplerObjectCache->abandon();
        }
    }

    fHWProgram.reset();
    delete fProgramCache;
    fProgramCache = nullptr;

    fHWProgramID = 0;
    fTempSrcFBOID = 0;
    fTempDstFBOID = 0;
    fStencilClearFBOID = 0;
    fCopyProgramArrayBuffer.reset();
    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        fCopyPrograms[i].fProgram = 0;
    }
    fMipmapProgramArrayBuffer.reset();
    for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
        fMipmapPrograms[i].fProgram = 0;
    }

    if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
        this->glPathRendering()->disconnect(type);
    }
}

///////////////////////////////////////////////////////////////////////////////

// Re-establishes the GL state Skia assumes and invalidates the matching
// CPU-side shadow state, for each category selected in resetBits. Called when
// a third party may have touched the GL context.
void GrGLGpu::onResetContext(uint32_t resetBits) {
    if (resetBits & kMisc_GrGLBackendState) {
        // we don't use the zb at all
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        // We don't use face culling.
        GL_CALL(Disable(GR_GL_CULL_FACE));
        // We do use separate stencil. Our algorithms don't care which face is front vs. back so
        // just set this to the default for self-consistency.
        GL_CALL(FrontFace(GR_GL_CCW));

        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();

        if (kGL_GrGLStandard == this->glStandard()) {
#ifndef USE_NSIGHT
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));

            if (this->caps()->wireframeMode()) {
                GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
            } else {
                GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
            }
#endif
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

        }

        if (kGLES_GrGLStandard == this->glStandard() &&
            this->glCaps().fbFetchRequiresEnablePerSample()) {
            // The arm extension requires specifically enabling MSAA fetching per sample.
            // On some devices this may have a perf hit. Also multiple render targets are disabled
            GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE));
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
        GL_CALL(Disable(GR_GL_DITHER));

        // NaN guarantees the first real clear color never matches the shadow state.
        fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN;
    }

    if (resetBits & kMSAAEnable_GrGLBackendState) {
        fMSAAEnabled = kUnknown_TriState;

        if (this->caps()->usesMixedSamples()) {
            // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
            // modulation. This state has no effect when not rendering to a mixed sampled target.
            GL_CALL(CoverageModulation(GR_GL_RGBA));
        }
    }

    fHWActiveTextureUnitIdx = -1; // invalid
    fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < this->numTextureUnits(); ++s) {
            fHWTextureUnitBindings[s].invalidateAllTargets(false);
        }
        if (fSamplerObjectCache) {
            fSamplerObjectCache->invalidateBindings();
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWWindowRectsState.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWVertexArrayState.invalidate();
        this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
        this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTargetUniqueID.makeInvalid();
        fHWSRGBFramebuffer = kUnknown_TriState;
    }

    if (resetBits & kPathRendering_GrGLBackendState) {
        if (this->caps()->shaderCaps()->pathRenderingSupport()) {
            this->glPathRendering()->resetContext();
        }
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->glCaps().unpackRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
        fHWProgram.reset();
    }
}

// Validates a client-supplied GrBackendTexture and extracts its GL info into
// idDesc. Rejects a zero texture ID, unsupported EXTERNAL/RECTANGLE targets,
// and any target other than the three Skia understands.
static bool check_backend_texture(const GrBackendTexture& backendTex, const GrGLCaps& caps,
                                  GrGLTexture::IDDesc* idDesc) {
    GrGLTextureInfo info;
    if (!backendTex.getGLTextureInfo(&info) || !info.fID) {
        return false;
    }

    idDesc->fInfo = info;

    if (GR_GL_TEXTURE_EXTERNAL == idDesc->fInfo.fTarget) {
        if (!caps.shaderCaps()->externalTextureSupport()) {
            return false;
        }
    } else if (GR_GL_TEXTURE_RECTANGLE == idDesc->fInfo.fTarget) {
        if (!caps.rectangleTextureSupport()) {
            return false;
        }
    } else if (GR_GL_TEXTURE_2D != idDesc->fInfo.fTarget) {
        return false;
    }
    return true;
}

// Wraps a client GL texture (non-renderable) in a GrGLTexture. A missing
// format is filled in from the config's sized internal format.
sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership, GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrGLTexture::IDDesc idDesc;
    if (!check_backend_texture(backendTex, this->glCaps(), &idDesc)) {
        return nullptr;
    }
    if (!idDesc.fInfo.fFormat) {
        idDesc.fInfo.fFormat = this->glCaps().configSizedInternalFormat(backendTex.config());
    }
    if (kBorrow_GrWrapOwnership == ownership) {
        idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
    } else {
        idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
    }

    GrSurfaceDesc surfDesc;
    surfDesc.fFlags =
// (continuation of GrGLGpu::onWrapBackendTexture)
                     kNone_GrSurfaceFlags;
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = backendTex.config();
    surfDesc.fSampleCnt = 1;

    GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kValid
                                                            : GrMipMapsStatus::kNotAllocated;

    auto texture =
            GrGLTexture::MakeWrapped(this, surfDesc, mipMapsStatus, idDesc, cacheable, ioType);
    // We don't know what parameters are already set on wrapped textures.
    texture->textureParamsModified();
    return std::move(texture);
}

// Wraps a client GL texture as a renderable texture, creating the FBO/renderbuffer
// objects Skia needs to render to it. EXTERNAL targets are rejected.
sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrGLTexture::IDDesc idDesc;
    if (!check_backend_texture(backendTex, this->glCaps(), &idDesc)) {
        return nullptr;
    }
    if (!idDesc.fInfo.fFormat) {
        idDesc.fInfo.fFormat = this->glCaps().configSizedInternalFormat(backendTex.config());
    }

    // We don't support rendering to a EXTERNAL texture.
    if (GR_GL_TEXTURE_EXTERNAL == idDesc.fInfo.fTarget) {
        return nullptr;
    }

    if (kBorrow_GrWrapOwnership == ownership) {
        idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
    } else {
        idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
    }

    GrSurfaceDesc surfDesc;
    surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = backendTex.config();
    surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, backendTex.config());
    // getRenderTargetSampleCount returning < 1 means the config isn't renderable
    // at the requested sample count.
    if (surfDesc.fSampleCnt < 1) {
        return nullptr;
    }

    GrGLRenderTarget::IDDesc rtIDDesc;
    if (!this->createRenderTargetObjects(surfDesc, idDesc.fInfo, &rtIDDesc)) {
        return nullptr;
    }

    // NOTE(review): existing mips are marked kDirty here, whereas the
    // non-renderable wrap path uses kValid — presumably because rendering can
    // invalidate them; confirm the asymmetry is intentional.
    GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kDirty
                                                            : GrMipMapsStatus::kNotAllocated;

    sk_sp<GrGLTextureRenderTarget> texRT(GrGLTextureRenderTarget::MakeWrapped(
            this, surfDesc, idDesc, rtIDDesc, cacheable, mipMapsStatus));
    texRT->baseLevelWasBoundToFBO();
    // We don't know what parameters are already set on wrapped textures.
    texRT->textureParamsModified();
    return std::move(texRT);
}

// Wraps a client FBO as a render target. The FBO is always borrowed and cannot
// be resolved into a texture (kUnresolvableFBOID).
sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    GrGLFramebufferInfo info;
    if (!backendRT.getGLFramebufferInfo(&info)) {
        return nullptr;
    }

    GrGLRenderTarget::IDDesc idDesc;
    idDesc.fRTFBOID = info.fFBOID;
    idDesc.fMSColorRenderbufferID = 0;
    idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
    idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
    idDesc.fIsMixedSampled = false;

    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fWidth = backendRT.width();
    desc.fHeight = backendRT.height();
    desc.fConfig = backendRT.config();
    desc.fSampleCnt =
            this->caps()->getRenderTargetSampleCount(backendRT.sampleCnt(), backendRT.config());

    return GrGLRenderTarget::MakeWrapped(this, desc, info.fFormat, idDesc, backendRT.stencilBits());
}

// Wraps a client GL texture purely as a render target (no GrTexture view);
// only 2D and RECTANGLE targets are accepted.
sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
                                                                  int sampleCnt) {
    GrGLTextureInfo info;
    if (!tex.getGLTextureInfo(&info) || !info.fID) {
        return nullptr;
    }

    if (GR_GL_TEXTURE_RECTANGLE != info.fTarget &&
        GR_GL_TEXTURE_2D != info.fTarget) {
        // Only texture rectangle and texture 2d are supported. We do not check whether texture
        // rectangle is supported by Skia - if the caller provided us with a texture rectangle,
        // we assume the necessary support exists.
        return nullptr;
    }

    GrSurfaceDesc surfDesc;
    surfDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    surfDesc.fWidth = tex.width();
    surfDesc.fHeight = tex.height();
    surfDesc.fConfig = tex.config();
    surfDesc.fSampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, tex.config());

    GrGLRenderTarget::IDDesc rtIDDesc;
    if (!this->createRenderTargetObjects(surfDesc, info, &rtIDDesc)) {
        return nullptr;
    }
    return GrGLRenderTarget::MakeWrapped(this, surfDesc, info.fFormat, rtIDDesc, 0);
}

// Shared precondition for writes/transfers: there must be a texture and it must
// not use the TEXTURE_EXTERNAL target.
static bool check_write_and_transfer_input(GrGLTexture* glTex) {
    if (!glTex) {
        return false;
    }

    // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
    if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
        return false;
    }

    return true;
}

// Uploads CPU pixel data (texels, mipLevelCount levels) into a sub-rect of the
// surface's texture via uploadTexData.
bool GrGLGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
                            GrColorType srcColorType, const GrMipLevel texels[],
                            int mipLevelCount) {
    auto glTex = static_cast<GrGLTexture*>(surface->asTexture());

    if (!check_write_and_transfer_input(glTex)) {
        return false;
    }

    this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());

    // No sRGB transformation occurs in uploadTexData. We choose to make the src config match the
    // srgb-ness of the surface to avoid issues in ES2 where internal/external formats must match.
    // When we're on ES2 and the dst is GL_SRGB_ALPHA by making the config be kSRGB_8888 we know
    // that our caps will choose GL_SRGB_ALPHA as the external format, too. On ES3 or regular GL our
    // caps knows to make the external format be GL_RGBA.
839 auto srgbEncoded = GrPixelConfigIsSRGBEncoded(surface->config()); 840 auto srcAsConfig = GrColorTypeToPixelConfig(srcColorType, srgbEncoded); 841 842 SkASSERT(!GrPixelConfigIsCompressed(glTex->config())); 843 return this->uploadTexData(glTex->config(), glTex->width(), glTex->height(), glTex->target(), 844 kWrite_UploadType, left, top, width, height, srcAsConfig, texels, 845 mipLevelCount); 846 } 847 848 // For GL_[UN]PACK_ALIGNMENT. TODO: This really wants to be GrColorType. 849 static inline GrGLint config_alignment(GrPixelConfig config) { 850 SkASSERT(!GrPixelConfigIsCompressed(config)); 851 switch (config) { 852 case kAlpha_8_GrPixelConfig: 853 case kAlpha_8_as_Alpha_GrPixelConfig: 854 case kAlpha_8_as_Red_GrPixelConfig: 855 case kGray_8_GrPixelConfig: 856 case kGray_8_as_Lum_GrPixelConfig: 857 case kGray_8_as_Red_GrPixelConfig: 858 return 1; 859 case kRGB_565_GrPixelConfig: 860 case kRGBA_4444_GrPixelConfig: 861 case kRG_88_GrPixelConfig: 862 case kAlpha_half_GrPixelConfig: 863 case kAlpha_half_as_Red_GrPixelConfig: 864 case kRGBA_half_GrPixelConfig: 865 case kRGBA_half_Clamped_GrPixelConfig: 866 return 2; 867 case kRGBA_8888_GrPixelConfig: 868 case kRGB_888_GrPixelConfig: // We're really talking about GrColorType::kRGB_888x here. 
869 case kRGB_888X_GrPixelConfig: 870 case kBGRA_8888_GrPixelConfig: 871 case kSRGBA_8888_GrPixelConfig: 872 case kSBGRA_8888_GrPixelConfig: 873 case kRGBA_1010102_GrPixelConfig: 874 case kRGBA_float_GrPixelConfig: 875 case kRG_float_GrPixelConfig: 876 return 4; 877 case kRGB_ETC1_GrPixelConfig: 878 case kUnknown_GrPixelConfig: 879 return 0; 880 } 881 SK_ABORT("Invalid pixel config"); 882 return 0; 883 } 884 885 bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height, 886 GrColorType bufferColorType, GrGpuBuffer* transferBuffer, 887 size_t offset, size_t rowBytes) { 888 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); 889 GrPixelConfig texConfig = glTex->config(); 890 SkASSERT(this->caps()->isConfigTexturable(texConfig)); 891 892 // Can't transfer compressed data 893 SkASSERT(!GrPixelConfigIsCompressed(glTex->config())); 894 895 if (!check_write_and_transfer_input(glTex)) { 896 return false; 897 } 898 899 static_assert(sizeof(int) == sizeof(int32_t), ""); 900 if (width <= 0 || height <= 0) { 901 return false; 902 } 903 904 this->bindTextureToScratchUnit(glTex->target(), glTex->textureID()); 905 906 SkASSERT(!transferBuffer->isMapped()); 907 SkASSERT(!transferBuffer->isCpuBuffer()); 908 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer); 909 this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer); 910 911 SkDEBUGCODE( 912 SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height); 913 SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height()); 914 SkASSERT(bounds.contains(subRect)); 915 ) 916 917 int bpp = GrColorTypeBytesPerPixel(bufferColorType); 918 const size_t trimRowBytes = width * bpp; 919 if (!rowBytes) { 920 rowBytes = trimRowBytes; 921 } 922 const void* pixels = (void*)offset; 923 if (width < 0 || height < 0) { 924 return false; 925 } 926 927 bool restoreGLRowLength = false; 928 if (trimRowBytes != rowBytes) { 929 // we should have checked for this support already 930 
SkASSERT(this->glCaps().unpackRowLengthSupport()); 931 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp)); 932 restoreGLRowLength = true; 933 } 934 935 // Internal format comes from the texture desc. 936 GrGLenum internalFormat; 937 // External format and type come from the upload data. 938 GrGLenum externalFormat; 939 GrGLenum externalType; 940 auto bufferAsConfig = GrColorTypeToPixelConfig(bufferColorType, GrSRGBEncoded::kNo); 941 if (!this->glCaps().getTexImageFormats(texConfig, bufferAsConfig, &internalFormat, 942 &externalFormat, &externalType)) { 943 return false; 944 } 945 946 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(texConfig))); 947 GL_CALL(TexSubImage2D(glTex->target(), 948 0, 949 left, top, 950 width, 951 height, 952 externalFormat, externalType, 953 pixels)); 954 955 if (restoreGLRowLength) { 956 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); 957 } 958 959 return true; 960 } 961 962 /** 963 * Creates storage space for the texture and fills it with texels. 964 * 965 * @param config Pixel config of the texture. 966 * @param interface The GL interface in use. 967 * @param caps The capabilities of the GL device. 968 * @param target Which bound texture to target (GR_GL_TEXTURE_2D, e.g.) 969 * @param internalFormat The data format used for the internal storage of the texture. May be sized. 970 * @param internalFormatForTexStorage The data format used for the TexStorage API. Must be sized. 971 * @param externalFormat The data format used for the external storage of the texture. 972 * @param externalType The type of the data used for the external storage of the texture. 973 * @param texels The texel data of the texture being created. 
 * @param mipLevelCount  Number of mipmap levels
 * @param baseWidth      The width of the texture's base mipmap level
 * @param baseHeight     The height of the texture's base mipmap level
 */
static bool allocate_and_populate_texture(GrPixelConfig config,
                                          const GrGLInterface& interface,
                                          const GrGLCaps& caps,
                                          GrGLenum target,
                                          GrGLenum internalFormat,
                                          GrGLenum internalFormatForTexStorage,
                                          GrGLenum externalFormat,
                                          GrGLenum externalType,
                                          const GrMipLevel texels[], int mipLevelCount,
                                          int baseWidth, int baseHeight) {
    CLEAR_ERROR_BEFORE_ALLOC(&interface);

    bool useTexStorage = caps.isConfigTexSupportEnabled(config);
    // We can only use TexStorage if we know we will not later change the storage requirements.
    // This means if we may later want to add mipmaps, we cannot use TexStorage.
    // Right now, we cannot know if we will later add mipmaps or not.
    // The only time we can use TexStorage is when we already have the
    // mipmaps.
    useTexStorage &= mipLevelCount > 1;

    if (useTexStorage) {
        // We never resize or change formats of textures.
        // TexStorage allocates immutable storage for all levels in one call.
        // (SkTMax(mipLevelCount, 1) is redundant here since mipLevelCount > 1
        // is guaranteed by the useTexStorage mask above, but it is harmless.)
        GL_ALLOC_CALL(&interface,
                      TexStorage2D(target, SkTMax(mipLevelCount, 1), internalFormatForTexStorage,
                                   baseWidth, baseHeight));
        GrGLenum error = CHECK_ALLOC_ERROR(&interface);
        if (error != GR_GL_NO_ERROR) {
            return false;
        } else {
            for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
                const void* currentMipData = texels[currentMipLevel].fPixels;
                if (currentMipData == nullptr) {
                    // Storage for this level exists; it just stays unpopulated.
                    continue;
                }
                int twoToTheMipLevel = 1 << currentMipLevel;
                int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
                int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);

                GR_GL_CALL(&interface,
                           TexSubImage2D(target,
                                         currentMipLevel,
                                         0, // left
                                         0, // top
                                         currentWidth,
                                         currentHeight,
                                         externalFormat, externalType,
                                         currentMipData));
            }
            return true;
        }
    } else {
        if (!mipLevelCount) {
            // No texel data provided at all: just allocate the base level.
            GL_ALLOC_CALL(&interface,
                          TexImage2D(target,
                                     0,
                                     internalFormat,
                                     baseWidth,
                                     baseHeight,
                                     0, // border
                                     externalFormat, externalType,
                                     nullptr));
            GrGLenum error = CHECK_ALLOC_ERROR(&interface);
            if (error != GR_GL_NO_ERROR) {
                return false;
            }
        } else {
            for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
                int twoToTheMipLevel = 1 << currentMipLevel;
                int currentWidth = SkTMax(1, width / twoToTheMipLevel);
                int currentHeight = SkTMax(1, height / twoToTheMipLevel);
                const void* currentMipData = texels[currentMipLevel].fPixels;
                // Even if currentMipData is nullptr, continue to call TexImage2D.
                // This will allocate texture memory which we can later populate.
1051 GL_ALLOC_CALL(&interface, 1052 TexImage2D(target, 1053 currentMipLevel, 1054 internalFormat, 1055 currentWidth, 1056 currentHeight, 1057 0, // border 1058 externalFormat, externalType, 1059 currentMipData)); 1060 GrGLenum error = CHECK_ALLOC_ERROR(&interface); 1061 if (error != GR_GL_NO_ERROR) { 1062 return false; 1063 } 1064 } 1065 } 1066 } 1067 return true; 1068 } 1069 1070 /** 1071 * Creates storage space for the texture and fills it with texels. 1072 * 1073 * @param config Compressed pixel config of the texture. 1074 * @param interface The GL interface in use. 1075 * @param caps The capabilities of the GL device. 1076 * @param target Which bound texture to target (GR_GL_TEXTURE_2D, e.g.) 1077 * @param internalFormat The data format used for the internal storage of the texture. 1078 * @param texels The texel data of the texture being created. 1079 * @param mipLevelCount Number of mipmap levels 1080 * @param baseWidth The width of the texture's base mipmap level 1081 * @param baseHeight The height of the texture's base mipmap level 1082 */ 1083 static bool allocate_and_populate_compressed_texture(GrPixelConfig config, 1084 const GrGLInterface& interface, 1085 const GrGLCaps& caps, 1086 GrGLenum target, GrGLenum internalFormat, 1087 const GrMipLevel texels[], int mipLevelCount, 1088 int baseWidth, int baseHeight) { 1089 CLEAR_ERROR_BEFORE_ALLOC(&interface); 1090 SkASSERT(GrPixelConfigIsCompressed(config)); 1091 1092 bool useTexStorage = caps.isConfigTexSupportEnabled(config); 1093 // We can only use TexStorage if we know we will not later change the storage requirements. 1094 // This means if we may later want to add mipmaps, we cannot use TexStorage. 1095 // Right now, we cannot know if we will later add mipmaps or not. 1096 // The only time we can use TexStorage is when we already have the 1097 // mipmaps. 1098 useTexStorage &= mipLevelCount > 1; 1099 1100 if (useTexStorage) { 1101 // We never resize or change formats of textures. 
1102 GL_ALLOC_CALL(&interface, 1103 TexStorage2D(target, 1104 mipLevelCount, 1105 internalFormat, 1106 baseWidth, baseHeight)); 1107 GrGLenum error = CHECK_ALLOC_ERROR(&interface); 1108 if (error != GR_GL_NO_ERROR) { 1109 return false; 1110 } else { 1111 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) { 1112 const void* currentMipData = texels[currentMipLevel].fPixels; 1113 if (currentMipData == nullptr) { 1114 // Compressed textures require data for every level 1115 return false; 1116 } 1117 1118 int twoToTheMipLevel = 1 << currentMipLevel; 1119 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel); 1120 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel); 1121 1122 // Make sure that the width and height that we pass to OpenGL 1123 // is a multiple of the block size. 1124 size_t dataSize = GrCompressedFormatDataSize(config, currentWidth, currentHeight); 1125 GR_GL_CALL(&interface, CompressedTexSubImage2D(target, 1126 currentMipLevel, 1127 0, // left 1128 0, // top 1129 currentWidth, 1130 currentHeight, 1131 internalFormat, 1132 SkToInt(dataSize), 1133 currentMipData)); 1134 } 1135 } 1136 } else { 1137 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) { 1138 const void* currentMipData = texels[currentMipLevel].fPixels; 1139 if (currentMipData == nullptr) { 1140 // Compressed textures require data for every level 1141 return false; 1142 } 1143 1144 int twoToTheMipLevel = 1 << currentMipLevel; 1145 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel); 1146 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel); 1147 1148 // Make sure that the width and height that we pass to OpenGL 1149 // is a multiple of the block size. 
1150 size_t dataSize = GrCompressedFormatDataSize(config, baseWidth, baseHeight); 1151 1152 GL_ALLOC_CALL(&interface, 1153 CompressedTexImage2D(target, 1154 currentMipLevel, 1155 internalFormat, 1156 currentWidth, 1157 currentHeight, 1158 0, // border 1159 SkToInt(dataSize), 1160 currentMipData)); 1161 1162 GrGLenum error = CHECK_ALLOC_ERROR(&interface); 1163 if (error != GR_GL_NO_ERROR) { 1164 return false; 1165 } 1166 } 1167 } 1168 1169 return true; 1170 } 1171 /** 1172 * After a texture is created, any state which was altered during its creation 1173 * needs to be restored. 1174 * 1175 * @param interface The GL interface to use. 1176 * @param caps The capabilities of the GL device. 1177 * @param restoreGLRowLength Should the row length unpacking be restored? 1178 * @param glFlipY Did GL flip the texture vertically? 1179 */ 1180 static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps, 1181 bool restoreGLRowLength) { 1182 if (restoreGLRowLength) { 1183 SkASSERT(caps.unpackRowLengthSupport()); 1184 GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); 1185 } 1186 } 1187 1188 void GrGLGpu::unbindCpuToGpuXferBuffer() { 1189 auto* xferBufferState = this->hwBufferState(GrGpuBufferType::kXferCpuToGpu); 1190 if (!xferBufferState->fBoundBufferUniqueID.isInvalid()) { 1191 GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0)); 1192 xferBufferState->invalidate(); 1193 } 1194 } 1195 1196 // TODO: Make this take a GrColorType instead of dataConfig. This requires updating GrGLCaps to 1197 // convert from GrColorType to externalFormat/externalType GLenum values. 
// Uploads (and, for kNewTexture_UploadType, allocates) uncompressed texel data
// for one or more mip levels. When row padding must be trimmed and
// GL_UNPACK_ROW_LENGTH cannot be used, the data is first copied into a
// tightly-packed temporary buffer.
bool GrGLGpu::uploadTexData(GrPixelConfig texConfig, int texWidth, int texHeight, GrGLenum target,
                            UploadType uploadType, int left, int top, int width, int height,
                            GrPixelConfig dataConfig, const GrMipLevel texels[], int mipLevelCount,
                            GrMipMapsStatus* mipMapsStatus) {
    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    SkASSERT(this->caps()->isConfigTexturable(texConfig));
    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texWidth, texHeight);
        SkASSERT(bounds.contains(subRect));
    )
    // Mipped uploads must cover the whole texture.
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == texWidth && height == texHeight));

    // Make sure no pixel-unpack buffer is bound; we upload from client memory.
    this->unbindCpuToGpuXferBuffer();

    // texels is const.
    // But we may need to flip the texture vertically to prepare it.
    // Rather than flip in place and alter the incoming data,
    // we allocate a new buffer to flip into.
    // This means we need to make a non-const shallow copy of texels.
    SkAutoTMalloc<GrMipLevel> texelsShallowCopy;

    if (mipLevelCount) {
        texelsShallowCopy.reset(mipLevelCount);
        memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));
    }

    const GrGLInterface* interface = this->glInterface();
    const GrGLCaps& caps = this->glCaps();

    size_t bpp = GrBytesPerPixel(dataConfig);

    if (width == 0 || height == 0) {
        return false;
    }

    // Internal format comes from the texture desc.
    GrGLenum internalFormat;
    // External format and type come from the upload data.
    GrGLenum externalFormat;
    GrGLenum externalType;
    if (!this->glCaps().getTexImageFormats(texConfig, dataConfig, &internalFormat, &externalFormat,
                                           &externalType)) {
        return false;
    }
    // TexStorage requires a sized format, and internalFormat may or may not be
    GrGLenum internalFormatForTexStorage = this->glCaps().configSizedInternalFormat(texConfig);

    /*
     *  Check whether to allocate a temporary buffer for flipping y or
     *  because our srcData has extra bytes past each row. If so, we need
     *  to trim those off here, since GL ES may not let us specify
     *  GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;

    // in case we need a temporary, trimmed copy of the src pixels
    SkAutoSMalloc<128 * 128> tempStorage;

    if (mipMapsStatus) {
        *mipMapsStatus = GrMipMapsStatus::kValid;
    }

    const bool usesMips = mipLevelCount > 1;

    // find the combined size of all the mip levels and the relative offset of
    // each into the collective buffer
    bool willNeedData = false;
    size_t combinedBufferSize = 0;
    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, width / twoToTheMipLevel);
            int currentHeight = SkTMax(1, height / twoToTheMipLevel);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t trimmedSize = trimRowBytes * currentHeight;

            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
                    ? texelsShallowCopy[currentMipLevel].fRowBytes
                    : trimRowBytes;

            // A CPU-side trimming copy is needed when rows are padded and we
            // can't (or, for mips, won't) use GL_UNPACK_ROW_LENGTH.
            if (((!caps.unpackRowLengthSupport() || usesMips) && trimRowBytes != rowBytes)) {
                willNeedData = true;
            }

            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        } else {
            // A missing level leaves the mip chain dirty; record a zero offset
            // placeholder so indices stay aligned with levels.
            if (mipMapsStatus) {
                *mipMapsStatus = GrMipMapsStatus::kDirty;
            }
            individualMipOffsets.push_back(0);
        }
    }
    if (mipMapsStatus && mipLevelCount <= 1) {
        *mipMapsStatus = GrMipMapsStatus::kNotAllocated;
    }
    char* buffer = nullptr;
    if (willNeedData) {
        buffer = (char*)tempStorage.reset(combinedBufferSize);
    }

    // Second pass: per level, either configure GL_UNPACK_ROW_LENGTH or trim the
    // rows into the temporary buffer and repoint the shallow copy at it.
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (!texelsShallowCopy[currentMipLevel].fPixels) {
            continue;
        }
        int twoToTheMipLevel = 1 << currentMipLevel;
        int currentWidth = SkTMax(1, width / twoToTheMipLevel);
        int currentHeight = SkTMax(1, height / twoToTheMipLevel);
        const size_t trimRowBytes = currentWidth * bpp;

        /*
         *  check whether to allocate a temporary buffer for flipping y or
         *  because our srcData has extra bytes past each row. If so, we need
         *  to trim those off here, since GL ES may not let us specify
         *  GL_UNPACK_ROW_LENGTH.
         */
        restoreGLRowLength = false;

        const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
                ? texelsShallowCopy[currentMipLevel].fRowBytes
                : trimRowBytes;

        // TODO: This optimization should be enabled with or without mips.
        // For use with mips, we must set GR_GL_UNPACK_ROW_LENGTH once per
        // mip level, before calling glTexImage2D.
        if (caps.unpackRowLengthSupport() && !usesMips) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else if (trimRowBytes != rowBytes) {
            // copy data into our new storage, skipping the trailing bytes
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
            // now point data to our copied version
            texelsShallowCopy[currentMipLevel].fPixels = buffer +
                individualMipOffsets[currentMipLevel];
            texelsShallowCopy[currentMipLevel].fRowBytes = trimRowBytes;
        }
    }

    if (mipLevelCount) {
        GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(texConfig)));
    }

    bool succeeded = true;
    if (kNewTexture_UploadType == uploadType) {
        // New textures may only be created as full-surface uploads.
        if (0 == left && 0 == top && texWidth == width && texHeight == height) {
            succeeded = allocate_and_populate_texture(
                    texConfig, *interface, caps, target, internalFormat,
                    internalFormatForTexStorage, externalFormat, externalType,
                    texelsShallowCopy, mipLevelCount, width, height);
        } else {
            succeeded = false;
        }
    } else {
        // Existing texture: update each provided level with TexSubImage2D.
        for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
            if (!texelsShallowCopy[currentMipLevel].fPixels) {
                continue;
            }
            int twoToTheMipLevel = 1 << currentMipLevel;
            int currentWidth = SkTMax(1, width / twoToTheMipLevel);
            int currentHeight = SkTMax(1, height / twoToTheMipLevel);

            GL_CALL(TexSubImage2D(target,
                                  currentMipLevel,
                                  left, top,
                                  currentWidth,
                                  currentHeight,
                                  externalFormat, externalType,
                                  texelsShallowCopy[currentMipLevel].fPixels));
        }
    }

    // Undo any GL_UNPACK_ROW_LENGTH change made above.
    restore_pixelstore_state(*interface, caps, restoreGLRowLength);

    return succeeded;
}

// Allocates and populates a compressed texture. Unlike uploadTexData, no row
// trimming is needed; the data layout is dictated by the compressed format.
bool GrGLGpu::uploadCompressedTexData(GrPixelConfig texConfig, int texWidth, int texHeight,
                                      GrGLenum target, GrPixelConfig dataConfig,
                                      const GrMipLevel texels[], int mipLevelCount,
                                      GrMipMapsStatus* mipMapsStatus) {
    SkASSERT(this->caps()->isConfigTexturable(texConfig));

    const GrGLInterface* interface = this->glInterface();
    const GrGLCaps& caps = this->glCaps();

    // We only need the internal format for compressed 2D textures.
    GrGLenum internalFormat;
    if (!caps.getCompressedTexImageFormats(texConfig, &internalFormat)) {
        return false;
    }

    if (mipMapsStatus) {
        if (mipLevelCount <= 1) {
            *mipMapsStatus = GrMipMapsStatus::kNotAllocated;
        } else {
            *mipMapsStatus = GrMipMapsStatus::kValid;
        }
    }

    return allocate_and_populate_compressed_texture(texConfig, *interface, caps, target,
                                                    internalFormat, texels, mipLevelCount,
                                                    texWidth, texHeight);
}

// Allocates multisampled renderbuffer storage using whichever MSAA FBO flavor
// the context supports. Returns false if GL reported an allocation error.
static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
                                      int sampleCount,
                                      GrGLenum format,
                                      int width, int height) {
    CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    switch (ctx.caps()->msFBOType()) {
        case GrGLCaps::kStandard_MSFBOType:
        case GrGLCaps::kMixedSamples_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
                                                         sampleCount,
                                                         format,
                                                         width, height));
            break;
        case GrGLCaps::kES_Apple_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
                                                                 sampleCount,
                                                                 format,
                                                                 width, height));
            break;
        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                          RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
                                                               sampleCount,
                                                               format,
                                                               width, height));
            break;
        case GrGLCaps::kNone_MSFBOType:
            SK_ABORT("Shouldn't be here if we don't support multisampled renderbuffers.");
            break;
    }
    return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
}

// Builds the FBO(s) (and, for MSAA, the color renderbuffer) that wrap a texture
// as a render target. On failure all partially-created GL objects are released.
bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc,
                                        const GrGLTextureInfo& texInfo,
                                        GrGLRenderTarget::IDDesc* idDesc) {
    idDesc->fMSColorRenderbufferID = 0;
    idDesc->fRTFBOID = 0;
    idDesc->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
    idDesc->fTexFBOID = 0;
    SkASSERT((GrGLCaps::kMixedSamples_MSFBOType == this->glCaps().msFBOType()) ==
             this->caps()->usesMixedSamples());
    idDesc->fIsMixedSampled = desc.fSampleCnt > 1 && this->caps()->usesMixedSamples();

    GrGLenum status;

    GrGLenum colorRenderbufferFormat = 0; // suppress warning

    if (desc.fSampleCnt > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
        goto FAILED;
    }

    GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID));
    if (!idDesc->fTexFBOID) {
        goto FAILED;
    }

    // If we are using multisampling we will create two FBOS. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves it when it is
    // rendered from.
    if (desc.fSampleCnt > 1 && this->glCaps().usesMSAARenderBuffers()) {
        GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID));
        GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
        if (!idDesc->fRTFBOID ||
            !idDesc->fMSColorRenderbufferID) {
            goto FAILED;
        }
        this->glCaps().getRenderbufferFormat(desc.fConfig, &colorRenderbufferFormat);
    } else {
        // Single-sampled (or implicit-resolve MSAA): render straight into the
        // texture's FBO.
        idDesc->fRTFBOID = idDesc->fTexFBOID;
    }

    // below here we may bind the FBO
    fHWBoundRenderTargetUniqueID.makeInvalid();
    if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
        SkASSERT(desc.fSampleCnt > 1);
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID));
        if (!renderbuffer_storage_msaa(*fGLContext,
                                       desc.fSampleCnt,
                                       colorRenderbufferFormat,
                                       desc.fWidth, desc.fHeight)) {
            goto FAILED;
        }
        this->bindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID);
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
                                        GR_GL_COLOR_ATTACHMENT0,
                                        GR_GL_RENDERBUFFER,
                                        idDesc->fMSColorRenderbufferID));
        // Only pay for a completeness check the first time we see this config.
        if (!this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                goto FAILED;
            }
            fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
        }
    }
    this->bindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID);

    if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 1) {
        GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
                                                GR_GL_COLOR_ATTACHMENT0,
                                                texInfo.fTarget,
                                                texInfo.fID, 0, desc.fSampleCnt));
    } else {
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
                                     GR_GL_COLOR_ATTACHMENT0,
                                     texInfo.fTarget,
                                     texInfo.fID, 0));
    }
    if (!this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
            goto FAILED;
        }
        fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
    }

    return true;

FAILED:
    // Release whatever was created before the failure.
    if (idDesc->fMSColorRenderbufferID) {
        GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
    }
    if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
        this->deleteFramebuffer(idDesc->fRTFBOID);
    }
    if (idDesc->fTexFBOID) {
        this->deleteFramebuffer(idDesc->fTexFBOID);
    }
    return false;
}

// good to set a break-point here to know when createTexture fails
static sk_sp<GrTexture> return_null_texture() {
//    SkDEBUGFAIL("null texture");
    return nullptr;
}

static GrGLTexture::SamplerParams set_initial_texture_params(const GrGLInterface* interface,
                                                             const GrGLTextureInfo& info) {
    // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
    // drivers have a bug where an FBO won't be complete if it includes a
    // texture that is not mipmap complete (considering the filter in use).
    GrGLTexture::SamplerParams params;
    params.fMinFilter = GR_GL_NEAREST;
    params.fMagFilter = GR_GL_NEAREST;
    params.fWrapS = GR_GL_CLAMP_TO_EDGE;
    params.fWrapT = GR_GL_CLAMP_TO_EDGE;
    GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_MAG_FILTER, params.fMagFilter));
    GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_MIN_FILTER, params.fMinFilter));
    GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_S, params.fWrapS));
    GR_GL_CALL(interface, TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_T, params.fWrapT));
    return params;
}

sk_sp<GrTexture> GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
                                          SkBudgeted budgeted,
                                          const GrMipLevel texels[],
                                          int mipLevelCount) {
    // We fail if the MSAA was requested and is not available.
    if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt > 1) {
        //SkDebugf("MSAA RT requested but not supported on this platform.");
        return return_null_texture();
    }

    bool performClear = (desc.fFlags & kPerformInitialClear_GrSurfaceFlag) &&
                        !GrPixelConfigIsCompressed(desc.fConfig);

    // If we can neither ClearTexImage nor clear via an FBO, synthesize a
    // zero-filled level and upload it instead of clearing afterward.
    GrMipLevel zeroLevel;
    std::unique_ptr<uint8_t[]> zeros;
    if (performClear && !this->glCaps().clearTextureSupport() &&
        !this->glCaps().canConfigBeFBOColorAttachment(desc.fConfig)) {
        size_t rowSize = GrBytesPerPixel(desc.fConfig) * desc.fWidth;
        size_t size = rowSize * desc.fHeight;
        zeros.reset(new uint8_t[size]);
        memset(zeros.get(), 0, size);
        zeroLevel.fPixels = zeros.get();
        zeroLevel.fRowBytes = 0;
        texels = &zeroLevel;
        mipLevelCount = 1;
        performClear = false;
    }

    bool isRenderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    GrGLTexture::IDDesc idDesc;
    idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
    GrMipMapsStatus mipMapsStatus;
    GrGLTexture::SamplerParams initialTexParams;
    if (!this->createTextureImpl(desc, &idDesc.fInfo, isRenderTarget, &initialTexParams, texels,
                                 mipLevelCount, &mipMapsStatus)) {
        return return_null_texture();
    }

    sk_sp<GrGLTexture> tex;
    if (isRenderTarget) {
        // unbind the texture from the texture unit before binding it to the frame buffer
        GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
        GrGLRenderTarget::IDDesc rtIDDesc;

        if (!this->createRenderTargetObjects(desc, idDesc.fInfo, &rtIDDesc)) {
            GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
            return return_null_texture();
        }
        tex = sk_make_sp<GrGLTextureRenderTarget>(this, budgeted, desc, idDesc, rtIDDesc,
                                                  mipMapsStatus);
        tex->baseLevelWasBoundToFBO();
    } else {
        tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, idDesc, mipMapsStatus);
    }

    // Record the sampler state set by set_initial_texture_params so redundant
    // TexParameteri calls can be skipped later.
    tex->setCachedParams(&initialTexParams, tex->getCachedNonSamplerParams(),
                         this->getResetTimestamp());
#ifdef TRACE_TEXTURE_CREATION
    SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
             idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
#endif
    if (tex && performClear) {
        if (this->glCaps().clearTextureSupport()) {
            static constexpr uint32_t kZero = 0;
            GL_CALL(ClearTexImage(tex->textureID(), 0, GR_GL_RGBA, GR_GL_UNSIGNED_BYTE, &kZero));
        } else {
            // Fall back to attaching the texture to a scratch FBO and clearing.
            GrGLIRect viewport;
            this->bindSurfaceFBOForPixelOps(tex.get(), GR_GL_FRAMEBUFFER, &viewport,
                                            kDst_TempFBOTarget);
            this->disableScissor();
            this->disableWindowRectangles();
            this->flushColorWrite(true);
            this->flushClearColor(0, 0, 0, 0);
            GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
            this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, tex.get());
            fHWBoundRenderTargetUniqueID.makeInvalid();
        }
    }
    return std::move(tex);
}

namespace {

const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;

// Queries GL for the stencil (and, if packed, depth) bit counts of the
// currently bound renderbuffer when they are not already known.
void inline get_stencil_rb_sizes(const GrGLInterface* gl,
                                 GrGLStencilAttachment::Format* format) {

    // we shouldn't ever know one size and not the other
    SkASSERT((kUnknownBitCount == format->fStencilBits) ==
             (kUnknownBitCount == format->fTotalBits));
    if (kUnknownBitCount == format->fStencilBits) {
        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
                                         (GrGLint*)&format->fStencilBits);
        if (format->fPacked) {
            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
                                             (GrGLint*)&format->fTotalBits);
            format->fTotalBits += format->fStencilBits;
        } else {
            format->fTotalBits = format->fStencilBits;
        }
    }
}
}

// Probes (and caches in the caps) which stencil format index works with the
// given config by test-building a small FBO. Returns -1 if none works.
int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) {
    static const int kSize = 16;
SkASSERT(this->caps()->isConfigRenderable(config)); 1684 if (!this->glCaps().hasStencilFormatBeenDeterminedForConfig(config)) { 1685 // Default to unsupported, set this if we find a stencil format that works. 1686 int firstWorkingStencilFormatIndex = -1; 1687 1688 // Create color texture 1689 GrGLuint colorID = 0; 1690 GL_CALL(GenTextures(1, &colorID)); 1691 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, colorID); 1692 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1693 GR_GL_TEXTURE_MAG_FILTER, 1694 GR_GL_NEAREST)); 1695 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1696 GR_GL_TEXTURE_MIN_FILTER, 1697 GR_GL_NEAREST)); 1698 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1699 GR_GL_TEXTURE_WRAP_S, 1700 GR_GL_CLAMP_TO_EDGE)); 1701 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, 1702 GR_GL_TEXTURE_WRAP_T, 1703 GR_GL_CLAMP_TO_EDGE)); 1704 1705 GrGLenum internalFormat; 1706 GrGLenum externalFormat; 1707 GrGLenum externalType; 1708 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat, 1709 &externalType)) { 1710 return false; 1711 } 1712 this->unbindCpuToGpuXferBuffer(); 1713 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1714 GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D, 1715 0, 1716 internalFormat, 1717 kSize, 1718 kSize, 1719 0, 1720 externalFormat, 1721 externalType, 1722 nullptr)); 1723 if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(this->glInterface())) { 1724 GL_CALL(DeleteTextures(1, &colorID)); 1725 return -1; 1726 } 1727 1728 // unbind the texture from the texture unit before binding it to the frame buffer 1729 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0)); 1730 1731 // Create Framebuffer 1732 GrGLuint fb = 0; 1733 GL_CALL(GenFramebuffers(1, &fb)); 1734 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb); 1735 fHWBoundRenderTargetUniqueID.makeInvalid(); 1736 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, 1737 GR_GL_COLOR_ATTACHMENT0, 1738 GR_GL_TEXTURE_2D, 1739 colorID, 1740 0)); 1741 GrGLuint sbRBID = 0; 1742 GL_CALL(GenRenderbuffers(1, &sbRBID)); 
1743 1744 // look over formats till I find a compatible one 1745 int stencilFmtCnt = this->glCaps().stencilFormats().count(); 1746 if (sbRBID) { 1747 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID)); 1748 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) { 1749 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i]; 1750 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1751 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER, 1752 sFmt.fInternalFormat, 1753 kSize, kSize)); 1754 if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) { 1755 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1756 GR_GL_STENCIL_ATTACHMENT, 1757 GR_GL_RENDERBUFFER, sbRBID)); 1758 if (sFmt.fPacked) { 1759 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1760 GR_GL_DEPTH_ATTACHMENT, 1761 GR_GL_RENDERBUFFER, sbRBID)); 1762 } else { 1763 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1764 GR_GL_DEPTH_ATTACHMENT, 1765 GR_GL_RENDERBUFFER, 0)); 1766 } 1767 GrGLenum status; 1768 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); 1769 if (status == GR_GL_FRAMEBUFFER_COMPLETE) { 1770 firstWorkingStencilFormatIndex = i; 1771 break; 1772 } 1773 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1774 GR_GL_STENCIL_ATTACHMENT, 1775 GR_GL_RENDERBUFFER, 0)); 1776 if (sFmt.fPacked) { 1777 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1778 GR_GL_DEPTH_ATTACHMENT, 1779 GR_GL_RENDERBUFFER, 0)); 1780 } 1781 } 1782 } 1783 GL_CALL(DeleteRenderbuffers(1, &sbRBID)); 1784 } 1785 GL_CALL(DeleteTextures(1, &colorID)); 1786 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0); 1787 this->deleteFramebuffer(fb); 1788 fGLContext->caps()->setStencilFormatIndexForConfig(config, firstWorkingStencilFormatIndex); 1789 } 1790 return this->glCaps().getStencilFormatIndexForConfig(config); 1791 } 1792 1793 bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info, bool renderTarget, 1794 GrGLTexture::SamplerParams* initialTexParams, 1795 
const GrMipLevel texels[], int mipLevelCount, 1796 GrMipMapsStatus* mipMapsStatus) { 1797 info->fID = 0; 1798 info->fTarget = GR_GL_TEXTURE_2D; 1799 GL_CALL(GenTextures(1, &(info->fID))); 1800 1801 if (!info->fID) { 1802 return false; 1803 } 1804 1805 this->bindTextureToScratchUnit(info->fTarget, info->fID); 1806 1807 if (renderTarget && this->glCaps().textureUsageSupport()) { 1808 // provides a hint about how this texture will be used 1809 GL_CALL(TexParameteri(info->fTarget, 1810 GR_GL_TEXTURE_USAGE, 1811 GR_GL_FRAMEBUFFER_ATTACHMENT)); 1812 } 1813 1814 if (info) { 1815 *initialTexParams = set_initial_texture_params(this->glInterface(), *info); 1816 } 1817 1818 bool success = false; 1819 if (GrPixelConfigIsCompressed(desc.fConfig)) { 1820 SkASSERT(!renderTarget); 1821 success = this->uploadCompressedTexData(desc.fConfig, desc.fWidth, desc.fHeight, 1822 info->fTarget, desc.fConfig, 1823 texels, mipLevelCount, mipMapsStatus); 1824 } else { 1825 success = this->uploadTexData(desc.fConfig, desc.fWidth, desc.fHeight, info->fTarget, 1826 kNewTexture_UploadType, 0, 0, desc.fWidth, desc.fHeight, 1827 desc.fConfig, texels, mipLevelCount, mipMapsStatus); 1828 } 1829 if (!success) { 1830 GL_CALL(DeleteTextures(1, &(info->fID))); 1831 return false; 1832 } 1833 info->fFormat = this->glCaps().configSizedInternalFormat(desc.fConfig); 1834 return true; 1835 } 1836 1837 GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt, 1838 int width, int height) { 1839 SkASSERT(width >= rt->width()); 1840 SkASSERT(height >= rt->height()); 1841 1842 int samples = rt->numStencilSamples(); 1843 GrGLStencilAttachment::IDDesc sbDesc; 1844 1845 int sIdx = this->getCompatibleStencilIndex(rt->config()); 1846 if (sIdx < 0) { 1847 return nullptr; 1848 } 1849 1850 if (!sbDesc.fRenderbufferID) { 1851 GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID)); 1852 } 1853 if (!sbDesc.fRenderbufferID) { 1854 return nullptr; 1855 } 1856 
GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID)); 1857 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx]; 1858 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1859 // we do this "if" so that we don't call the multisample 1860 // version on a GL that doesn't have an MSAA extension. 1861 if (samples > 1) { 1862 SkAssertResult(renderbuffer_storage_msaa(*fGLContext, 1863 samples, 1864 sFmt.fInternalFormat, 1865 width, height)); 1866 } else { 1867 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER, 1868 sFmt.fInternalFormat, 1869 width, height)); 1870 SkASSERT(GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())); 1871 } 1872 fStats.incStencilAttachmentCreates(); 1873 // After sized formats we attempt an unsized format and take 1874 // whatever sizes GL gives us. In that case we query for the size. 1875 GrGLStencilAttachment::Format format = sFmt; 1876 get_stencil_rb_sizes(this->glInterface(), &format); 1877 GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this, 1878 sbDesc, 1879 width, 1880 height, 1881 samples, 1882 format); 1883 return stencil; 1884 } 1885 1886 //////////////////////////////////////////////////////////////////////////////// 1887 1888 sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType, 1889 GrAccessPattern accessPattern, const void* data) { 1890 return GrGLBuffer::Make(this, size, intendedType, accessPattern, data); 1891 } 1892 1893 void GrGLGpu::flushScissor(const GrScissorState& scissorState, 1894 const GrGLIRect& rtViewport, 1895 GrSurfaceOrigin rtOrigin) { 1896 if (scissorState.enabled()) { 1897 GrGLIRect scissor; 1898 scissor.setRelativeTo(rtViewport, scissorState.rect(), rtOrigin); 1899 // if the scissor fully contains the viewport then we fall through and 1900 // disable the scissor test. 
if (!scissor.contains(rtViewport)) {
            // Only touch GL when the cached scissor state differs (redundant-state filtering).
            if (fHWScissorSettings.fRect != scissor) {
                scissor.pushToGLScissor(this->glInterface());
                fHWScissorSettings.fRect = scissor;
            }
            if (kYes_TriState != fHWScissorSettings.fEnabled) {
                GL_CALL(Enable(GR_GL_SCISSOR_TEST));
                fHWScissorSettings.fEnabled = kYes_TriState;
            }
            return;
        }
    }

    // See fall through note above
    this->disableScissor();
}

// Flushes the pipeline's window-rectangles state (EXT_window_rectangles) to GL, translating
// each SkIRect into viewport-relative GL coordinates. No-op under USE_NSIGHT.
void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
                                    const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
#ifndef USE_NSIGHT
    typedef GrWindowRectsState::Mode Mode;
    SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen.
    SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());

    if (!this->caps()->maxWindowRectangles() ||
        fHWWindowRectsState.knownEqualTo(origin, rt->getViewport(), windowState)) {
        return;
    }

    // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
    // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
    int numWindows = SkTMin(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
    SkASSERT(windowState.numWindows() == numWindows);

    GrGLIRect glwindows[GrWindowRectangles::kMaxWindows];
    const SkIRect* skwindows = windowState.windows().data();
    for (int i = 0; i < numWindows; ++i) {
        glwindows[i].setRelativeTo(rt->getViewport(), skwindows[i], origin);
    }

    GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE
                                                               : GR_GL_INCLUSIVE;
    GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));

    fHWWindowRectsState.set(origin, rt->getViewport(), windowState);
#endif
}

// Turns window rectangles off (zero rectangles in exclusive mode), with redundant-state
// filtering via fHWWindowRectsState.
void GrGLGpu::disableWindowRectangles() {
#ifndef USE_NSIGHT
    if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
        return;
    }
    GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
    fHWWindowRectsState.setDisabled();
#endif
}

// For every texture the primitive processor and fragment processors will sample: regenerate
// dirty mip levels if mip filtering is requested, otherwise resolve MSAA if needed.
void GrGLGpu::resolveAndGenerateMipMapsForProcessorTextures(
        const GrPrimitiveProcessor& primProc,
        const GrPipeline& pipeline,
        const GrTextureProxy* const primProcTextures[],
        int numPrimitiveProcessorTextureSets) {
    auto genLevelsIfNeeded = [this](GrTexture* tex, const GrSamplerState& sampler) {
        SkASSERT(tex);
        if (sampler.filter() == GrSamplerState::Filter::kMipMap &&
            tex->texturePriv().mipMapped() == GrMipMapped::kYes &&
            tex->texturePriv().mipMapsAreDirty()) {
            SkASSERT(this->caps()->mipMapSupport());
            this->regenerateMipMapLevels(static_cast<GrGLTexture*>(tex));
            SkASSERT(!tex->asRenderTarget() || !tex->asRenderTarget()->needsResolve());
        } else if (auto* rt = tex->asRenderTarget()) {
            if (rt->needsResolve()) {
                this->resolveRenderTarget(rt);
            }
        }
    };

    for (int set = 0, tex = 0; set < numPrimitiveProcessorTextureSets; ++set) {
        for (int sampler = 0; sampler < primProc.numTextureSamplers(); ++sampler, ++tex) {
            GrTexture* texture = primProcTextures[tex]->peekTexture();
            genLevelsIfNeeded(texture, primProc.textureSampler(sampler).samplerState());
        }
    }

    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const auto& textureSampler = fp->textureSampler(i);
            genLevelsIfNeeded(textureSampler.peekTexture(),
textureSampler.samplerState());
        }
    }
}

// Flushes all GL state needed for an upcoming draw: program, blend, textures/uniforms,
// stencil, scissor, window rectangles, MSAA state and finally the render target binding.
// Returns false if a program could not be created (in which case the draw must be skipped).
bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget,
                           GrSurfaceOrigin origin,
                           const GrPrimitiveProcessor& primProc,
                           const GrPipeline& pipeline,
                           const GrPipeline::FixedDynamicState* fixedDynamicState,
                           const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                           int dynamicStateArraysLength,
                           bool willDrawPoints) {
    const GrTextureProxy* const* primProcProxiesForMipRegen = nullptr;
    const GrTextureProxy* const* primProcProxiesToBind = nullptr;
    int numPrimProcTextureSets = 1;  // number of texture per prim proc sampler.
    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        // Textures vary per mesh; they get bound later per draw, not here.
        primProcProxiesForMipRegen = dynamicStateArrays->fPrimitiveProcessorTextures;
        numPrimProcTextureSets = dynamicStateArraysLength;
    } else if (fixedDynamicState && fixedDynamicState->fPrimitiveProcessorTextures) {
        primProcProxiesForMipRegen = fixedDynamicState->fPrimitiveProcessorTextures;
        primProcProxiesToBind = fixedDynamicState->fPrimitiveProcessorTextures;
    }

    SkASSERT(SkToBool(primProcProxiesForMipRegen) == SkToBool(primProc.numTextureSamplers()));

    sk_sp<GrGLProgram> program(fProgramCache->refProgram(this, renderTarget, origin, primProc,
                                                         primProcProxiesForMipRegen,
                                                         pipeline, willDrawPoints));
    if (!program) {
        GrCapsDebugf(this->caps(), "Failed to create program!\n");
        return false;
    }
    this->resolveAndGenerateMipMapsForProcessorTextures(
            primProc, pipeline, primProcProxiesForMipRegen, numPrimProcTextureSets);

    GrXferProcessor::BlendInfo blendInfo;
    pipeline.getXferProcessor().getBlendInfo(&blendInfo);

    this->flushColorWrite(blendInfo.fWriteColor);

    this->flushProgram(std::move(program));

    // Swizzle the blend to match what the shader will output.
    const GrSwizzle& swizzle = this->caps()->shaderCaps()->configOutputSwizzle(
            renderTarget->config());
    this->flushBlend(blendInfo, swizzle);

    fHWProgram->updateUniformsAndTextureBindings(renderTarget, origin,
                                                 primProc, pipeline, primProcProxiesToBind);

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
    GrStencilSettings stencil;
    if (pipeline.isStencilEnabled()) {
        // TODO: attach stencil and create settings during render target flush.
        SkASSERT(glRT->renderTargetPriv().getStencilAttachment());
        stencil.reset(*pipeline.getUserStencil(), pipeline.hasStencilClip(),
                      glRT->renderTargetPriv().numStencilBits());
    }
    this->flushStencil(stencil);
    if (pipeline.isScissorEnabled()) {
        // A dynamic-scissor draw overwrites this per mesh; the bogus rect just enables the test.
        static constexpr SkIRect kBogusScissor{0, 0, 1, 1};
        GrScissorState state(fixedDynamicState ? fixedDynamicState->fScissorRect : kBogusScissor);
        this->flushScissor(state, glRT->getViewport(), origin);
    } else {
        this->disableScissor();
    }
    this->flushWindowRectangles(pipeline.getWindowRectsState(), glRT, origin);
    this->flushHWAAState(glRT, pipeline.isHWAntialiasState());

    // This must come after textures are flushed because a texture may need
    // to be msaa-resolved (which will modify bound FBO state).
    this->flushRenderTarget(glRT);

    return true;
}

// Makes `program` the active GL program, tracking it in fHWProgram/fHWProgramID so redundant
// UseProgram calls are skipped. Passing null clears the cached program state.
void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) {
    if (!program) {
        fHWProgram.reset();
        fHWProgramID = 0;
        return;
    }
    SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID()));
    if (program == fHWProgram) {
        return;
    }
    auto id = program->programID();
    SkASSERT(id);
    GL_CALL(UseProgram(id));
    fHWProgram = std::move(program);
    fHWProgramID = id;
}

// Raw-id variant: binds a program object that is not tracked by a GrGLProgram wrapper.
void GrGLGpu::flushProgram(GrGLuint id) {
    SkASSERT(id);
    if (fHWProgramID == id) {
        SkASSERT(!fHWProgram);
        return;
    }
    fHWProgram.reset();
    GL_CALL(UseProgram(id));
    fHWProgramID = id;
}

// Binds vertex/index/instance buffers and configures every attribute pointer for the
// currently-flushed program (fHWProgram must be set before calling).
void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
                            const GrBuffer* vertexBuffer,
                            int baseVertex,
                            const GrBuffer* instanceBuffer,
                            int baseInstance,
                            GrPrimitiveRestart enablePrimitiveRestart) {
    SkASSERT((enablePrimitiveRestart == GrPrimitiveRestart::kNo) || indexBuffer);

    GrGLAttribArrayState* attribState;
    if (indexBuffer) {
        SkASSERT(indexBuffer->isCpuBuffer() ||
                 !static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
        attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
    } else {
        attribState = fHWVertexArrayState.bindInternalVertexArray(this);
    }

    int numAttribs = fHWProgram->numVertexAttributes() + fHWProgram->numInstanceAttributes();
    attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart);

    if (int vertexStride = fHWProgram->vertexStride()) {
        SkASSERT(vertexBuffer);
        SkASSERT(vertexBuffer->isCpuBuffer() ||
                 !static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
        size_t bufferOffset = baseVertex * static_cast<size_t>(vertexStride);
        for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) {
            const auto& attrib = fHWProgram->vertexAttribute(i);
static constexpr int kDivisor = 0;  // per-vertex attributes: divisor 0
            attribState->set(this, attrib.fLocation, vertexBuffer, attrib.fCPUType, attrib.fGPUType,
                             vertexStride, bufferOffset + attrib.fOffset, kDivisor);
        }
    }
    if (int instanceStride = fHWProgram->instanceStride()) {
        SkASSERT(instanceBuffer);
        SkASSERT(instanceBuffer->isCpuBuffer() ||
                 !static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
        size_t bufferOffset = baseInstance * static_cast<size_t>(instanceStride);
        int attribIdx = fHWProgram->numVertexAttributes();
        for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) {
            const auto& attrib = fHWProgram->instanceAttribute(i);
            static constexpr int kDivisor = 1;  // per-instance attributes: divisor 1
            attribState->set(this, attrib.fLocation, instanceBuffer, attrib.fCPUType,
                             attrib.fGPUType, instanceStride, bufferOffset + attrib.fOffset,
                             kDivisor);
        }
    }
}

// Binds `buffer` to the GL target associated with `type`, using cached binding state to
// skip redundant BindBuffer calls. Returns the GL target that was (or remains) bound.
GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
    this->handleDirtyContext();

    // Index buffer state is tied to the vertex array.
    if (GrGpuBufferType::kIndex == type) {
        this->bindVertexArray(0);
    }

    auto* bufferState = this->hwBufferState(type);
    if (buffer->isCpuBuffer()) {
        // CPU-side buffers draw from client memory, so GL must have buffer 0 bound.
        if (!bufferState->fBufferZeroKnownBound) {
            GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
            bufferState->fBufferZeroKnownBound = true;
            bufferState->fBoundBufferUniqueID.makeInvalid();
        }
    } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
               bufferState->fBoundBufferUniqueID) {
        const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
        GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
        bufferState->fBufferZeroKnownBound = false;
        bufferState->fBoundBufferUniqueID = glBuffer->uniqueID();
    }

    return bufferState->fGLTarget;
}

// Disables GL_SCISSOR_TEST if the cached state says it may be enabled.
void GrGLGpu::disableScissor() {
    if (kNo_TriState != fHWScissorSettings.fEnabled) {
        GL_CALL(Disable(GR_GL_SCISSOR_TEST));
        fHWScissorSettings.fEnabled = kNo_TriState;
        return;
    }
}

// Clears the color buffer of `target` (optionally scissored/window-rected by `clip`).
void GrGLGpu::clear(const GrFixedClip& clip, const SkPMColor4f& color,
                    GrRenderTarget* target, GrSurfaceOrigin origin) {
    // parent class should never let us get here with no RT
    SkASSERT(target);
    SkASSERT(!this->caps()->performColorClearsAsDraws());
    SkASSERT(!clip.scissorEnabled() || !this->caps()->performPartialClearsAsDraws());

    this->handleDirtyContext();

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);

    if (clip.scissorEnabled()) {
        this->flushRenderTarget(glRT, origin, clip.scissorRect());
    } else {
        this->flushRenderTarget(glRT);
    }
    this->flushScissor(clip.scissorState(), glRT->getViewport(), origin);
    this->flushWindowRectangles(clip.windowRectsState(), glRT, origin);
    this->flushColorWrite(true);

    GrGLfloat r = color.fR, g = color.fG, b = color.fB, a = color.fA;
    // Workaround for drivers that mishandle clears to exactly 0/1 boundary values:
    // nudge alpha just outside [0,1] (clamped by GL) when every channel is 0 or 1.
    if (this->glCaps().clearToBoundaryValuesIsBroken() &&
        (1 == r || 0 ==
r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) {
        static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f);
        static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f);
        a = (1 == a) ? safeAlpha1 : safeAlpha0;
    }
    this->flushClearColor(r, g, b, a);

    GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
}

// Clears the entire stencil buffer of `target` to `clearValue` (scissor and window
// rectangles are disabled first so the whole buffer is affected).
void GrGLGpu::clearStencil(GrRenderTarget* target, int clearValue) {
    SkASSERT(!this->caps()->performStencilClearsAsDraws());

    if (!target) {
        return;
    }

    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
    this->flushRenderTargetNoColorWrites(glRT);

    this->disableScissor();
    this->disableWindowRectangles();

    GL_CALL(StencilMask(0xffffffff));
    GL_CALL(ClearStencil(clearValue));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // GL state was touched directly, so the cached stencil settings are stale.
    fHWStencilSettings.invalidate();
    if (!clearValue) {
        sb->cleared();
    }
}

// Clears the stencil clip bit (the high bit of the stencil buffer) either inside or
// outside the clip, honoring the clip's scissor and window rectangles.
void GrGLGpu::clearStencilClip(const GrFixedClip& clip,
                               bool insideStencilMask,
                               GrRenderTarget* target, GrSurfaceOrigin origin) {
    SkASSERT(target);
    SkASSERT(!this->caps()->performStencilClearsAsDraws());
    this->handleDirtyContext();

    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    GrGLint stencilBitCount = sb->bits();
#if 0
    SkASSERT(stencilBitCount > 0);
    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrOpList says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask = ~0;
#endif
    GrGLint value;
    if (insideStencilMask) {
        // The clip bit is the top bit of the stencil buffer.
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
    this->flushRenderTargetNoColorWrites(glRT);

    this->flushScissor(clip.scissorState(), glRT->getViewport(), origin);
    this->flushWindowRectangles(clip.windowRectsState(), glRT, origin);

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    fHWStencilSettings.invalidate();
}

// Queries whether `readConfig` pixels can be read back from `target`.
bool GrGLGpu::readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig) {
#ifdef SK_BUILD_FOR_MAC
    // Chromium may ask us to read back from locked IOSurfaces. Calling the command buffer's
    // glGetIntegerv() with GL_IMPLEMENTATION_COLOR_READ_FORMAT/_TYPE causes the command buffer
    // to make a call to check the framebuffer status which can hang the driver. So in Mac Chromium
    // we always use a temporary surface to test for read pixels support.
// https://www.crbug.com/662802
    if (this->glContext().driver() == kChromium_GrGLDriver) {
        return this->readPixelsSupported(target->config(), readConfig);
    }
#endif
    auto bindRenderTarget = [this, target]() -> bool {
        this->flushRenderTargetNoColorWrites(static_cast<GrGLRenderTarget*>(target));
        return true;
    };
    auto unbindRenderTarget = []{};
    auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
        GR_GL_GetIntegerv(this->glInterface(), query, value);
    };
    GrPixelConfig rtConfig = target->config();
    return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
                                              unbindRenderTarget);
}

// Config-only variant: creates a small temporary surface of `rtConfig` so GrGLCaps can
// probe GL's implementation-defined read-back format support against a bound FBO.
bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConfig) {
    sk_sp<GrTexture> temp;
    auto bindRenderTarget = [this, rtConfig, &temp]() -> bool {
        GrSurfaceDesc desc;
        desc.fConfig = rtConfig;
        desc.fWidth = desc.fHeight = 16;
        if (this->glCaps().isConfigRenderable(rtConfig)) {
            desc.fFlags = kRenderTarget_GrSurfaceFlag;
            temp = this->createTexture(desc, SkBudgeted::kNo);
            if (!temp) {
                return false;
            }
            GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(temp->asRenderTarget());
            this->flushRenderTargetNoColorWrites(glrt);
            return true;
        } else if (this->glCaps().canConfigBeFBOColorAttachment(rtConfig)) {
            // Not renderable as an RT, but usable as an FBO color attachment for reads.
            temp = this->createTexture(desc, SkBudgeted::kNo);
            if (!temp) {
                return false;
            }
            GrGLIRect vp;
            this->bindSurfaceFBOForPixelOps(temp.get(), GR_GL_FRAMEBUFFER, &vp, kDst_TempFBOTarget);
            fHWBoundRenderTargetUniqueID.makeInvalid();
            return true;
        }
        return false;
    };
    auto unbindRenderTarget = [this, &temp]() {
        this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, temp.get());
    };
    auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
        GR_GL_GetIntegerv(this->glInterface(), query, value);
    };
    return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
                                              unbindRenderTarget);
}

// Surface variant: dispatches to the render-target or config-only overload.
bool GrGLGpu::readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig) {
    if (GrRenderTarget* rt = surfaceForConfig->asRenderTarget()) {
        return this->readPixelsSupported(rt, readConfig);
    } else {
        GrPixelConfig config = surfaceForConfig->config();
        return this->readPixelsSupported(config, readConfig);
    }
}

// Reads back a rectangle of pixels from `surface` into `buffer`, handling MSAA resolve,
// temporary FBOs for non-RT surfaces, row-length packing and driver workarounds.
bool GrGLGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
                           GrColorType dstColorType, void* buffer, size_t rowBytes) {
    SkASSERT(surface);

    GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    if (!renderTarget && !this->glCaps().canConfigBeFBOColorAttachment(surface->config())) {
        return false;
    }

    // TODO: Avoid this conversion by making GrGLCaps work with color types.
    auto dstAsConfig = GrColorTypeToPixelConfig(dstColorType, GrSRGBEncoded::kNo);

    if (!this->readPixelsSupported(surface, dstAsConfig)) {
        return false;
    }

    GrGLenum externalFormat;
    GrGLenum externalType;
    if (!this->glCaps().getReadPixelsFormat(surface->config(), dstAsConfig, &externalFormat,
                                            &externalType)) {
        return false;
    }

    GrGLIRect glvp;
    if (renderTarget) {
        // resolve the render target if necessary
        switch (renderTarget->getResolveType()) {
            case GrGLRenderTarget::kCantResolve_ResolveType:
                return false;
            case GrGLRenderTarget::kAutoResolves_ResolveType:
                this->flushRenderTargetNoColorWrites(renderTarget);
                break;
            case GrGLRenderTarget::kCanResolve_ResolveType:
                this->onResolveRenderTarget(renderTarget);
                // we don't track the state of the READ FBO ID.
this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID());
                break;
            default:
                SK_ABORT("Unknown resolve type");
        }
        glvp = renderTarget->getViewport();
    } else {
        // Use a temporary FBO.
        this->bindSurfaceFBOForPixelOps(surface, GR_GL_FRAMEBUFFER, &glvp, kSrc_TempFBOTarget);
        fHWBoundRenderTargetUniqueID.makeInvalid();
    }

    // the read rect is viewport-relative
    GrGLIRect readRect;
    readRect.setRelativeTo(glvp, left, top, width, height, kTopLeft_GrSurfaceOrigin);

    int bytesPerPixel = GrBytesPerPixel(dstAsConfig);
    size_t tightRowBytes = bytesPerPixel * width;

    size_t readDstRowBytes = tightRowBytes;
    void* readDst = buffer;

    // determine if GL can read using the passed rowBytes or if we need a scratch buffer.
    SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
    if (rowBytes != tightRowBytes) {
        if (this->glCaps().packRowLengthSupport() && !(rowBytes % bytesPerPixel)) {
            // GL can pack rows at the caller's stride directly.
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
                                static_cast<GrGLint>(rowBytes / bytesPerPixel)));
            readDstRowBytes = rowBytes;
        } else {
            // Read tightly packed into scratch, then copy out row by row below.
            scratch.reset(tightRowBytes * height);
            readDst = scratch.get();
        }
    }
    GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, config_alignment(dstAsConfig)));

    bool reattachStencil = false;
    if (this->glCaps().detachStencilFromMSAABuffersBeforeReadPixels() &&
        renderTarget &&
        renderTarget->renderTargetPriv().getStencilAttachment() &&
        renderTarget->numColorSamples() > 1) {
        // Fix Adreno devices that won't read from MSAA framebuffers with stencil attached
        reattachStencil = true;
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, 0));
    }

    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
                       readRect.fWidth, readRect.fHeight,
                       externalFormat, externalType, readDst));

    if (reattachStencil) {
        GrGLStencilAttachment* stencilAttachment = static_cast<GrGLStencilAttachment*>(
                renderTarget->renderTargetPriv().getStencilAttachment());
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, stencilAttachment->renderbufferID()));
    }

    if (readDstRowBytes != tightRowBytes) {
        SkASSERT(this->glCaps().packRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
    }

    if (readDst != buffer) {
        SkASSERT(readDst != buffer);
        SkASSERT(rowBytes != tightRowBytes);
        // Copy from the tightly-packed scratch buffer into the caller's strided buffer.
        const char* src = reinterpret_cast<const char*>(readDst);
        char* dst = reinterpret_cast<char*>(buffer);
        SkRectMemcpy(dst, rowBytes, src, readDstRowBytes, tightRowBytes, height);
    }
    if (!renderTarget) {
        this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, surface);
    }
    return true;
}

// Returns the (lazily created, cached) render-target command buffer configured for `rt`.
GrGpuRTCommandBuffer* GrGLGpu::getCommandBuffer(
        GrRenderTarget* rt, GrSurfaceOrigin origin, const SkRect& bounds,
        const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
    if (!fCachedRTCommandBuffer) {
        fCachedRTCommandBuffer.reset(new GrGLGpuRTCommandBuffer(this));
    }

    fCachedRTCommandBuffer->set(rt, origin, colorInfo, stencilInfo);
    return fCachedRTCommandBuffer.get();
}

// Returns the (lazily created, cached) texture command buffer configured for `texture`.
GrGpuTextureCommandBuffer* GrGLGpu::getCommandBuffer(GrTexture* texture, GrSurfaceOrigin origin) {
    if (!fCachedTexCommandBuffer) {
        fCachedTexCommandBuffer.reset(new GrGLGpuTextureCommandBuffer(this));
    }

    fCachedTexCommandBuffer->set(texture, origin);
    return fCachedTexCommandBuffer.get();
}

// Binds `target` for rendering and marks `bounds` (in `origin` space) as written.
void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, GrSurfaceOrigin origin,
                                const SkIRect& bounds) {
    this->flushRenderTargetNoColorWrites(target);
    this->didWriteToSurface(target, origin, &bounds);
}

void
GrGLGpu::flushRenderTarget(GrGLRenderTarget* target) {
    this->flushRenderTargetNoColorWrites(target);
    // No bounds given: the whole surface is considered dirty.
    this->didWriteToSurface(target, kTopLeft_GrSurfaceOrigin, nullptr);
}

// Binds `target`'s FBO and viewport (if not already bound) without marking it written.
void GrGLGpu::flushRenderTargetNoColorWrites(GrGLRenderTarget* target) {
    SkASSERT(target);
    GrGpuResource::UniqueID rtID = target->uniqueID();
    if (fHWBoundRenderTargetUniqueID != rtID) {
        this->bindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID());
#ifdef SK_DEBUG
        // don't do this check in Chromium -- this is causing
        // lots of repeated command buffer flushes when the compositor is
        // rendering with Ganesh, which is really slow; even too slow for
        // Debug mode.
        if (kChromium_GrGLDriver != this->glContext().driver()) {
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
            }
        }
#endif
        fHWBoundRenderTargetUniqueID = rtID;
        this->flushViewport(target->getViewport());
    }

    if (this->glCaps().srgbWriteControl()) {
        this->flushFramebufferSRGB(GrPixelConfigIsSRGB(target->config()));
    }
}

// Toggles GL_FRAMEBUFFER_SRGB with redundant-state filtering via fHWSRGBFramebuffer.
void GrGLGpu::flushFramebufferSRGB(bool enable) {
    if (enable && kYes_TriState != fHWSRGBFramebuffer) {
        GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
        fHWSRGBFramebuffer = kYes_TriState;
    } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
        GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
        fHWSRGBFramebuffer = kNo_TriState;
    }
}

// Pushes `viewport` to GL only when it differs from the cached viewport.
void GrGLGpu::flushViewport(const GrGLIRect& viewport) {
    if (fHWViewport != viewport) {
        viewport.pushToGLViewport(this->glInterface());
        fHWViewport = viewport;
    }
}

// Debugging aid: when SWAP_PER_DRAW is 1, the buffers are swapped after every draw so each
// draw can be observed on screen.
#define SWAP_PER_DRAW 0

#if SWAP_PER_DRAW
    #if defined(SK_BUILD_FOR_MAC)
        #include <AGL/agl.h>
    #elif defined(SK_BUILD_FOR_WIN)
        #include <gl/GL.h>
        // Swaps the buffers of every top-level window owned by this process.
        void SwapBuf() {
            DWORD procID = GetCurrentProcessId();
            HWND hwnd = GetTopWindow(GetDesktopWindow());
            while(hwnd) {
                DWORD wndProcID = 0;
                GetWindowThreadProcessId(hwnd, &wndProcID);
                if(wndProcID == procID) {
                    SwapBuffers(GetDC(hwnd));
                }
                hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
            }
        }
    #endif
#endif

// Issues a set of meshes: flushes GL state once, then per mesh applies any dynamic state
// (scissor, primitive-processor textures), emits any needed xfer barrier, and draws.
void GrGLGpu::draw(GrRenderTarget* renderTarget, GrSurfaceOrigin origin,
                   const GrPrimitiveProcessor& primProc,
                   const GrPipeline& pipeline,
                   const GrPipeline::FixedDynamicState* fixedDynamicState,
                   const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                   const GrMesh meshes[],
                   int meshCount) {
    this->handleDirtyContext();

    bool hasPoints = false;
    for (int i = 0; i < meshCount; ++i) {
        if (meshes[i].primitiveType() == GrPrimitiveType::kPoints) {
            hasPoints = true;
            break;
        }
    }
    if (!this->flushGLState(renderTarget, origin, primProc, pipeline, fixedDynamicState,
                            dynamicStateArrays, meshCount, hasPoints)) {
        return;
    }

    bool dynamicScissor = false;
    bool dynamicPrimProcTextures = false;
    if (dynamicStateArrays) {
        dynamicScissor = pipeline.isScissorEnabled() && dynamicStateArrays->fScissorRects;
        dynamicPrimProcTextures = dynamicStateArrays->fPrimitiveProcessorTextures;
    }
    for (int m = 0; m < meshCount; ++m) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(renderTarget->asTexture(),
                                                                     *this->caps())) {
            this->xferBarrier(renderTarget, barrierType);
        }

        if (dynamicScissor) {
            GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
            this->flushScissor(GrScissorState(dynamicStateArrays->fScissorRects[m]),
                               glRT->getViewport(), origin);
        }
        if (dynamicPrimProcTextures) {
            auto texProxyArray = dynamicStateArrays->fPrimitiveProcessorTextures +
                                 m * primProc.numTextureSamplers();
fHWProgram->updatePrimitiveProcessorTextureBindings(primProc, texProxyArray);
        }
        // Driver workaround: toggling cull face "resets" state so that line draws following
        // non-line draws render correctly on affected GPUs.
        if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() &&
            GrIsPrimTypeLines(meshes[m].primitiveType()) &&
            !GrIsPrimTypeLines(fLastPrimitiveType)) {
            GL_CALL(Enable(GR_GL_CULL_FACE));
            GL_CALL(Disable(GR_GL_CULL_FACE));
        }
        meshes[m].sendToGpu(this);
        fLastPrimitiveType = meshes[m].primitiveType();
    }

#if SWAP_PER_DRAW
    glFlush();
    #if defined(SK_BUILD_FOR_MAC)
        aglSwapBuffers(aglGetCurrentContext());
        int set_a_break_pt_here = 9;
        aglSwapBuffers(aglGetCurrentContext());
    #elif defined(SK_BUILD_FOR_WIN)
        SwapBuf();
        int set_a_break_pt_here = 9;
        SwapBuf();
    #endif
#endif
}

// Maps a GrPrimitiveType to the corresponding GL primitive enum.
static GrGLenum gr_primitive_type_to_gl_mode(GrPrimitiveType primitiveType) {
    switch (primitiveType) {
        case GrPrimitiveType::kTriangles:
            return GR_GL_TRIANGLES;
        case GrPrimitiveType::kTriangleStrip:
            return GR_GL_TRIANGLE_STRIP;
        case GrPrimitiveType::kPoints:
            return GR_GL_POINTS;
        case GrPrimitiveType::kLines:
            return GR_GL_LINES;
        case GrPrimitiveType::kLineStrip:
            return GR_GL_LINE_STRIP;
        case GrPrimitiveType::kLinesAdjacency:
            return GR_GL_LINES_ADJACENCY;
    }
    SK_ABORT("invalid GrPrimitiveType");
    return GR_GL_TRIANGLES;
}

// Non-indexed draw. When the driver mishandles a non-zero `first` argument to DrawArrays,
// the base vertex is folded into the attribute pointers instead.
void GrGLGpu::sendMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* vertexBuffer,
                            int vertexCount, int baseVertex) {
    const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
    if (this->glCaps().drawArraysBaseVertexIsBroken()) {
        this->setupGeometry(nullptr, vertexBuffer, baseVertex, nullptr, 0, GrPrimitiveRestart::kNo);
        GL_CALL(DrawArrays(glPrimType, 0, vertexCount));
    } else {
        this->setupGeometry(nullptr, vertexBuffer, 0, nullptr, 0, GrPrimitiveRestart::kNo);
        GL_CALL(DrawArrays(glPrimType, baseVertex, vertexCount));
    }
    fStats.incNumDraws();
}

// Returns the index "pointer" for a draw call: a real CPU address for client-side index
// buffers, or a byte offset into the bound GL index buffer otherwise.
static const GrGLvoid* element_ptr(const GrBuffer* indexBuffer, int baseIndex) {
    size_t baseOffset = baseIndex * sizeof(uint16_t);
    if (indexBuffer->isCpuBuffer()) {
        return static_cast<const GrCpuBuffer*>(indexBuffer)->data() + baseOffset;
    } else {
        return reinterpret_cast<const GrGLvoid*>(baseOffset);
    }
}

// Indexed draw; uses DrawRangeElements when supported so the driver can optimize for the
// known min/max index values.
void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer,
                                   int indexCount, int baseIndex, uint16_t minIndexValue,
                                   uint16_t maxIndexValue, const GrBuffer* vertexBuffer,
                                   int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) {
    const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
    const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);

    this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart);

    if (this->glCaps().drawRangeElementsSupport()) {
        GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount,
                                  GR_GL_UNSIGNED_SHORT, elementPtr));
    } else {
        GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr));
    }
    fStats.incNumDraws();
}

// Instanced non-indexed draw, split into batches no larger than the driver's safe
// per-draw instance limit.
void GrGLGpu::sendInstancedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* vertexBuffer,
                                     int vertexCount, int baseVertex,
                                     const GrBuffer* instanceBuffer, int instanceCount,
                                     int baseInstance) {
    GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
    int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
    for (int i = 0; i < instanceCount; i += maxInstances) {
        this->setupGeometry(nullptr, vertexBuffer, 0, instanceBuffer, baseInstance + i,
                            GrPrimitiveRestart::kNo);
        GL_CALL(DrawArraysInstanced(glPrimType, baseVertex, vertexCount,
                                    SkTMin(instanceCount - i, maxInstances)));
        fStats.incNumDraws();
    }
}

void GrGLGpu::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType, 2696 const GrBuffer* indexBuffer, int indexCount, 2697 int baseIndex, const GrBuffer* vertexBuffer, 2698 int baseVertex, const GrBuffer* instanceBuffer, 2699 int instanceCount, int baseInstance, 2700 GrPrimitiveRestart enablePrimitiveRestart) { 2701 const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); 2702 const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex); 2703 int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount); 2704 for (int i = 0; i < instanceCount; i += maxInstances) { 2705 this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance + i, 2706 enablePrimitiveRestart); 2707 GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr, 2708 SkTMin(instanceCount - i, maxInstances))); 2709 fStats.incNumDraws(); 2710 } 2711 } 2712 2713 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) { 2714 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target); 2715 if (rt->needsResolve()) { 2716 // Some extensions automatically resolves the texture when it is read. 2717 if (this->glCaps().usesMSAARenderBuffers()) { 2718 SkASSERT(rt->textureFBOID() != rt->renderFBOID()); 2719 SkASSERT(rt->textureFBOID() != 0 && rt->renderFBOID() != 0); 2720 this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()); 2721 this->bindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()); 2722 2723 // make sure we go through flushRenderTarget() since we've modified 2724 // the bound DRAW FBO ID. 2725 fHWBoundRenderTargetUniqueID.makeInvalid(); 2726 const GrGLIRect& vp = rt->getViewport(); 2727 const SkIRect dirtyRect = rt->getResolveRect(); 2728 // The dirty rect tracked on the RT is always stored in the native coordinates of the 2729 // surface. 
Choose kTopLeft so no adjustments are made 2730 static constexpr auto kDirtyRectOrigin = kTopLeft_GrSurfaceOrigin; 2731 if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) { 2732 // Apple's extension uses the scissor as the blit bounds. 2733 GrScissorState scissorState; 2734 scissorState.set(dirtyRect); 2735 this->flushScissor(scissorState, vp, kDirtyRectOrigin); 2736 this->disableWindowRectangles(); 2737 GL_CALL(ResolveMultisampleFramebuffer()); 2738 } else { 2739 int l, b, r, t; 2740 if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag & 2741 this->glCaps().blitFramebufferSupportFlags()) { 2742 l = 0; 2743 b = 0; 2744 r = target->width(); 2745 t = target->height(); 2746 } else { 2747 GrGLIRect rect; 2748 rect.setRelativeTo(vp, dirtyRect, kDirtyRectOrigin); 2749 l = rect.fLeft; 2750 b = rect.fBottom; 2751 r = rect.fLeft + rect.fWidth; 2752 t = rect.fBottom + rect.fHeight; 2753 } 2754 2755 // BlitFrameBuffer respects the scissor, so disable it. 2756 this->disableScissor(); 2757 this->disableWindowRectangles(); 2758 GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t, 2759 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); 2760 } 2761 } 2762 rt->flagAsResolved(); 2763 } 2764 } 2765 2766 namespace { 2767 2768 2769 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) { 2770 static const GrGLenum gTable[kGrStencilOpCount] = { 2771 GR_GL_KEEP, // kKeep 2772 GR_GL_ZERO, // kZero 2773 GR_GL_REPLACE, // kReplace 2774 GR_GL_INVERT, // kInvert 2775 GR_GL_INCR_WRAP, // kIncWrap 2776 GR_GL_DECR_WRAP, // kDecWrap 2777 GR_GL_INCR, // kIncClamp 2778 GR_GL_DECR, // kDecClamp 2779 }; 2780 GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep); 2781 GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero); 2782 GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace); 2783 GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert); 2784 GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap); 2785 GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap); 2786 GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp); 2787 GR_STATIC_ASSERT(7 == 
(int)GrStencilOp::kDecClamp); 2788 SkASSERT(op < (GrStencilOp)kGrStencilOpCount); 2789 return gTable[(int)op]; 2790 } 2791 2792 void set_gl_stencil(const GrGLInterface* gl, 2793 const GrStencilSettings::Face& face, 2794 GrGLenum glFace) { 2795 GrGLenum glFunc = GrToGLStencilFunc(face.fTest); 2796 GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp); 2797 GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp); 2798 2799 GrGLint ref = face.fRef; 2800 GrGLint mask = face.fTestMask; 2801 GrGLint writeMask = face.fWriteMask; 2802 2803 if (GR_GL_FRONT_AND_BACK == glFace) { 2804 // we call the combined func just in case separate stencil is not 2805 // supported. 2806 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask)); 2807 GR_GL_CALL(gl, StencilMask(writeMask)); 2808 GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp)); 2809 } else { 2810 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask)); 2811 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask)); 2812 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp)); 2813 } 2814 } 2815 } 2816 2817 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) { 2818 if (stencilSettings.isDisabled()) { 2819 this->disableStencil(); 2820 } else if (fHWStencilSettings != stencilSettings) { 2821 if (kYes_TriState != fHWStencilTestEnabled) { 2822 GL_CALL(Enable(GR_GL_STENCIL_TEST)); 2823 2824 fHWStencilTestEnabled = kYes_TriState; 2825 } 2826 if (stencilSettings.isTwoSided()) { 2827 set_gl_stencil(this->glInterface(), 2828 stencilSettings.front(), 2829 GR_GL_FRONT); 2830 set_gl_stencil(this->glInterface(), 2831 stencilSettings.back(), 2832 GR_GL_BACK); 2833 } else { 2834 set_gl_stencil(this->glInterface(), 2835 stencilSettings.front(), 2836 GR_GL_FRONT_AND_BACK); 2837 } 2838 fHWStencilSettings = stencilSettings; 2839 } 2840 } 2841 2842 void GrGLGpu::disableStencil() { 2843 if (kNo_TriState != fHWStencilTestEnabled) { 2844 GL_CALL(Disable(GR_GL_STENCIL_TEST)); 2845 2846 fHWStencilTestEnabled = 
kNo_TriState; 2847 fHWStencilSettings.invalidate(); 2848 } 2849 } 2850 2851 void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA) { 2852 // rt is only optional if useHWAA is false. 2853 SkASSERT(rt || !useHWAA); 2854 SkASSERT(!useHWAA || rt->isStencilBufferMultisampled()); 2855 2856 if (this->caps()->multisampleDisableSupport()) { 2857 if (useHWAA) { 2858 if (kYes_TriState != fMSAAEnabled) { 2859 GL_CALL(Enable(GR_GL_MULTISAMPLE)); 2860 fMSAAEnabled = kYes_TriState; 2861 } 2862 } else { 2863 if (kNo_TriState != fMSAAEnabled) { 2864 GL_CALL(Disable(GR_GL_MULTISAMPLE)); 2865 fMSAAEnabled = kNo_TriState; 2866 } 2867 } 2868 } 2869 } 2870 2871 void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) { 2872 // Any optimization to disable blending should have already been applied and 2873 // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0). 2874 2875 GrBlendEquation equation = blendInfo.fEquation; 2876 GrBlendCoeff srcCoeff = blendInfo.fSrcBlend; 2877 GrBlendCoeff dstCoeff = blendInfo.fDstBlend; 2878 bool blendOff = 2879 ((kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) && 2880 kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff) || 2881 !blendInfo.fWriteColor; 2882 if (blendOff) { 2883 if (kNo_TriState != fHWBlendState.fEnabled) { 2884 GL_CALL(Disable(GR_GL_BLEND)); 2885 2886 // Workaround for the ARM KHR_blend_equation_advanced blacklist issue 2887 // https://code.google.com/p/skia/issues/detail?id=3943 2888 if (kARM_GrGLVendor == this->ctxInfo().vendor() && 2889 GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) { 2890 SkASSERT(this->caps()->advancedBlendEquationSupport()); 2891 // Set to any basic blending equation. 
2892 GrBlendEquation blend_equation = kAdd_GrBlendEquation; 2893 GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation])); 2894 fHWBlendState.fEquation = blend_equation; 2895 } 2896 2897 fHWBlendState.fEnabled = kNo_TriState; 2898 } 2899 return; 2900 } 2901 2902 if (kYes_TriState != fHWBlendState.fEnabled) { 2903 GL_CALL(Enable(GR_GL_BLEND)); 2904 2905 fHWBlendState.fEnabled = kYes_TriState; 2906 } 2907 2908 if (fHWBlendState.fEquation != equation) { 2909 GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation])); 2910 fHWBlendState.fEquation = equation; 2911 } 2912 2913 if (GrBlendEquationIsAdvanced(equation)) { 2914 SkASSERT(this->caps()->advancedBlendEquationSupport()); 2915 // Advanced equations have no other blend state. 2916 return; 2917 } 2918 2919 if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) { 2920 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff], 2921 gXfermodeCoeff2Blend[dstCoeff])); 2922 fHWBlendState.fSrcCoeff = srcCoeff; 2923 fHWBlendState.fDstCoeff = dstCoeff; 2924 } 2925 2926 if ((BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff))) { 2927 SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant); 2928 if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) { 2929 GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA)); 2930 fHWBlendState.fConstColor = blendConst; 2931 fHWBlendState.fConstColorValid = true; 2932 } 2933 } 2934 } 2935 2936 static void get_gl_swizzle_values(const GrSwizzle& swizzle, GrGLenum glValues[4]) { 2937 for (int i = 0; i < 4; ++i) { 2938 switch (swizzle[i]) { 2939 case 'r': glValues[i] = GR_GL_RED; break; 2940 case 'g': glValues[i] = GR_GL_GREEN; break; 2941 case 'b': glValues[i] = GR_GL_BLUE; break; 2942 case 'a': glValues[i] = GR_GL_ALPHA; break; 2943 case '1': glValues[i] = GR_GL_ONE; break; 2944 default: SK_ABORT("Unsupported component"); 2945 } 2946 } 2947 } 2948 2949 void GrGLGpu::bindTexture(int 
unitIdx, GrSamplerState samplerState, GrGLTexture* texture) { 2950 SkASSERT(texture); 2951 2952 #ifdef SK_DEBUG 2953 if (!this->caps()->npotTextureTileSupport()) { 2954 if (samplerState.isRepeated()) { 2955 const int w = texture->width(); 2956 const int h = texture->height(); 2957 SkASSERT(SkIsPow2(w) && SkIsPow2(h)); 2958 } 2959 } 2960 #endif 2961 2962 // If we created a rt/tex and rendered to it without using a texture and now we're texturing 2963 // from the rt it will still be the last bound texture, but it needs resolving. So keep this 2964 // out of the "last != next" check. 2965 GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget()); 2966 if (texRT) { 2967 this->onResolveRenderTarget(texRT); 2968 } 2969 2970 GrGpuResource::UniqueID textureID = texture->uniqueID(); 2971 GrGLenum target = texture->target(); 2972 if (fHWTextureUnitBindings[unitIdx].boundID(target) != textureID) { 2973 this->setTextureUnit(unitIdx); 2974 GL_CALL(BindTexture(target, texture->textureID())); 2975 fHWTextureUnitBindings[unitIdx].setBoundID(target, textureID); 2976 } 2977 2978 if (samplerState.filter() == GrSamplerState::Filter::kMipMap) { 2979 if (!this->caps()->mipMapSupport() || 2980 texture->texturePriv().mipMapped() == GrMipMapped::kNo) { 2981 samplerState.setFilterMode(GrSamplerState::Filter::kBilerp); 2982 } 2983 } 2984 2985 #ifdef SK_DEBUG 2986 // We were supposed to ensure MipMaps were up-to-date before getting here. 
2987 if (samplerState.filter() == GrSamplerState::Filter::kMipMap) { 2988 SkASSERT(!texture->texturePriv().mipMapsAreDirty()); 2989 } 2990 #endif 2991 2992 ResetTimestamp timestamp = texture->getCachedParamsTimestamp(); 2993 bool setAll = timestamp < this->getResetTimestamp(); 2994 2995 const GrGLTexture::SamplerParams* samplerParamsToRecord = nullptr; 2996 GrGLTexture::SamplerParams newSamplerParams; 2997 if (fSamplerObjectCache) { 2998 fSamplerObjectCache->bindSampler(unitIdx, samplerState); 2999 } else { 3000 const GrGLTexture::SamplerParams& oldSamplerParams = texture->getCachedSamplerParams(); 3001 samplerParamsToRecord = &newSamplerParams; 3002 3003 newSamplerParams.fMinFilter = filter_to_gl_min_filter(samplerState.filter()); 3004 newSamplerParams.fMagFilter = filter_to_gl_mag_filter(samplerState.filter()); 3005 3006 newSamplerParams.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps()); 3007 newSamplerParams.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps()); 3008 3009 // These are the OpenGL default values. 
3010 newSamplerParams.fMinLOD = -1000.f; 3011 newSamplerParams.fMaxLOD = 1000.f; 3012 3013 if (setAll || newSamplerParams.fMagFilter != oldSamplerParams.fMagFilter) { 3014 this->setTextureUnit(unitIdx); 3015 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerParams.fMagFilter)); 3016 } 3017 if (setAll || newSamplerParams.fMinFilter != oldSamplerParams.fMinFilter) { 3018 this->setTextureUnit(unitIdx); 3019 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerParams.fMinFilter)); 3020 } 3021 if (this->glCaps().mipMapLevelAndLodControlSupport()) { 3022 if (setAll || newSamplerParams.fMinLOD != oldSamplerParams.fMinLOD) { 3023 this->setTextureUnit(unitIdx); 3024 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerParams.fMinLOD)); 3025 } 3026 if (setAll || newSamplerParams.fMaxLOD != oldSamplerParams.fMaxLOD) { 3027 this->setTextureUnit(unitIdx); 3028 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerParams.fMaxLOD)); 3029 } 3030 } 3031 if (setAll || newSamplerParams.fWrapS != oldSamplerParams.fWrapS) { 3032 this->setTextureUnit(unitIdx); 3033 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerParams.fWrapS)); 3034 } 3035 if (setAll || newSamplerParams.fWrapT != oldSamplerParams.fWrapT) { 3036 this->setTextureUnit(unitIdx); 3037 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerParams.fWrapT)); 3038 } 3039 if (this->glCaps().clampToBorderSupport()) { 3040 // Make sure the border color is transparent black (the default) 3041 if (setAll || oldSamplerParams.fBorderColorInvalid) { 3042 this->setTextureUnit(unitIdx); 3043 static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f}; 3044 GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack)); 3045 } 3046 } 3047 } 3048 GrGLTexture::NonSamplerParams newNonSamplerParams; 3049 newNonSamplerParams.fBaseMipMapLevel = 0; 3050 newNonSamplerParams.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel(); 3051 3052 const 
GrGLTexture::NonSamplerParams& oldNonSamplerParams = texture->getCachedNonSamplerParams(); 3053 if (this->glCaps().textureSwizzleSupport()) { 3054 auto swizzle = this->glCaps().configSwizzle(texture->config()); 3055 newNonSamplerParams.fSwizzleKey = swizzle.asKey(); 3056 if (setAll || swizzle.asKey() != oldNonSamplerParams.fSwizzleKey) { 3057 GrGLenum glValues[4]; 3058 get_gl_swizzle_values(swizzle, glValues); 3059 this->setTextureUnit(unitIdx); 3060 if (this->glStandard() == kGLES_GrGLStandard) { 3061 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA. 3062 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, glValues[0])); 3063 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, glValues[1])); 3064 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, glValues[2])); 3065 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, glValues[3])); 3066 } else { 3067 GR_STATIC_ASSERT(sizeof(glValues[0]) == sizeof(GrGLint)); 3068 GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA, 3069 reinterpret_cast<const GrGLint*>(glValues))); 3070 } 3071 } 3072 } 3073 // These are not supported in ES2 contexts 3074 if (this->glCaps().mipMapLevelAndLodControlSupport() && 3075 (texture->texturePriv().textureType() != GrTextureType::kExternal || 3076 !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) { 3077 if (newNonSamplerParams.fBaseMipMapLevel != oldNonSamplerParams.fBaseMipMapLevel) { 3078 this->setTextureUnit(unitIdx); 3079 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 3080 newNonSamplerParams.fBaseMipMapLevel)); 3081 } 3082 if (newNonSamplerParams.fMaxMipMapLevel != oldNonSamplerParams.fMaxMipMapLevel) { 3083 this->setTextureUnit(unitIdx); 3084 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, 3085 newNonSamplerParams.fMaxMipMapLevel)); 3086 } 3087 } 3088 texture->setCachedParams(samplerParamsToRecord, newNonSamplerParams, this->getResetTimestamp()); 3089 } 3090 3091 void GrGLGpu::onResetTextureBindings() { 3092 static constexpr 
GrGLenum kTargets[] = {GR_GL_TEXTURE_2D, GR_GL_TEXTURE_RECTANGLE, 3093 GR_GL_TEXTURE_EXTERNAL}; 3094 for (int i = 0; i < this->numTextureUnits(); ++i) { 3095 this->setTextureUnit(i); 3096 for (auto target : kTargets) { 3097 if (fHWTextureUnitBindings[i].hasBeenModified(target)) { 3098 GL_CALL(BindTexture(target, 0)); 3099 } 3100 } 3101 fHWTextureUnitBindings[i].invalidateAllTargets(true); 3102 } 3103 } 3104 3105 void GrGLGpu::flushColorWrite(bool writeColor) { 3106 if (!writeColor) { 3107 if (kNo_TriState != fHWWriteToColor) { 3108 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE, 3109 GR_GL_FALSE, GR_GL_FALSE)); 3110 fHWWriteToColor = kNo_TriState; 3111 } 3112 } else { 3113 if (kYes_TriState != fHWWriteToColor) { 3114 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); 3115 fHWWriteToColor = kYes_TriState; 3116 } 3117 } 3118 } 3119 3120 void GrGLGpu::flushClearColor(GrGLfloat r, GrGLfloat g, GrGLfloat b, GrGLfloat a) { 3121 if (r != fHWClearColor[0] || g != fHWClearColor[1] || 3122 b != fHWClearColor[2] || a != fHWClearColor[3]) { 3123 GL_CALL(ClearColor(r, g, b, a)); 3124 fHWClearColor[0] = r; 3125 fHWClearColor[1] = g; 3126 fHWClearColor[2] = b; 3127 fHWClearColor[3] = a; 3128 } 3129 } 3130 3131 void GrGLGpu::setTextureUnit(int unit) { 3132 SkASSERT(unit >= 0 && unit < this->numTextureUnits()); 3133 if (unit != fHWActiveTextureUnitIdx) { 3134 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit)); 3135 fHWActiveTextureUnitIdx = unit; 3136 } 3137 } 3138 3139 void GrGLGpu::bindTextureToScratchUnit(GrGLenum target, GrGLint textureID) { 3140 // Bind the last texture unit since it is the least likely to be used by GrGLProgram. 3141 int lastUnitIdx = this->numTextureUnits() - 1; 3142 if (lastUnitIdx != fHWActiveTextureUnitIdx) { 3143 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx)); 3144 fHWActiveTextureUnitIdx = lastUnitIdx; 3145 } 3146 // Clear out the this field so that if a GrGLProgram does use this unit it will rebind the 3147 // correct texture. 
3148 fHWTextureUnitBindings[lastUnitIdx].invalidateForScratchUse(target); 3149 GL_CALL(BindTexture(target, textureID)); 3150 } 3151 3152 // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface. 3153 static inline bool can_blit_framebuffer_for_copy_surface( 3154 const GrSurface* dst, GrSurfaceOrigin dstOrigin, 3155 const GrSurface* src, GrSurfaceOrigin srcOrigin, 3156 const SkIRect& srcRect, 3157 const SkIPoint& dstPoint, 3158 const GrGLCaps& caps) { 3159 int dstSampleCnt = 0; 3160 int srcSampleCnt = 0; 3161 if (const GrRenderTarget* rt = dst->asRenderTarget()) { 3162 dstSampleCnt = rt->numColorSamples(); 3163 } 3164 if (const GrRenderTarget* rt = src->asRenderTarget()) { 3165 srcSampleCnt = rt->numColorSamples(); 3166 } 3167 SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget())); 3168 SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget())); 3169 3170 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture()); 3171 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture()); 3172 3173 bool dstIsGLTexture2D = dstTex ? GR_GL_TEXTURE_2D == dstTex->target() : false; 3174 bool srcIsGLTexture2D = srcTex ? 
GR_GL_TEXTURE_2D == srcTex->target() : false; 3175 3176 return caps.canCopyAsBlit(dst->config(), dstSampleCnt, SkToBool(dstTex), dstIsGLTexture2D, 3177 dstOrigin, src->config(), srcSampleCnt, SkToBool(srcTex), 3178 srcIsGLTexture2D, srcOrigin, src->getBoundsRect(), srcRect, dstPoint); 3179 } 3180 3181 static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) { 3182 // A RT has a separate MSAA renderbuffer if: 3183 // 1) It's multisampled 3184 // 2) We're using an extension with separate MSAA renderbuffers 3185 // 3) It's not FBO 0, which is special and always auto-resolves 3186 return rt->numColorSamples() > 1 && glCaps.usesMSAARenderBuffers() && rt->renderFBOID() != 0; 3187 } 3188 3189 static inline bool can_copy_texsubimage(const GrSurface* dst, GrSurfaceOrigin dstOrigin, 3190 const GrSurface* src, GrSurfaceOrigin srcOrigin, 3191 const GrGLCaps& caps) { 3192 3193 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget()); 3194 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget()); 3195 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture()); 3196 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture()); 3197 3198 bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false; 3199 bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false; 3200 3201 bool dstIsGLTexture2D = dstTex ? GR_GL_TEXTURE_2D == dstTex->target() : false; 3202 bool srcIsGLTexture2D = srcTex ? GR_GL_TEXTURE_2D == srcTex->target() : false; 3203 3204 return caps.canCopyTexSubImage(dst->config(), dstHasMSAARenderBuffer, SkToBool(dstTex), 3205 dstIsGLTexture2D, dstOrigin, src->config(), 3206 srcHasMSAARenderBuffer, SkToBool(srcTex), srcIsGLTexture2D, 3207 srcOrigin); 3208 } 3209 3210 // If a temporary FBO was created, its non-zero ID is returned. 
The viewport that the copy rect is 3211 // relative to is output. 3212 void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport, 3213 TempFBOTarget tempFBOTarget) { 3214 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); 3215 if (!rt) { 3216 SkASSERT(surface->asTexture()); 3217 GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture()); 3218 GrGLuint texID = texture->textureID(); 3219 GrGLenum target = texture->target(); 3220 GrGLuint* tempFBOID; 3221 tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID; 3222 3223 if (0 == *tempFBOID) { 3224 GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID)); 3225 } 3226 3227 this->bindFramebuffer(fboTarget, *tempFBOID); 3228 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget, 3229 GR_GL_COLOR_ATTACHMENT0, 3230 target, 3231 texID, 3232 0)); 3233 texture->baseLevelWasBoundToFBO(); 3234 viewport->fLeft = 0; 3235 viewport->fBottom = 0; 3236 viewport->fWidth = surface->width(); 3237 viewport->fHeight = surface->height(); 3238 } else { 3239 this->bindFramebuffer(fboTarget, rt->renderFBOID()); 3240 *viewport = rt->getViewport(); 3241 } 3242 } 3243 3244 void GrGLGpu::unbindTextureFBOForPixelOps(GrGLenum fboTarget, GrSurface* surface) { 3245 // bindSurfaceFBOForPixelOps temporarily binds textures that are not render targets to 3246 if (!surface->asRenderTarget()) { 3247 SkASSERT(surface->asTexture()); 3248 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target(); 3249 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget, 3250 GR_GL_COLOR_ATTACHMENT0, 3251 textureTarget, 3252 0, 3253 0)); 3254 } 3255 } 3256 3257 void GrGLGpu::onFBOChanged() { 3258 if (this->caps()->workarounds().flush_on_framebuffer_change || 3259 this->caps()->workarounds().restore_scissor_on_fbo_change) { 3260 GL_CALL(Flush()); 3261 } 3262 } 3263 3264 void GrGLGpu::bindFramebuffer(GrGLenum target, 
GrGLuint fboid) { 3265 fStats.incRenderTargetBinds(); 3266 GL_CALL(BindFramebuffer(target, fboid)); 3267 if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) { 3268 fBoundDrawFramebuffer = fboid; 3269 } 3270 3271 if (this->caps()->workarounds().restore_scissor_on_fbo_change) { 3272 // The driver forgets the correct scissor when modifying the FBO binding. 3273 if (!fHWScissorSettings.fRect.isInvalid()) { 3274 fHWScissorSettings.fRect.pushToGLScissor(this->glInterface()); 3275 } 3276 } 3277 3278 this->onFBOChanged(); 3279 } 3280 3281 void GrGLGpu::deleteFramebuffer(GrGLuint fboid) { 3282 if (fboid == fBoundDrawFramebuffer && 3283 this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) { 3284 // This workaround only applies to deleting currently bound framebuffers 3285 // on Adreno 420. Because this is a somewhat rare case, instead of 3286 // tracking all the attachments of every framebuffer instead just always 3287 // unbind all attachments. 3288 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, 3289 GR_GL_RENDERBUFFER, 0)); 3290 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, 3291 GR_GL_RENDERBUFFER, 0)); 3292 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT, 3293 GR_GL_RENDERBUFFER, 0)); 3294 } 3295 3296 GL_CALL(DeleteFramebuffers(1, &fboid)); 3297 3298 // Deleting the currently bound framebuffer rebinds to 0. 3299 if (fboid == fBoundDrawFramebuffer) { 3300 this->onFBOChanged(); 3301 } 3302 } 3303 3304 bool GrGLGpu::onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, 3305 GrSurface* src, GrSurfaceOrigin srcOrigin, 3306 const SkIRect& srcRect, const SkIPoint& dstPoint, 3307 bool canDiscardOutsideDstRect) { 3308 // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDraw handle the 3309 // swizzle. 
3310 if (this->caps()->shaderCaps()->configOutputSwizzle(src->config()) != 3311 this->caps()->shaderCaps()->configOutputSwizzle(dst->config())) { 3312 return false; 3313 } 3314 // Don't prefer copying as a draw if the dst doesn't already have a FBO object. 3315 // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites(). 3316 bool preferCopy = SkToBool(dst->asRenderTarget()); 3317 if (preferCopy && this->glCaps().canCopyAsDraw(dst->config(), SkToBool(src->asTexture()))) { 3318 if (this->copySurfaceAsDraw(dst, dstOrigin, src, srcOrigin, srcRect, dstPoint)) { 3319 return true; 3320 } 3321 } 3322 3323 if (can_copy_texsubimage(dst, dstOrigin, src, srcOrigin, this->glCaps())) { 3324 this->copySurfaceAsCopyTexSubImage(dst, dstOrigin, src, srcOrigin, srcRect, dstPoint); 3325 return true; 3326 } 3327 3328 if (can_blit_framebuffer_for_copy_surface(dst, dstOrigin, src, srcOrigin, 3329 srcRect, dstPoint, this->glCaps())) { 3330 return this->copySurfaceAsBlitFramebuffer(dst, dstOrigin, src, srcOrigin, 3331 srcRect, dstPoint); 3332 } 3333 3334 if (!preferCopy && this->glCaps().canCopyAsDraw(dst->config(), SkToBool(src->asTexture()))) { 3335 if (this->copySurfaceAsDraw(dst, dstOrigin, src, srcOrigin, srcRect, dstPoint)) { 3336 return true; 3337 } 3338 } 3339 3340 return false; 3341 } 3342 3343 bool GrGLGpu::createCopyProgram(GrTexture* srcTex) { 3344 TRACE_EVENT0("skia", TRACE_FUNC); 3345 3346 int progIdx = TextureToCopyProgramIdx(srcTex); 3347 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps(); 3348 GrSLType samplerType = 3349 GrSLCombinedSamplerTypeForTextureType(srcTex->texturePriv().textureType()); 3350 3351 if (!fCopyProgramArrayBuffer) { 3352 static const GrGLfloat vdata[] = { 3353 0, 0, 3354 0, 1, 3355 1, 0, 3356 1, 1 3357 }; 3358 fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex, 3359 kStatic_GrAccessPattern, vdata); 3360 } 3361 if (!fCopyProgramArrayBuffer) { 3362 return false; 3363 } 3364 3365 
SkASSERT(!fCopyPrograms[progIdx].fProgram); 3366 GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram()); 3367 if (!fCopyPrograms[progIdx].fProgram) { 3368 return false; 3369 } 3370 3371 const char* version = shaderCaps->versionDeclString(); 3372 GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::kIn_TypeModifier); 3373 GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType, 3374 GrShaderVar::kUniform_TypeModifier); 3375 GrShaderVar uPosXform("u_posXform", kHalf4_GrSLType, GrShaderVar::kUniform_TypeModifier); 3376 GrShaderVar uTexture("u_texture", samplerType, GrShaderVar::kUniform_TypeModifier); 3377 GrShaderVar vTexCoord("v_texCoord", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier); 3378 GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::kOut_TypeModifier); 3379 3380 SkString vshaderTxt(version); 3381 if (shaderCaps->noperspectiveInterpolationSupport()) { 3382 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { 3383 vshaderTxt.appendf("#extension %s : require\n", extension); 3384 } 3385 vTexCoord.addModifier("noperspective"); 3386 } 3387 3388 aVertex.appendDecl(shaderCaps, &vshaderTxt); 3389 vshaderTxt.append(";"); 3390 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt); 3391 vshaderTxt.append(";"); 3392 uPosXform.appendDecl(shaderCaps, &vshaderTxt); 3393 vshaderTxt.append(";"); 3394 vTexCoord.appendDecl(shaderCaps, &vshaderTxt); 3395 vshaderTxt.append(";"); 3396 3397 vshaderTxt.append( 3398 "// Copy Program VS\n" 3399 "void main() {" 3400 " v_texCoord = half2(a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw);" 3401 " sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;" 3402 " sk_Position.zw = half2(0, 1);" 3403 "}" 3404 ); 3405 3406 SkString fshaderTxt(version); 3407 if (shaderCaps->noperspectiveInterpolationSupport()) { 3408 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { 3409 fshaderTxt.appendf("#extension %s : require\n", extension); 
        }
    }
    // Fragment shader for the copy program: a single texture tap at the interpolated coord.
    vTexCoord.setTypeModifier(GrShaderVar::kIn_TypeModifier);
    vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
    fshaderTxt.append(";");
    uTexture.appendDecl(shaderCaps, &fshaderTxt);
    fshaderTxt.append(";");
    fshaderTxt.appendf(
        "// Copy Program FS\n"
        "void main() {"
        "  sk_FragColor = texture(u_texture, v_texCoord);"
        "}"
    );

    const char* str;
    GrGLint length;

    // Convert the SkSL vertex shader to GLSL, then compile and attach it.
    str = vshaderTxt.c_str();
    length = SkToInt(vshaderTxt.size());
    SkSL::Program::Settings settings;
    settings.fCaps = shaderCaps;
    SkSL::String glsl;
    std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, GR_GL_VERTEX_SHADER,
                                                          &str, &length, 1, settings, &glsl);
    GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
                                                  GR_GL_VERTEX_SHADER, glsl.c_str(), glsl.size(),
                                                  &fStats, settings);
    // The copy program uses no SkSL "inputs" (rt-dimension uniforms etc.).
    SkASSERT(program->fInputs.isEmpty());

    // Same conversion/compile for the fragment shader.
    str = fshaderTxt.c_str();
    length = SkToInt(fshaderTxt.size());
    program = GrSkSLtoGLSL(*fGLContext, GR_GL_FRAGMENT_SHADER, &str, &length, 1, settings, &glsl);
    GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
                                                  GR_GL_FRAGMENT_SHADER, glsl.c_str(), glsl.size(),
                                                  &fStats, settings);
    SkASSERT(program->fInputs.isEmpty());

    GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));

    // Cache the uniform locations so draws don't have to re-query them.
    GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
    GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
    GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));

    GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));

    // The linked program keeps the compiled code; the shader objects can go.
    GL_CALL(DeleteShader(vshader));
    GL_CALL(DeleteShader(fshader));

    return true;
}

// Builds (once per filter shape) the program used by onRegenerateMipMapLevels to downsample one
// mip level into the next with draw calls. progIdx encodes the parity of the source dimensions
// (bit 1 = odd width, bit 0 = odd height), which determines how many taps the filter needs.
bool GrGLGpu::createMipmapProgram(int progIdx) {
    const bool oddWidth = SkToBool(progIdx & 0x2);
    const bool oddHeight = SkToBool(progIdx & 0x1);
    // An odd dimension needs two taps along that axis, so 1, 2, or 4 taps total.
    const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);

    const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();

    SkASSERT(!fMipmapPrograms[progIdx].fProgram);
    GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
    if (!fMipmapPrograms[progIdx].fProgram) {
        return false;
    }

    const char* version = shaderCaps->versionDeclString();
    GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::kIn_TypeModifier);
    GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
                               GrShaderVar::kUniform_TypeModifier);
    GrShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
                         GrShaderVar::kUniform_TypeModifier);
    // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
    GrShaderVar vTexCoords[] = {
        GrShaderVar("v_texCoord0", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
        GrShaderVar("v_texCoord1", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
        GrShaderVar("v_texCoord2", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
        GrShaderVar("v_texCoord3", kHalf2_GrSLType, GrShaderVar::kOut_TypeModifier),
    };
    GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType,GrShaderVar::kOut_TypeModifier);

    SkString vshaderTxt(version);
    if (shaderCaps->noperspectiveInterpolationSupport()) {
        if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
            vshaderTxt.appendf("#extension %s : require\n", extension);
        }
        vTexCoords[0].addModifier("noperspective");
        vTexCoords[1].addModifier("noperspective");
        vTexCoords[2].addModifier("noperspective");
        vTexCoords[3].addModifier("noperspective");
    }

    aVertex.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    for (int i = 0; i < numTaps; ++i) {
        vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
        vshaderTxt.append(";");
    }

    // Maps the [0,1] quad vertex to NDC and emits the tap coordinates.
    vshaderTxt.append(
        "// Mipmap Program VS\n"
        "void main() {"
        "  sk_Position.xy = a_vertex * half2(2, 2) - half2(1, 1);"
        "  sk_Position.zw = half2(0, 1);"
    );

    // Insert texture coordinate computation:
    if (oddWidth && oddHeight) {
        vshaderTxt.append(
            "  v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
            "  v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);"
            "  v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);"
            "  v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
        );
    } else if (oddWidth) {
        vshaderTxt.append(
            "  v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);"
            "  v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);"
        );
    } else if (oddHeight) {
        vshaderTxt.append(
            "  v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);"
            "  v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);"
        );
    } else {
        vshaderTxt.append(
            "  v_texCoord0 = a_vertex.xy;"
        );
    }

    vshaderTxt.append("}");

    SkString fshaderTxt(version);
    if (shaderCaps->noperspectiveInterpolationSupport()) {
        if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
            fshaderTxt.appendf("#extension %s : require\n", extension);
        }
    }
    for (int i = 0; i < numTaps; ++i) {
        vTexCoords[i].setTypeModifier(GrShaderVar::kIn_TypeModifier);
        vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
        fshaderTxt.append(";");
    }
    uTexture.appendDecl(shaderCaps, &fshaderTxt);
    fshaderTxt.append(";");
    fshaderTxt.append(
        "// Mipmap Program FS\n"
        "void main() {"
    );

    // Average the taps: box filter over 1, 2, or 4 samples depending on parity.
    if (oddWidth && oddHeight) {
        fshaderTxt.append(
            "  sk_FragColor = (texture(u_texture, v_texCoord0) + "
            "                  texture(u_texture, v_texCoord1) + "
            "                  texture(u_texture, v_texCoord2) + "
            "                  texture(u_texture, v_texCoord3)) * 0.25;"
        );
    } else if (oddWidth || oddHeight) {
        fshaderTxt.append(
            "  sk_FragColor = (texture(u_texture, v_texCoord0) + "
            "                  texture(u_texture, v_texCoord1)) * 0.5;"
        );
    } else {
        fshaderTxt.append(
            "  sk_FragColor = texture(u_texture, v_texCoord0);"
        );
    }

    fshaderTxt.append("}");

    const char* str;
    GrGLint length;

    // SkSL -> GLSL, compile, and attach the vertex shader.
    str = vshaderTxt.c_str();
    length = SkToInt(vshaderTxt.size());
    SkSL::Program::Settings settings;
    settings.fCaps = shaderCaps;
    SkSL::String glsl;
    std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, GR_GL_VERTEX_SHADER,
                                                          &str, &length, 1, settings, &glsl);
    GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
                                                  GR_GL_VERTEX_SHADER, glsl.c_str(), glsl.size(),
                                                  &fStats, settings);
    SkASSERT(program->fInputs.isEmpty());

    // Same for the fragment shader.
    str = fshaderTxt.c_str();
    length = SkToInt(fshaderTxt.size());
    program = GrSkSLtoGLSL(*fGLContext, GR_GL_FRAGMENT_SHADER, &str, &length, 1, settings, &glsl);
    GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
                                                  GR_GL_FRAGMENT_SHADER, glsl.c_str(), glsl.size(),
                                                  &fStats, settings);
    SkASSERT(program->fInputs.isEmpty());

    GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));

    // Cache uniform locations for the draw loop in onRegenerateMipMapLevels.
    GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
                GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
    GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
                GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));

    GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));

    // The linked program owns the compiled code; shader objects are no longer needed.
    GL_CALL(DeleteShader(vshader));
    GL_CALL(DeleteShader(fshader));

    return true;
}

// Copies srcRect of src into dst at dstPoint by binding dst to a temp FBO and drawing a textured
// quad that samples src. Used when glCopyTexSubImage2D/glBlitFramebuffer can't handle the copy.
// Returns false if dst's config can't be an FBO color attachment or program creation fails.
bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                GrSurface* src, GrSurfaceOrigin srcOrigin,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    GrGLTexture* srcTex = static_cast<GrGLTexture*>(src->asTexture());
    // Pick the copy program variant matching the source's sampler type (2D/rect/external).
    int progIdx = TextureToCopyProgramIdx(srcTex);

    if (!this->glCaps().canConfigBeFBOColorAttachment(dst->config())) {
        return false;
    }

    // Lazily create the copy program the first time this variant is needed.
    if (!fCopyPrograms[progIdx].fProgram) {
        if (!this->createCopyProgram(srcTex)) {
            SkDebugf("Failed to create copy program.\n");
            return false;
        }
    }

    int w = srcRect.width();
    int h = srcRect.height();

    this->bindTexture(0, GrSamplerState::ClampNearest(), srcTex);

    GrGLIRect dstVP;
    this->bindSurfaceFBOForPixelOps(dst, GR_GL_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
    this->flushViewport(dstVP);
    // We changed the FBO binding behind the render-target cache's back.
    fHWBoundRenderTargetUniqueID.makeInvalid();

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h);

    this->flushProgram(fCopyPrograms[progIdx].fProgram);

    fHWVertexArrayState.setVertexArrayID(this, 0);

    GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
    attribs->enableVertexArrays(this, 1);
    attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
                 kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0);

    // dst rect edges in NDC (-1 to 1)
    int dw = dst->width();
    int dh = dst->height();
    GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f;
    GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
    GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f;
    GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
    if (kBottomLeft_GrSurfaceOrigin == dstOrigin) {
        // Flip Y for bottom-left-origin destinations.
        dy0 = -dy0;
        dy1 = -dy1;
    }

    GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
    GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w);
    GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
    GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h);
    int sw = src->width();
    int sh = src->height();
    if (kBottomLeft_GrSurfaceOrigin == srcOrigin) {
        sy0 = sh - sy0;
        sy1 = sh - sy1;
    }
    if (srcTex->texturePriv().textureType() != GrTextureType::kRectangle) {
        // src rect edges in normalized texture space (0 to 1)
        // (rectangle textures sample in unnormalized texel coordinates)
        sx0 /= sw;
        sx1 /= sw;
        sy0 /= sh;
        sy1 /= sh;
    }

    // The VS applies these as scale+offset: pos = a_vertex * xform.xy + xform.zw.
    GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
    GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
                      sx1 - sx0, sy1 - sy0, sx0, sy0));
    GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));

    // A copy is a plain overwrite: no blending, full color write, no clip/stencil.
    GrXferProcessor::BlendInfo blendInfo;
    blendInfo.reset();
    this->flushBlend(blendInfo, GrSwizzle::RGBA());
    this->flushColorWrite(true);
    this->flushHWAAState(nullptr, false);
    this->disableScissor();
    this->disableWindowRectangles();
    this->disableStencil();
    if (this->glCaps().srgbWriteControl()) {
        this->flushFramebufferSRGB(true);
    }

    GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
    this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, dst);
    this->didWriteToSurface(dst, dstOrigin, &dstRect);

    return true;
}

// Copies srcRect of src into dst at dstPoint via glCopyTexSubImage2D: src is bound to a temp FBO
// and read back directly into dst's texture. Caller must have checked can_copy_texsubimage.
void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                           GrSurface* src, GrSurfaceOrigin srcOrigin,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    SkASSERT(can_copy_texsubimage(dst, dstOrigin, src, srcOrigin, this->glCaps()));
    GrGLIRect srcVP;
    this->bindSurfaceFBOForPixelOps(src, GR_GL_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
    GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
    SkASSERT(dstTex);
    // We modified the bound FBO
fHWBoundRenderTargetUniqueID.makeInvalid(); 3725 GrGLIRect srcGLRect; 3726 srcGLRect.setRelativeTo(srcVP, srcRect, srcOrigin); 3727 3728 this->bindTextureToScratchUnit(dstTex->target(), dstTex->textureID()); 3729 GrGLint dstY; 3730 if (kBottomLeft_GrSurfaceOrigin == dstOrigin) { 3731 dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight); 3732 } else { 3733 dstY = dstPoint.fY; 3734 } 3735 GL_CALL(CopyTexSubImage2D(dstTex->target(), 0, 3736 dstPoint.fX, dstY, 3737 srcGLRect.fLeft, srcGLRect.fBottom, 3738 srcGLRect.fWidth, srcGLRect.fHeight)); 3739 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, src); 3740 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, 3741 srcRect.width(), srcRect.height()); 3742 this->didWriteToSurface(dst, dstOrigin, &dstRect); 3743 } 3744 3745 bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurfaceOrigin dstOrigin, 3746 GrSurface* src, GrSurfaceOrigin srcOrigin, 3747 const SkIRect& srcRect, 3748 const SkIPoint& dstPoint) { 3749 SkASSERT(can_blit_framebuffer_for_copy_surface(dst, dstOrigin, src, srcOrigin, 3750 srcRect, dstPoint, this->glCaps())); 3751 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, 3752 srcRect.width(), srcRect.height()); 3753 if (dst == src) { 3754 if (SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) { 3755 return false; 3756 } 3757 } 3758 3759 GrGLIRect dstVP; 3760 GrGLIRect srcVP; 3761 this->bindSurfaceFBOForPixelOps(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget); 3762 this->bindSurfaceFBOForPixelOps(src, GR_GL_READ_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget); 3763 // We modified the bound FBO 3764 fHWBoundRenderTargetUniqueID.makeInvalid(); 3765 GrGLIRect srcGLRect; 3766 GrGLIRect dstGLRect; 3767 srcGLRect.setRelativeTo(srcVP, srcRect, srcOrigin); 3768 dstGLRect.setRelativeTo(dstVP, dstRect, dstOrigin); 3769 3770 // BlitFrameBuffer respects the scissor, so disable it. 
3771 this->disableScissor(); 3772 this->disableWindowRectangles(); 3773 3774 GrGLint srcY0; 3775 GrGLint srcY1; 3776 // Does the blit need to y-mirror or not? 3777 if (srcOrigin == dstOrigin) { 3778 srcY0 = srcGLRect.fBottom; 3779 srcY1 = srcGLRect.fBottom + srcGLRect.fHeight; 3780 } else { 3781 srcY0 = srcGLRect.fBottom + srcGLRect.fHeight; 3782 srcY1 = srcGLRect.fBottom; 3783 } 3784 GL_CALL(BlitFramebuffer(srcGLRect.fLeft, 3785 srcY0, 3786 srcGLRect.fLeft + srcGLRect.fWidth, 3787 srcY1, 3788 dstGLRect.fLeft, 3789 dstGLRect.fBottom, 3790 dstGLRect.fLeft + dstGLRect.fWidth, 3791 dstGLRect.fBottom + dstGLRect.fHeight, 3792 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); 3793 this->unbindTextureFBOForPixelOps(GR_GL_DRAW_FRAMEBUFFER, dst); 3794 this->unbindTextureFBOForPixelOps(GR_GL_READ_FRAMEBUFFER, src); 3795 this->didWriteToSurface(dst, dstOrigin, &dstRect); 3796 return true; 3797 } 3798 3799 bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) { 3800 auto glTex = static_cast<GrGLTexture*>(texture); 3801 // Mipmaps are only supported on 2D textures: 3802 if (GR_GL_TEXTURE_2D != glTex->target()) { 3803 return false; 3804 } 3805 3806 // Manual implementation of mipmap generation, to work around driver bugs w/sRGB. 3807 // Uses draw calls to do a series of downsample operations to successive mips. 

    // The manual approach requires the ability to limit which level we're sampling and that the
    // destination can be bound to a FBO:
    if (!this->glCaps().doManualMipmapping() ||
        !this->glCaps().canConfigBeFBOColorAttachment(texture->config())) {
        // Fall back to the driver's built-in mipmap generation.
        GrGLenum target = glTex->target();
        this->bindTextureToScratchUnit(target, glTex->textureID());
        GL_CALL(GenerateMipmap(glTex->target()));
        return true;
    }

    int width = texture->width();
    int height = texture->height();
    int levelCount = SkMipMap::ComputeLevelCount(width, height) + 1;
    SkASSERT(levelCount == texture->texturePriv().maxMipMapLevel() + 1);

    // Create (if necessary), then bind temporary FBO:
    if (0 == fTempDstFBOID) {
        GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
    }
    this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID);
    fHWBoundRenderTargetUniqueID.makeInvalid();

    // Bind the texture, to get things configured for filtering.
    // We'll be changing our base level further below:
    this->setTextureUnit(0);
    this->bindTexture(0, GrSamplerState::ClampBilerp(), glTex);

    // Vertex data:
    // Unit quad in [0,1]^2; lazily uploaded once and reused for every downsample draw.
    if (!fMipmapProgramArrayBuffer) {
        static const GrGLfloat vdata[] = {
            0, 0,
            0, 1,
            1, 0,
            1, 1
        };
        fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex,
                                                     kStatic_GrAccessPattern, vdata);
    }
    if (!fMipmapProgramArrayBuffer) {
        return false;
    }

    fHWVertexArrayState.setVertexArrayID(this, 0);

    GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
    attribs->enableVertexArrays(this, 1);
    attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
                 kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0);

    // Set "simple" state once:
    GrXferProcessor::BlendInfo blendInfo;
    blendInfo.reset();
    this->flushBlend(blendInfo, GrSwizzle::RGBA());
    this->flushColorWrite(true);
    this->flushHWAAState(nullptr, false);
    this->disableScissor();
    this->disableWindowRectangles();
    this->disableStencil();

    // Do all the blits:
    width = texture->width();
    height = texture->height();
    GrGLIRect viewport;
    viewport.fLeft = 0;
    viewport.fBottom = 0;

    for (GrGLint level = 1; level < levelCount; ++level) {
        // Get and bind the program for this particular downsample (filter shape can vary):
        int progIdx = TextureSizeToMipmapProgramIdx(width, height);
        if (!fMipmapPrograms[progIdx].fProgram) {
            if (!this->createMipmapProgram(progIdx)) {
                SkDebugf("Failed to create mipmap program.\n");
                // Invalidate all params to cover base level change in a previous iteration.
                glTex->textureParamsModified();
                return false;
            }
        }
        this->flushProgram(fMipmapPrograms[progIdx].fProgram);

        // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
        const float invWidth = 1.0f / width;
        const float invHeight = 1.0f / height;
        GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
                          invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
        GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));

        // Only sample from previous mip
        GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));

        // Render into the level being generated.
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
                                     glTex->textureID(), level));

        width = SkTMax(1, width / 2);
        height = SkTMax(1, height / 2);
        viewport.fWidth = width;
        viewport.fHeight = height;
        this->flushViewport(viewport);

        GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
    }

    // Unbind:
    GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
                                 GR_GL_TEXTURE_2D, 0, 0));

    // We modified the base level param.
3915 GrGLTexture::NonSamplerParams params = glTex->getCachedNonSamplerParams(); 3916 params.fBaseMipMapLevel = levelCount - 2; // we drew the 2nd to last level into the last level. 3917 glTex->setCachedParams(nullptr, params, this->getResetTimestamp()); 3918 3919 return true; 3920 } 3921 3922 void GrGLGpu::querySampleLocations( 3923 GrRenderTarget* renderTarget, const GrStencilSettings& stencilSettings, 3924 SkTArray<SkPoint>* sampleLocations) { 3925 this->flushStencil(stencilSettings); 3926 this->flushHWAAState(renderTarget, true); 3927 this->flushRenderTarget(static_cast<GrGLRenderTarget*>(renderTarget)); 3928 3929 int effectiveSampleCnt; 3930 GR_GL_GetIntegerv(this->glInterface(), GR_GL_SAMPLES, &effectiveSampleCnt); 3931 SkASSERT(effectiveSampleCnt >= renderTarget->numStencilSamples()); 3932 3933 sampleLocations->reset(effectiveSampleCnt); 3934 for (int i = 0; i < effectiveSampleCnt; ++i) { 3935 GL_CALL(GetMultisamplefv(GR_GL_SAMPLE_POSITION, i, &(*sampleLocations)[i].fX)); 3936 } 3937 } 3938 3939 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) { 3940 SkASSERT(type); 3941 switch (type) { 3942 case kTexture_GrXferBarrierType: { 3943 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt); 3944 SkASSERT(glrt->textureFBOID() != 0 && glrt->renderFBOID() != 0); 3945 if (glrt->textureFBOID() != glrt->renderFBOID()) { 3946 // The render target uses separate storage so no need for glTextureBarrier. 3947 // FIXME: The render target will resolve automatically when its texture is bound, 3948 // but we could resolve only the bounds that will be read if we do it here instead. 
3949 return; 3950 } 3951 SkASSERT(this->caps()->textureBarrierSupport()); 3952 GL_CALL(TextureBarrier()); 3953 return; 3954 } 3955 case kBlend_GrXferBarrierType: 3956 SkASSERT(GrCaps::kAdvanced_BlendEquationSupport == 3957 this->caps()->blendEquationSupport()); 3958 GL_CALL(BlendBarrier()); 3959 return; 3960 default: break; // placate compiler warnings that kNone not handled 3961 } 3962 } 3963 3964 #if GR_TEST_UTILS 3965 GrBackendTexture GrGLGpu::createTestingOnlyBackendTexture(const void* pixels, int w, int h, 3966 GrColorType colorType, bool /*isRT*/, 3967 GrMipMapped mipMapped, 3968 size_t rowBytes) { 3969 this->handleDirtyContext(); 3970 3971 GrPixelConfig config = GrColorTypeToPixelConfig(colorType, GrSRGBEncoded::kNo); 3972 if (!this->caps()->isConfigTexturable(config)) { 3973 return GrBackendTexture(); // invalid 3974 } 3975 3976 if (w > this->caps()->maxTextureSize() || h > this->caps()->maxTextureSize()) { 3977 return GrBackendTexture(); // invalid 3978 } 3979 3980 // Currently we don't support uploading pixel data when mipped. 
3981 if (pixels && GrMipMapped::kYes == mipMapped) { 3982 return GrBackendTexture(); // invalid 3983 } 3984 3985 int bpp = GrColorTypeBytesPerPixel(colorType); 3986 const size_t trimRowBytes = w * bpp; 3987 if (!rowBytes) { 3988 rowBytes = trimRowBytes; 3989 } 3990 3991 GrGLTextureInfo info; 3992 info.fTarget = GR_GL_TEXTURE_2D; 3993 info.fID = 0; 3994 GL_CALL(GenTextures(1, &info.fID)); 3995 this->bindTextureToScratchUnit(info.fTarget, info.fID); 3996 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1)); 3997 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAG_FILTER, GR_GL_NEAREST)); 3998 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MIN_FILTER, GR_GL_NEAREST)); 3999 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE)); 4000 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE)); 4001 4002 // we have to do something special for compressed textures 4003 if (GrPixelConfigIsCompressed(config)) { 4004 GrGLenum internalFormat; 4005 const GrGLInterface* interface = this->glInterface(); 4006 const GrGLCaps& caps = this->glCaps(); 4007 if (!caps.getCompressedTexImageFormats(config, &internalFormat)) { 4008 return GrBackendTexture(); 4009 } 4010 4011 GrMipLevel mipLevel = { pixels, rowBytes }; 4012 if (!allocate_and_populate_compressed_texture(config, *interface, caps, info.fTarget, 4013 internalFormat, &mipLevel, 1, 4014 w, h)) { 4015 return GrBackendTexture(); 4016 } 4017 } else { 4018 bool restoreGLRowLength = false; 4019 if (trimRowBytes != rowBytes && this->glCaps().unpackRowLengthSupport()) { 4020 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp)); 4021 restoreGLRowLength = true; 4022 } 4023 4024 GrGLenum internalFormat; 4025 GrGLenum externalFormat; 4026 GrGLenum externalType; 4027 4028 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat, 4029 &externalType)) { 4030 return GrBackendTexture(); // invalid 4031 } 4032 4033 info.fFormat = 
this->glCaps().configSizedInternalFormat(config); 4034 4035 this->unbindCpuToGpuXferBuffer(); 4036 4037 // Figure out the number of mip levels. 4038 int mipLevels = 1; 4039 if (GrMipMapped::kYes == mipMapped) { 4040 mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1; 4041 } 4042 4043 size_t baseLayerSize = bpp * w * h; 4044 SkAutoMalloc defaultStorage(baseLayerSize); 4045 if (!pixels) { 4046 // Fill in the texture with all zeros so we don't have random garbage 4047 pixels = defaultStorage.get(); 4048 memset(defaultStorage.get(), 0, baseLayerSize); 4049 } else if (trimRowBytes != rowBytes && !restoreGLRowLength) { 4050 // We weren't able to use GR_GL_UNPACK_ROW_LENGTH so make a copy 4051 char* copy = (char*)defaultStorage.get(); 4052 for (int y = 0; y < h; ++y) { 4053 memcpy(©[y*trimRowBytes], &((const char*)pixels)[y*rowBytes], trimRowBytes); 4054 } 4055 pixels = copy; 4056 } 4057 4058 int width = w; 4059 int height = h; 4060 for (int i = 0; i < mipLevels; ++i) { 4061 GL_CALL(TexImage2D(info.fTarget, i, internalFormat, width, height, 0, externalFormat, 4062 externalType, pixels)); 4063 width = SkTMax(1, width / 2); 4064 height = SkTMax(1, height / 2); 4065 } 4066 if (restoreGLRowLength) { 4067 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); 4068 } 4069 } 4070 4071 // unbind the texture from the texture unit to avoid asserts 4072 GL_CALL(BindTexture(info.fTarget, 0)); 4073 4074 GrBackendTexture beTex = GrBackendTexture(w, h, mipMapped, info); 4075 // Lots of tests don't go through Skia's public interface which will set the config so for 4076 // testing we make sure we set a config here. 
4077 beTex.setPixelConfig(config); 4078 return beTex; 4079 } 4080 4081 bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const { 4082 SkASSERT(GrBackendApi::kOpenGL == tex.backend()); 4083 4084 GrGLTextureInfo info; 4085 if (!tex.getGLTextureInfo(&info)) { 4086 return false; 4087 } 4088 4089 GrGLboolean result; 4090 GL_CALL_RET(result, IsTexture(info.fID)); 4091 4092 return (GR_GL_TRUE == result); 4093 } 4094 4095 void GrGLGpu::deleteTestingOnlyBackendTexture(const GrBackendTexture& tex) { 4096 SkASSERT(GrBackendApi::kOpenGL == tex.backend()); 4097 4098 GrGLTextureInfo info; 4099 if (tex.getGLTextureInfo(&info)) { 4100 GL_CALL(DeleteTextures(1, &info.fID)); 4101 } 4102 } 4103 4104 GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(int w, int h, 4105 GrColorType colorType) { 4106 if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) { 4107 return GrBackendRenderTarget(); // invalid 4108 } 4109 this->handleDirtyContext(); 4110 auto config = GrColorTypeToPixelConfig(colorType, GrSRGBEncoded::kNo); 4111 if (!this->glCaps().isConfigRenderable(config)) { 4112 return {}; 4113 } 4114 bool useTexture = false; 4115 GrGLenum colorBufferFormat; 4116 GrGLenum externalFormat = 0, externalType = 0; 4117 if (config == kBGRA_8888_GrPixelConfig && this->glCaps().bgraIsInternalFormat()) { 4118 // BGRA render buffers are not supported. 
        // Use a texture as the color attachment instead of a renderbuffer.
        this->glCaps().getTexImageFormats(config, config, &colorBufferFormat, &externalFormat,
                                          &externalType);
        useTexture = true;
    } else {
        this->glCaps().getRenderbufferFormat(config, &colorBufferFormat);
    }
    int sFormatIdx = this->getCompatibleStencilIndex(config);
    if (sFormatIdx < 0) {
        return {};
    }
    GrGLuint colorID = 0;
    GrGLuint stencilID = 0;
    // Shared cleanup for all failure paths below (and for the deliberate delete-after-attach).
    auto deleteIDs = [&] {
        if (colorID) {
            if (useTexture) {
                GL_CALL(DeleteTextures(1, &colorID));
            } else {
                GL_CALL(DeleteRenderbuffers(1, &colorID));
            }
        }
        if (stencilID) {
            GL_CALL(DeleteRenderbuffers(1, &stencilID));
        }
    };

    if (useTexture) {
        GL_CALL(GenTextures(1, &colorID));
    } else {
        GL_CALL(GenRenderbuffers(1, &colorID));
    }
    GL_CALL(GenRenderbuffers(1, &stencilID));
    if (!stencilID || !colorID) {
        deleteIDs();
        return {};
    }

    GrGLFramebufferInfo info;
    info.fFBOID = 0;
    this->glCaps().getSizedInternalFormat(config, &info.fFormat);
    GL_CALL(GenFramebuffers(1, &info.fFBOID));
    if (!info.fFBOID) {
        deleteIDs();
        return {};
    }

    this->invalidateBoundRenderTarget();

    // Allocate the color attachment and wire it into the FBO.
    this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
    if (useTexture) {
        this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, colorID);
        GL_CALL(TexImage2D(GR_GL_TEXTURE_2D, 0, colorBufferFormat, w, h, 0, externalFormat,
                           externalType, nullptr));
        GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
                                     colorID, 0));
    } else {
        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID));
        GL_ALLOC_CALL(this->glInterface(),
                      RenderbufferStorage(GR_GL_RENDERBUFFER, colorBufferFormat, w, h));
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
                                        GR_GL_RENDERBUFFER, colorID));
    }
    // Allocate and attach the stencil (and depth, when the format is packed depth-stencil).
    GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID));
    auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx].fInternalFormat;
    GL_ALLOC_CALL(this->glInterface(),
                  RenderbufferStorage(GR_GL_RENDERBUFFER, stencilBufferFormat, w, h));
    GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER,
                                    stencilID));
    if (this->glCaps().stencilFormats()[sFormatIdx].fPacked) {
        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
                                        GR_GL_RENDERBUFFER, stencilID));
    }

    // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL
    // has this rule that if a renderbuffer/texture is deleted and a FBO other than the current FBO
    // has the RB attached then deletion is delayed. So we unbind the FBO here and delete the
    // renderbuffers/texture.
    this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
    deleteIDs();

    this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
    GrGLenum status;
    GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
    if (GR_GL_FRAMEBUFFER_COMPLETE != status) {
        this->deleteFramebuffer(info.fFBOID);
        return {};
    }
    auto stencilBits = SkToInt(this->glCaps().stencilFormats()[sFormatIdx].fStencilBits);
    GrBackendRenderTarget beRT = GrBackendRenderTarget(w, h, 1, stencilBits, info);
    // Lots of tests don't go through Skia's public interface which will set the config so for
    // testing we make sure we set a config here.
4209 beRT.setPixelConfig(config); 4210 #ifdef SK_DEBUG 4211 SkColorType skColorType = GrColorTypeToSkColorType(colorType); 4212 if (skColorType != kUnknown_SkColorType) { 4213 SkASSERT(this->caps()->validateBackendRenderTarget( 4214 beRT, GrColorTypeToSkColorType(colorType)) != kUnknown_GrPixelConfig); 4215 } 4216 #endif 4217 return beRT; 4218 } 4219 4220 void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) { 4221 SkASSERT(GrBackendApi::kOpenGL == backendRT.backend()); 4222 GrGLFramebufferInfo info; 4223 if (backendRT.getGLFramebufferInfo(&info)) { 4224 if (info.fFBOID) { 4225 this->deleteFramebuffer(info.fFBOID); 4226 } 4227 } 4228 } 4229 4230 void GrGLGpu::testingOnly_flushGpuAndSync() { 4231 GL_CALL(Finish()); 4232 } 4233 #endif 4234 4235 /////////////////////////////////////////////////////////////////////////////// 4236 4237 GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu, 4238 const GrBuffer* ibuf) { 4239 GrGLAttribArrayState* attribState; 4240 4241 if (gpu->glCaps().isCoreProfile()) { 4242 if (!fCoreProfileVertexArray) { 4243 GrGLuint arrayID; 4244 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID)); 4245 int attrCount = gpu->glCaps().maxVertexAttributes(); 4246 fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount); 4247 } 4248 if (ibuf) { 4249 attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf); 4250 } else { 4251 attribState = fCoreProfileVertexArray->bind(gpu); 4252 } 4253 } else { 4254 if (ibuf) { 4255 // bindBuffer implicitly binds VAO 0 when binding an index buffer. 
4256 gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf); 4257 } else { 4258 this->setVertexArrayID(gpu, 0); 4259 } 4260 int attrCount = gpu->glCaps().maxVertexAttributes(); 4261 if (fDefaultVertexArrayAttribState.count() != attrCount) { 4262 fDefaultVertexArrayAttribState.resize(attrCount); 4263 } 4264 attribState = &fDefaultVertexArrayAttribState; 4265 } 4266 return attribState; 4267 } 4268 4269 void GrGLGpu::onFinishFlush(GrSurfaceProxy*, SkSurface::BackendSurfaceAccess access, 4270 GrFlushFlags flags, bool insertedSemaphore, 4271 GrGpuFinishedProc finishedProc, 4272 GrGpuFinishedContext finishedContext) { 4273 // If we inserted semaphores during the flush, we need to call GLFlush. 4274 if (insertedSemaphore) { 4275 GL_CALL(Flush()); 4276 } 4277 if (flags & kSyncCpu_GrFlushFlag) { 4278 GL_CALL(Finish()); 4279 } 4280 // TODO: We should have GL actually wait until the GPU has finished work on the GPU. 4281 if (finishedProc) { 4282 finishedProc(finishedContext); 4283 } 4284 } 4285 4286 void GrGLGpu::submit(GrGpuCommandBuffer* buffer) { 4287 if (buffer->asRTCommandBuffer()) { 4288 SkASSERT(fCachedRTCommandBuffer.get() == buffer); 4289 fCachedRTCommandBuffer->reset(); 4290 } else { 4291 SkASSERT(fCachedTexCommandBuffer.get() == buffer); 4292 fCachedTexCommandBuffer->reset(); 4293 } 4294 } 4295 4296 GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() { 4297 SkASSERT(this->caps()->fenceSyncSupport()); 4298 GrGLsync sync; 4299 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); 4300 GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(GrGLsync)); 4301 return (GrFence)sync; 4302 } 4303 4304 bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) { 4305 GrGLenum result; 4306 GL_CALL_RET(result, ClientWaitSync((GrGLsync)fence, GR_GL_SYNC_FLUSH_COMMANDS_BIT, timeout)); 4307 return (GR_GL_CONDITION_SATISFIED == result); 4308 } 4309 4310 void GrGLGpu::deleteFence(GrFence fence) const { 4311 this->deleteSync((GrGLsync)fence); 4312 } 4313 4314 sk_sp<GrSemaphore> 
SK_WARN_UNUSED_RESULT GrGLGpu::makeSemaphore(bool isOwned) { 4315 SkASSERT(this->caps()->fenceSyncSupport()); 4316 return GrGLSemaphore::Make(this, isOwned); 4317 } 4318 4319 sk_sp<GrSemaphore> GrGLGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore, 4320 GrResourceProvider::SemaphoreWrapType wrapType, 4321 GrWrapOwnership ownership) { 4322 SkASSERT(this->caps()->fenceSyncSupport()); 4323 return GrGLSemaphore::MakeWrapped(this, semaphore.glSync(), ownership); 4324 } 4325 4326 void GrGLGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) { 4327 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get()); 4328 4329 GrGLsync sync; 4330 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); 4331 glSem->setSync(sync); 4332 } 4333 4334 void GrGLGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) { 4335 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get()); 4336 4337 GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED)); 4338 } 4339 4340 void GrGLGpu::deleteSync(GrGLsync sync) const { 4341 GL_CALL(DeleteSync(sync)); 4342 } 4343 4344 void GrGLGpu::insertEventMarker(const char* msg) { 4345 GL_CALL(InsertEventMarker(strlen(msg), msg)); 4346 } 4347 4348 sk_sp<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) { 4349 // Set up a semaphore to be signaled once the data is ready, and flush GL 4350 sk_sp<GrSemaphore> semaphore = this->makeSemaphore(true); 4351 this->insertSemaphore(semaphore); 4352 // We must call flush here to make sure the GrGLSync object gets created and sent to the gpu. 
4353 GL_CALL(Flush()); 4354 4355 return semaphore; 4356 } 4357 4358 int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) { 4359 switch (GrSLCombinedSamplerTypeForTextureType(texture->texturePriv().textureType())) { 4360 case kTexture2DSampler_GrSLType: 4361 return 0; 4362 case kTexture2DRectSampler_GrSLType: 4363 return 1; 4364 case kTextureExternalSampler_GrSLType: 4365 return 2; 4366 default: 4367 SK_ABORT("Unexpected samper type"); 4368 return 0; 4369 } 4370 } 4371 4372 #ifdef SK_ENABLE_DUMP_GPU 4373 #include "SkJSONWriter.h" 4374 void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const { 4375 // We are called by the base class, which has already called beginObject(). We choose to nest 4376 // all of our caps information in a named sub-object. 4377 writer->beginObject("GL GPU"); 4378 4379 const GrGLubyte* str; 4380 GL_CALL_RET(str, GetString(GR_GL_VERSION)); 4381 writer->appendString("GL_VERSION", (const char*)(str)); 4382 GL_CALL_RET(str, GetString(GR_GL_RENDERER)); 4383 writer->appendString("GL_RENDERER", (const char*)(str)); 4384 GL_CALL_RET(str, GetString(GR_GL_VENDOR)); 4385 writer->appendString("GL_VENDOR", (const char*)(str)); 4386 GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION)); 4387 writer->appendString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str)); 4388 4389 writer->appendName("extensions"); 4390 glInterface()->fExtensions.dumpJSON(writer); 4391 4392 writer->endObject(); 4393 } 4394 #endif 4395