/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrResourceProvider.h"

#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrGpu.h"
#include "GrPathRendering.h"
#include "GrRenderTarget.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceKey.h"
#include "GrSemaphore.h"
#include "GrStencilAttachment.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTexturePriv.h"
#include "../private/GrSingleOwner.h"
#include "SkMathPriv.h"

GR_DECLARE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);

const int GrResourceProvider::kMinScratchTextureSize = 16;

#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)

GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
    : fCache(cache)
    , fGpu(gpu)
#ifdef SK_DEBUG
    , fSingleOwner(owner)
#endif
{
    fCaps = sk_ref_sp(fGpu->caps());

    GR_DEFINE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
    fQuadIndexBufferKey = gQuadIndexBufferKey;
}

bool GrResourceProvider::IsFunctionallyExact(GrTextureProxy* proxy) {
    return proxy->priv().isExact() || (SkIsPow2(proxy->width()) && SkIsPow2(proxy->height()));
}

GrTexture* GrResourceProvider::createMipMappedTexture(const GrSurfaceDesc& desc,
                                                      SkBudgeted budgeted, const GrMipLevel* texels,
                                                      int mipLevelCount, uint32_t flags,
                                                      SkDestinationSurfaceColorMode mipColorMode) {
    ASSERT_SINGLE_OWNER

    if (this->isAbandoned()) {
        return nullptr;
    }
    if (mipLevelCount && !texels) {
        return nullptr;
    }
    for (int i = 0; i < mipLevelCount; ++i) {
        if (!texels[i].fPixels) {
            return nullptr;
        }
    }
    if (mipLevelCount > 1 && GrPixelConfigIsSint(desc.fConfig)) {
        return nullptr;
    }
    if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
        !fGpu->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return nullptr;
    }
    if (!GrPixelConfigIsCompressed(desc.fConfig)) {
        if (mipLevelCount < 2) {
            flags |= kExact_Flag | kNoCreate_Flag;
            if (GrTexture* texture = this->refScratchTexture(desc, flags)) {
                if (!mipLevelCount ||
                    texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                         texels[0].fPixels, texels[0].fRowBytes)) {
                    if (SkBudgeted::kNo == budgeted) {
                        texture->resourcePriv().makeUnbudgeted();
                    }
                    texture->texturePriv().setMipColorMode(mipColorMode);
                    return texture;
                }
                texture->unref();
            }
        }
    }

    SkTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
    for (int i = 0; i < mipLevelCount; ++i) {
        texelsShallowCopy.push_back(texels[i]);
    }
    GrTexture* texture = fGpu->createTexture(desc, budgeted, texelsShallowCopy);
    if (texture) {
        texture->texturePriv().setMipColorMode(mipColorMode);
    }
    return texture;
}

GrTexture* GrResourceProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                             const void* srcData, size_t rowBytes,
                                             uint32_t flags) {
    GrMipLevel tempTexels;
    GrMipLevel* texels = nullptr;
    int levelCount = 0;
    if (srcData) {
        tempTexels.fPixels = srcData;
        tempTexels.fRowBytes = rowBytes;
        texels = &tempTexels;
        levelCount = 1;
    }
    return this->createMipMappedTexture(desc, budgeted, texels, levelCount, flags);
}
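// A minimal usage sketch (not part of this file; the 'resourceProvider' pointer and pixel data
// are assumed to come from a valid GrContext): callers fill out a GrSurfaceDesc and let the
// provider decide between a recycled scratch texture and a fresh allocation.
//
//     GrSurfaceDesc desc;
//     desc.fWidth = 256;
//     desc.fHeight = 256;
//     desc.fConfig = kRGBA_8888_GrPixelConfig;
//     GrTexture* tex = resourceProvider->createTexture(desc, SkBudgeted::kYes,
//                                                      pixels, rowBytes, 0 /* flags */);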
GrTexture* GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc, uint32_t flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(0 == flags || kNoPendingIO_Flag == flags);
    return this->internalCreateApproxTexture(desc, flags);
}

GrTexture* GrResourceProvider::internalCreateApproxTexture(const GrSurfaceDesc& desc,
                                                           uint32_t scratchFlags) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    // Currently we don't recycle compressed textures as scratch.
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        return nullptr;
    } else {
        return this->refScratchTexture(desc, scratchFlags);
    }
}

GrTexture* GrResourceProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
                                                 uint32_t flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(!this->isAbandoned());
    SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig));

    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        if (!(kExact_Flag & flags)) {
            // bin by pow2 with a reasonable min
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth = SkTMax(kMinScratchTextureSize, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(kMinScratchTextureSize, GrNextPow2(desc->fHeight));
        }

        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(*desc, &key);
        uint32_t scratchFlags = 0;
        if (kNoPendingIO_Flag & flags) {
            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
        } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        }
        GrGpuResource* resource = fCache->findAndRefScratchResource(key,
                                                                    GrSurface::WorstCaseSize(*desc),
                                                                    scratchFlags);
        if (resource) {
            GrSurface* surface = static_cast<GrSurface*>(resource);
            GrRenderTarget* rt = surface->asRenderTarget();
            if (rt && fGpu->caps()->discardRenderTargetSupport()) {
                rt->discard();
            }
            return surface->asTexture();
        }
    }

    if (!(kNoCreate_Flag & flags)) {
        return fGpu->createTexture(*desc, SkBudgeted::kYes);
    }

    return nullptr;
}
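// Worked example of the pow2 binning in refScratchTexture (illustrative numbers, not from the
// source): a non-exact scratch request is rounded up to the next power of two with a 16-pixel
// floor, so many differently-sized requests share one scratch key and one cached allocation.
//
//     // GrNextPow2(70) == 128, GrNextPow2(40) == 64  -> a 70x40 request bins to 128x64
//     // SkTMax(16, GrNextPow2(10)) == 16             -> a 10x10 request bins to 16x16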
sk_sp<GrTexture> GrResourceProvider::wrapBackendTexture(const GrBackendTextureDesc& desc,
                                                        GrWrapOwnership ownership) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapBackendTexture(desc, ownership);
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendRenderTarget(
        const GrBackendRenderTargetDesc& desc) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapBackendRenderTarget(desc);
}

void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
                                                   GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned() || !resource) {
        return;
    }
    resource->resourcePriv().setUniqueKey(key);
}

GrGpuResource* GrResourceProvider::findAndRefResourceByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fCache->findAndRefUniqueResource(key);
}

GrTexture* GrResourceProvider::findAndRefTextureByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER
    GrGpuResource* resource = this->findAndRefResourceByUniqueKey(key);
    if (resource) {
        GrTexture* texture = static_cast<GrSurface*>(resource)->asTexture();
        SkASSERT(texture);
        return texture;
    }
    return nullptr;
}

// MDB TODO (caching): this side-steps the issue of texture proxies with unique IDs
void GrResourceProvider::assignUniqueKeyToProxy(const GrUniqueKey& key, GrTextureProxy* proxy) {
    ASSERT_SINGLE_OWNER
    SkASSERT(key.isValid());
    if (this->isAbandoned() || !proxy) {
        return;
    }

    GrTexture* texture = proxy->instantiate(this);
    if (!texture) {
        return;
    }

    this->assignUniqueKeyToResource(key, texture);
}

// MDB TODO (caching): this side-steps the issue of texture proxies with unique IDs
sk_sp<GrTextureProxy> GrResourceProvider::findProxyByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER

    sk_sp<GrTexture> texture(this->findAndRefTextureByUniqueKey(key));
    if (!texture) {
        return nullptr;
    }

    return GrSurfaceProxy::MakeWrapped(std::move(texture));
}

const GrBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* pattern,
                                                               int patternSize,
                                                               int reps,
                                                               int vertCount,
                                                               const GrUniqueKey& key) {
    size_t bufferSize = patternSize * reps * sizeof(uint16_t);

    // This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
    GrBuffer* buffer = this->createBuffer(bufferSize, kIndex_GrBufferType, kStatic_GrAccessPattern,
                                          kNoPendingIO_Flag);
    if (!buffer) {
        return nullptr;
    }
    uint16_t* data = (uint16_t*) buffer->map();
    bool useTempData = (nullptr == data);
    if (useTempData) {
        data = new uint16_t[reps * patternSize];
    }
    // Replicate the pattern 'reps' times, offsetting the indices by 'vertCount' per repetition.
    for (int i = 0; i < reps; ++i) {
        int baseIdx = i * patternSize;
        uint16_t baseVert = (uint16_t)(i * vertCount);
        for (int j = 0; j < patternSize; ++j) {
            data[baseIdx + j] = baseVert + pattern[j];
        }
    }
    if (useTempData) {
        if (!buffer->updateData(data, bufferSize)) {
            // Free the temp storage before bailing out so it doesn't leak.
            delete[] data;
            buffer->unref();
            return nullptr;
        }
        delete[] data;
    } else {
        buffer->unmap();
    }
    this->assignUniqueKeyToResource(key, buffer);
    return buffer;
}

const GrBuffer* GrResourceProvider::createQuadIndexBuffer() {
    static const int kMaxQuads = 1 << 12;  // max possible: (1 << 14) - 1;
    GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
    static const uint16_t kPattern[] = { 0, 1, 2, 0, 2, 3 };

    return this->createInstancedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
}
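// Worked example of the expansion createInstancedIndexBuffer performs for the quad pattern
// above: with pattern {0, 1, 2, 0, 2, 3} and vertCount == 4, repetition 0 emits indices
// 0,1,2,0,2,3 and repetition 1 emits 4,5,6,4,6,7, turning each quad's four vertices into two
// triangles. The static assert holds because the largest index written is
// 4 * kMaxQuads - 1 == 16383, which fits in a uint16_t.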
GrPath* GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPath(path, style);
}

GrPathRange* GrResourceProvider::createPathRange(GrPathRange::PathGenerator* gen,
                                                 const GrStyle& style) {
    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPathRange(gen, style);
}

GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf,
                                              const SkScalerContextEffects& effects,
                                              const SkDescriptor* desc,
                                              const GrStyle& style) {
    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createGlyphs(tf, effects, desc, style);
}

GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
                                           GrAccessPattern accessPattern, uint32_t flags,
                                           const void* data) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    if (kDynamic_GrAccessPattern != accessPattern) {
        return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
    }
    if (!(flags & kRequireGpuMemory_Flag) &&
        this->gpu()->caps()->preferClientSideDynamicBuffers() &&
        GrBufferTypeIsVertexOrIndex(intendedType) &&
        kDynamic_GrAccessPattern == accessPattern) {
        return GrBuffer::CreateCPUBacked(this->gpu(), size, intendedType, data);
    }

    // bin by pow2 with a reasonable min
    static const size_t MIN_SIZE = 1 << 12;
    size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size));

    GrScratchKey key;
    GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
    uint32_t scratchFlags = 0;
    if (flags & kNoPendingIO_Flag) {
        scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
    } else {
        scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
    }
    GrBuffer* buffer = static_cast<GrBuffer*>(
            this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags));
    if (!buffer) {
        buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
        if (!buffer) {
            return nullptr;
        }
    }
    if (data) {
        buffer->updateData(data, size);
    }
    SkASSERT(!buffer->isCPUBacked());  // We should only cache real VBOs.
    return buffer;
}
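// Worked example of the dynamic-buffer binning in createBuffer (illustrative numbers, not from
// the source): a 5000-byte dynamic vertex buffer request becomes
// GrNextSizePow2(5000) == 8192 bytes, while a 100-byte request is clamped to the 4096-byte
// (1 << 12) minimum. As with scratch textures, binning lets many call sites recycle the same
// cached GPU buffer instead of allocating a new one per request.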
GrStencilAttachment* GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt) {
    SkASSERT(rt);
    if (rt->renderTargetPriv().getStencilAttachment()) {
        return rt->renderTargetPriv().getStencilAttachment();
    }

    if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
        GrUniqueKey sbKey;

        int width = rt->width();
        int height = rt->height();
#if 0
        if (this->caps()->oversizedStencilSupport()) {
            width = SkNextPow2(width);
            height = SkNextPow2(height);
        }
#endif
        bool newStencil = false;
        GrStencilAttachment::ComputeSharedStencilAttachmentKey(width, height,
                                                               rt->numStencilSamples(), &sbKey);
        GrStencilAttachment* stencil = static_cast<GrStencilAttachment*>(
                this->findAndRefResourceByUniqueKey(sbKey));
        if (!stencil) {
            // Need to try and create a new stencil
            stencil = this->gpu()->createStencilAttachmentForRenderTarget(rt, width, height);
            if (stencil) {
                this->assignUniqueKeyToResource(sbKey, stencil);
                newStencil = true;
            }
        }
        if (rt->renderTargetPriv().attachStencilAttachment(stencil)) {
            if (newStencil) {
                // Right now we're clearing the stencil attachment here after it is
                // attached to a RT for the first time. When we start matching
                // stencil buffers with smaller color targets this will no longer
                // be correct because it won't be guaranteed to clear the entire
                // sb.
                // We used to clear down in the GL subclass using a special purpose
                // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
                // FBO status.
                this->gpu()->clearStencil(rt);
            }
        }
    }
    return rt->renderTargetPriv().getStencilAttachment();
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendTextureAsRenderTarget(
        const GrBackendTextureDesc& desc) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    return this->gpu()->wrapBackendTextureAsRenderTarget(desc);
}

sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrResourceProvider::makeSemaphore() {
    return fGpu->makeSemaphore();
}

void GrResourceProvider::takeOwnershipOfSemaphore(sk_sp<GrSemaphore> semaphore) {
    semaphore->resetGpu(fGpu);
}

void GrResourceProvider::releaseOwnershipOfSemaphore(sk_sp<GrSemaphore> semaphore) {
    semaphore->resetGpu(nullptr);
}
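// A minimal sketch of the semaphore ownership handoff (hypothetical 'providerA'/'providerB',
// e.g. resource providers of two contexts sharing one GPU object): the releasing side detaches
// the semaphore from its GrGpu, and the adopting side re-binds it so later waits and signals go
// through the new GPU.
//
//     sk_sp<GrSemaphore> sem = providerA->makeSemaphore();
//     providerA->releaseOwnershipOfSemaphore(sem);  // sem->resetGpu(nullptr)
//     providerB->takeOwnershipOfSemaphore(sem);     // sem->resetGpu(providerB's GrGpu)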