/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrDrawingManager.h"

#include "GrContext.h"
#include "GrGpu.h"
#include "GrOnFlushResourceProvider.h"
#include "GrRenderTargetContext.h"
#include "GrPathRenderingRenderTargetContext.h"
#include "GrRenderTargetProxy.h"
#include "GrResourceProvider.h"
#include "GrSoftwarePathRenderer.h"
#include "GrSurfacePriv.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTextureContext.h"
#include "GrTextureOpList.h"
#include "SkSurface_Gpu.h"
#include "SkTTopoSort.h"

#include "GrTracing.h"
#include "text/GrAtlasTextContext.h"
#include "text/GrStencilAndCoverTextContext.h"

// Releases everything the drawing manager owns: closes and resets all pending
// opLists and tears down the path-renderer chain and software path renderer.
// Called from both the destructor and abandon().
void GrDrawingManager::cleanup() {
    for (int i = 0; i < fOpLists.count(); ++i) {
        // no opList should receive a new command after this
        fOpLists[i]->makeClosed(*fContext->caps());

        // We shouldn't need to do this, but it turns out some clients still hold onto opLists
        // after a cleanup.
        // MDB TODO: is this still true?
        fOpLists[i]->reset();
    }

    fOpLists.reset();

    delete fPathRendererChain;
    fPathRendererChain = nullptr;
    SkSafeSetNull(fSoftwarePathRenderer);
}

GrDrawingManager::~GrDrawingManager() {
    this->cleanup();
}

// Marks the manager abandoned (subsequent flushes become no-ops via
// wasAbandoned()) and drops GPU-resource references from every opList
// before the general cleanup.
void GrDrawingManager::abandon() {
    fAbandoned = true;
    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->abandonGpuResources();
    }
    this->cleanup();
}

// Frees GPU resources without abandoning: path renderers are destroyed (they
// will be lazily recreated by getPathRenderer()) and each opList releases its
// GPU resources, but the opLists themselves remain usable.
void GrDrawingManager::freeGpuResources() {
    // a path renderer may be holding onto resources
    delete fPathRendererChain;
    fPathRendererChain = nullptr;
    SkSafeSetNull(fSoftwarePathRenderer);
    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->freeGpuResources();
    }
}

// Resets all recorded opLists and the shared flush state, discarding any
// pending (unflushed) work.
void GrDrawingManager::reset() {
    for (int i = 0; i < fOpLists.count(); ++i) {
        fOpLists[i]->reset();
    }
    fFlushState.reset();
}

// Lazily creates (via the GPU backend) and caches the allocator used for
// instanced rendering.
gr_instanced::OpAllocator* GrDrawingManager::instancingAllocator() {
    if (fInstancingAllocator) {
        return fInstancingAllocator.get();
    }

    fInstancingAllocator = fContext->getGpu()->createInstancedRenderingAllocator();
    return fInstancingAllocator.get();
}

// Executes all recorded opLists against the GPU. The sequence is strictly
// ordered: close all opLists -> (debug) check for artificial splits ->
// (MDB) topologically sort -> run preFlush callbacks (e.g. atlas managers) ->
// instantiate + prepareOps -> upload data -> executeOps -> reset -> notify the
// resource cache -> run postFlush callbacks. Re-entrant calls are rejected via
// the fFlushing guard.
// MDB TODO: make use of the 'proxy' parameter.
void GrDrawingManager::internalFlush(GrSurfaceProxy*, GrResourceCache::FlushType type) {
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "internalFlush", fContext);

    // Guard against re-entrant flushes and flushes after abandon().
    if (fFlushing || this->wasAbandoned()) {
        return;
    }
    fFlushing = true;
    bool flushed = false;  // becomes true if any opList actually executed ops

    for (int i = 0; i < fOpLists.count(); ++i) {
        // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
        // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
        // but need to be flushed anyway. Closing such GrOpLists here will mean new
        // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
        fOpLists[i]->makeClosed(*fContext->caps());
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opLists. If two sequential opLists
    // share the same backing GrSurfaceProxy it means the opList was artificially split.
    if (fOpLists.count()) {
        GrRenderTargetOpList* prevOpList = fOpLists[0]->asRenderTargetOpList();
        for (int i = 1; i < fOpLists.count(); ++i) {
            GrRenderTargetOpList* curOpList = fOpLists[i]->asRenderTargetOpList();

            if (prevOpList && curOpList) {
                SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
            }

            prevOpList = curOpList;
        }
    }
#endif

#ifdef ENABLE_MDB
    // Reorder opLists so that dependencies execute before their dependents.
    // NOTE: in release builds SkDEBUGCODE compiles away and only the sort runs.
    SkDEBUGCODE(bool result =)
                        SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
    SkASSERT(result);
#endif

    GrOnFlushResourceProvider onFlushProvider(this);

    if (!fOnFlushCBObjects.empty()) {
        // Give registered onFlush objects (e.g. atlasing systems) a chance to
        // record and execute their own renderTargetContexts before the main
        // opLists run, since the main ops may sample the atlas textures.
        // MDB TODO: pre-MDB '1' is the correct pre-allocated size. Post-MDB it will need
        // to be larger.
        SkAutoSTArray<1, uint32_t> opListIds(fOpLists.count());
        for (int i = 0; i < fOpLists.count(); ++i) {
            opListIds[i] = fOpLists[i]->uniqueID();
        }

        SkSTArray<1, sk_sp<GrRenderTargetContext>> renderTargetContexts;
        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider,
                                      opListIds.get(), opListIds.count(),
                                      &renderTargetContexts);
            if (!renderTargetContexts.count()) {
                continue;       // This is fine. No atlases of this type are required for this flush
            }

            // Execute the callback's auxiliary opLists immediately, ahead of
            // the main opLists below.
            for (int j = 0; j < renderTargetContexts.count(); ++j) {
                GrOpList* opList = renderTargetContexts[j]->getOpList();
                if (!opList) {
                    continue;   // Odd - but not a big deal
                }
                opList->makeClosed(*fContext->caps());
                opList->prepareOps(&fFlushState);
                if (!opList->executeOps(&fFlushState)) {
                    continue;   // This is bad
                }
            }
            renderTargetContexts.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    for (int i = 0; i < fOpLists.count(); ++i) {
        SkDEBUGCODE(fOpLists[i]->dump();)
    }
#endif

    // Instantiate backing surfaces and prepare (record) all ops. opLists whose
    // proxies fail to instantiate are nulled out and skipped below.
    for (int i = 0; i < fOpLists.count(); ++i) {
        if (!fOpLists[i]->instantiate(fContext->resourceProvider())) {
            fOpLists[i] = nullptr;
            continue;
        }

        fOpLists[i]->prepareOps(&fFlushState);
    }

    // Upload all data to the GPU
    fFlushState.preIssueDraws();

    for (int i = 0; i < fOpLists.count(); ++i) {
        if (!fOpLists[i]) {
            continue;  // instantiation failed above
        }

        if (fOpLists[i]->executeOps(&fFlushState)) {
            flushed = true;
        }
        fOpLists[i]->reset();
    }
    fOpLists.reset();

    // All recorded draws should have been issued by now.
    SkASSERT(fFlushState.nextDrawToken() == fFlushState.nextTokenToFlush());

    fContext->getGpu()->finishFlush();

    fFlushState.reset();
    // We always have to notify the cache when it requested a flush so it can reset its state.
    if (flushed || type == GrResourceCache::FlushType::kCacheRequested) {
        fContext->getResourceCache()->notifyFlushOccurred(type);
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush();
    }
    fFlushing = false;
}

// Ensures 'proxy' is safe to hand to external (non-Ganesh) users: flushes any
// pending reads/writes on it and, for render targets, resolves MSAA.
void GrDrawingManager::prepareSurfaceForExternalIO(GrSurfaceProxy* proxy) {
    if (this->wasAbandoned()) {
        return;
    }
    SkASSERT(proxy);

    if (proxy->priv().hasPendingIO()) {
        this->flush(proxy);
    }

    if (!proxy->instantiate(fContext->resourceProvider())) {
        return;  // nothing backing the proxy; nothing to resolve
    }

    GrSurface* surface = proxy->priv().peekSurface();

    if (fContext->getGpu() && surface->asRenderTarget()) {
        fContext->getGpu()->resolveRenderTarget(surface->asRenderTarget());
    }
}

// Registers an object whose preFlush/postFlush hooks run around every flush.
// NOTE(review): no corresponding removal path is visible in this file — the
// caller presumably must outlive the drawing manager; confirm at call sites.
void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

// Creates a new opList for rendering into 'rtp'. If 'managedOpList' is true the
// drawing manager tracks (and will flush) the list; otherwise the caller owns
// its lifetime entirely.
sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(GrRenderTargetProxy* rtp,
                                                          bool managedOpList) {
    SkASSERT(fContext);

    // This is a temporary fix for the partial-MDB world. In that world we're not reordering
    // so ops that (in the single opList world) would've just glommed onto the end of the single
    // opList but referred to a far earlier RT need to appear in their own opList.
    if (!fOpLists.empty()) {
        fOpLists.back()->makeClosed(*fContext->caps());
    }

    sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(rtp,
                                                                fContext->getGpu(),
                                                                fContext->getAuditTrail()));
    // The GrRenderTargetOpList ctor is expected to have installed itself as the
    // proxy's last opList.
    SkASSERT(rtp->getLastOpList() == opList.get());

    if (managedOpList) {
        fOpLists.push_back() = opList;
    }

    return opList;
}

// Creates a new opList for texture (copy/upload) operations targeting
// 'textureProxy'. Texture opLists are always manager-tracked.
sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(GrTextureProxy* textureProxy) {
    SkASSERT(fContext);

    // This is a temporary fix for the partial-MDB world. In that world we're not reordering
    // so ops that (in the single opList world) would've just glommed onto the end of the single
    // opList but referred to a far earlier RT need to appear in their own opList.
    if (!fOpLists.empty()) {
        fOpLists.back()->makeClosed(*fContext->caps());
    }

    sk_sp<GrTextureOpList> opList(new GrTextureOpList(fContext->resourceProvider(),
                                                      textureProxy,
                                                      fContext->getAuditTrail()));

    SkASSERT(textureProxy->getLastOpList() == opList.get());

    fOpLists.push_back() = opList;

    return opList;
}

// Lazily creates and caches the atlas-based text context shared by all
// render target contexts.
GrAtlasTextContext* GrDrawingManager::getAtlasTextContext() {
    if (!fAtlasTextContext) {
        fAtlasTextContext.reset(GrAtlasTextContext::Create());
    }

    return fAtlasTextContext.get();
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {

    // Both renderers are created lazily and cached; freeGpuResources() deletes
    // them, so they may be rebuilt here after a purge.
    if (!fPathRendererChain) {
        fPathRendererChain = new GrPathRendererChain(fContext, fOptionsForPathRendererChain);
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        // Fall back to the (expensive) software path renderer only when no GPU
        // path renderer in the chain can handle the path.
        if (!fSoftwarePathRenderer) {
            fSoftwarePathRenderer =
                    new GrSoftwarePathRenderer(fContext->resourceProvider(),
                                               fOptionsForPathRendererChain.fAllowPathMaskCaching);
        }
        if (fSoftwarePathRenderer->canDrawPath(args)) {
            pr = fSoftwarePathRenderer;
        }
    }

    return pr;
}

// Wraps a render-target-capable proxy in a GrRenderTargetContext. Returns
// nullptr if the manager is abandoned, the proxy is not a render target, or
// the colorSpace/config combination is invalid. When device-independent fonts
// are requested and NVPR path rendering is supported (and a stencil buffer can
// be attached), a GrPathRenderingRenderTargetContext is returned instead.
sk_sp<GrRenderTargetContext> GrDrawingManager::makeRenderTargetContext(
                                                            sk_sp<GrSurfaceProxy> sProxy,
                                                            sk_sp<SkColorSpace> colorSpace,
                                                            const SkSurfaceProps* surfaceProps,
                                                            bool managedOpList) {
    if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage. We allow a null color space here, for read/write pixels and
    // other special code paths. If a color space is provided, though, enforce all other rules.
    if (colorSpace && !SkSurface_Gpu::Valid(fContext, sProxy->config(), colorSpace.get())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    sk_sp<GrRenderTargetProxy> rtp(sk_ref_sp(sProxy->asRenderTargetProxy()));

    bool useDIF = false;
    if (surfaceProps) {
        useDIF = surfaceProps->isUseDeviceIndependentFonts();
    }

    if (useDIF && fContext->caps()->shaderCaps()->pathRenderingSupport() &&
        GrFSAAType::kNone != rtp->fsaaType()) {
        // TODO: defer stencil buffer attachment for PathRenderingDrawContext
        if (!rtp->instantiate(fContext->resourceProvider())) {
            return nullptr;
        }
        GrRenderTarget* rt = rtp->priv().peekRenderTarget();

        GrStencilAttachment* sb = fContext->resourceProvider()->attachStencilAttachment(rt);
        if (sb) {
            return sk_sp<GrRenderTargetContext>(new GrPathRenderingRenderTargetContext(
                                                        fContext, this, std::move(rtp),
                                                        std::move(colorSpace), surfaceProps,
                                                        fContext->getAuditTrail(), fSingleOwner));
        }
        // NOTE(review): if no stencil buffer could be attached we silently fall
        // through to a plain GrRenderTargetContext below.
    }

    return sk_sp<GrRenderTargetContext>(new GrRenderTargetContext(fContext, this, std::move(rtp),
                                                                  std::move(colorSpace),
                                                                  surfaceProps,
                                                                  fContext->getAuditTrail(),
                                                                  fSingleOwner, managedOpList));
}

// Wraps a texture-capable (but non-render-target) proxy in a GrTextureContext.
// Returns nullptr if the manager is abandoned, the proxy is not a texture, or
// the colorSpace/config combination is invalid.
sk_sp<GrTextureContext> GrDrawingManager::makeTextureContext(sk_sp<GrSurfaceProxy> sProxy,
                                                             sk_sp<SkColorSpace> colorSpace) {
    if (this->wasAbandoned() || !sProxy->asTextureProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage. We allow a null color space here, for read/write pixels and
    // other special code paths. If a color space is provided, though, enforce all other rules.
    if (colorSpace && !SkSurface_Gpu::Valid(fContext, sProxy->config(), colorSpace.get())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    // GrTextureRenderTargets should always be using GrRenderTargetContext
    SkASSERT(!sProxy->asRenderTargetProxy());

    sk_sp<GrTextureProxy> textureProxy(sk_ref_sp(sProxy->asTextureProxy()));

    return sk_sp<GrTextureContext>(new GrTextureContext(fContext, this, std::move(textureProxy),
                                                        std::move(colorSpace),
                                                        fContext->getAuditTrail(),
                                                        fSingleOwner));
}